diff --git a/go.mod b/go.mod
index 172b786dcd845..784e8422c67a7 100644
--- a/go.mod
+++ b/go.mod
@@ -42,14 +42,14 @@ require (
github.com/aquasecurity/libbpfgo v0.5.1-libbpf-1.2
github.com/armon/go-radix v1.0.0
github.com/aws/aws-sdk-go v1.52.2
- github.com/aws/aws-sdk-go-v2 v1.26.1
+ github.com/aws/aws-sdk-go-v2 v1.30.4
github.com/aws/aws-sdk-go-v2/config v1.27.11
github.com/aws/aws-sdk-go-v2/credentials v1.17.11
github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.13.14
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.15
github.com/aws/aws-sdk-go-v2/service/athena v1.40.4
- github.com/aws/aws-sdk-go-v2/service/dynamodb v1.32.0
+ github.com/aws/aws-sdk-go-v2/service/dynamodb v1.34.5
github.com/aws/aws-sdk-go-v2/service/ec2 v1.160.0
github.com/aws/aws-sdk-go-v2/service/ec2instanceconnect v1.23.2
github.com/aws/aws-sdk-go-v2/service/ecs v1.41.7
@@ -64,7 +64,7 @@ require (
github.com/aws/aws-sdk-go-v2/service/sqs v1.31.4
github.com/aws/aws-sdk-go-v2/service/sts v1.28.6
github.com/aws/aws-sigv4-auth-cassandra-gocql-driver-plugin v1.1.0
- github.com/aws/smithy-go v1.20.2
+ github.com/aws/smithy-go v1.20.4
github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20240323062759-1fd604ae58de
github.com/beevik/etree v1.3.0
github.com/buildkite/bintest/v3 v3.2.0
@@ -256,16 +256,16 @@ require (
github.com/apache/arrow/go/v15 v15.0.0 // indirect
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 // indirect
- github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5 // indirect
- github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.16 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.16 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.5 // indirect
- github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.20.4 // indirect
+ github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.22.4
github.com/aws/aws-sdk-go-v2/service/ecr v1.27.3 // indirect
github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.23.3 // indirect
- github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7 // indirect
- github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.9.6 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.9.17 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.5 // indirect
github.com/aws/aws-sdk-go-v2/service/ssm v1.50.0
@@ -538,6 +538,11 @@ require (
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
)
+require (
+	github.com/aws/aws-sdk-go-v2/feature/dynamodbstreams/attributevalue v1.13.68
+	github.com/aws/aws-sdk-go-v2/service/applicationautoscaling v1.31.1
+)
+
// Update also `ignore` in .github/dependabot.yml.
replace (
github.com/alecthomas/kingpin/v2 => github.com/gravitational/kingpin/v2 v2.1.11-0.20230515143221-4ec6b70ecd33
diff --git a/go.sum b/go.sum
index 3588e1d39cfe2..b2ad6621eeade 100644
--- a/go.sum
+++ b/go.sum
@@ -829,8 +829,8 @@ github.com/aws/aws-sdk-go v1.49.12/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3Tj
github.com/aws/aws-sdk-go v1.52.2 h1:l4g9wBXRBlvCtScvv4iLZCzLCtR7BFJcXOnOGQ20orw=
github.com/aws/aws-sdk-go v1.52.2/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
github.com/aws/aws-sdk-go-v2 v1.18.0/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw=
-github.com/aws/aws-sdk-go-v2 v1.26.1 h1:5554eUqIYVWpU0YmeeYZ0wU64H2VLBs8TlhRB2L+EkA=
-github.com/aws/aws-sdk-go-v2 v1.26.1/go.mod h1:ffIFB97e2yNsv4aTSGkqtHnppsIJzw7G7BReUZ3jCXM=
+github.com/aws/aws-sdk-go-v2 v1.30.4 h1:frhcagrVNrzmT95RJImMHgabt99vkXGslubDaDagTk8=
+github.com/aws/aws-sdk-go-v2 v1.30.4/go.mod h1:CT+ZPWXbYrci8chcARI3OmI/qgd+f6WtuLOoaIA8PR0=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 h1:x6xsQXGSmW6frevwDA+vi/wqhp1ct18mVXYN08/93to=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2/go.mod h1:lPprDr1e6cJdyYeGXnRaJoP4Md+cDBvi2eOj00BlGmg=
github.com/aws/aws-sdk-go-v2/config v1.18.25/go.mod h1:dZnYpD5wTW/dQF0rRNLVypB396zWCcPiBIvdvSWHEg4=
@@ -841,28 +841,32 @@ github.com/aws/aws-sdk-go-v2/credentials v1.17.11 h1:YuIB1dJNf1Re822rriUOTxopaHH
github.com/aws/aws-sdk-go-v2/credentials v1.17.11/go.mod h1:AQtFPsDH9bI2O+71anW6EKL+NcD7LG3dpKGMV4SShgo=
github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.13.14 h1:MqN3V/VApAVAheStH43Dl3BWuGE712Cp5s97WmCMbYQ=
github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.13.14/go.mod h1:WwwihVdoE2S7TTziJGvgWaHI8HlOt1DwO6DM338pkzo=
+github.com/aws/aws-sdk-go-v2/feature/dynamodbstreams/attributevalue v1.13.68 h1:F3MGCAxIo5mvwzcugKTiH6wOYbIN986EPpjx8DM/Kp4=
+github.com/aws/aws-sdk-go-v2/feature/dynamodbstreams/attributevalue v1.13.68/go.mod h1:tvEPA4Pmuwj37V2+ippUhPWr6aa7FjnE0O64yiPbKEE=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.3/go.mod h1:4Q0UFP0YJf0NrsEuEYHpM9fTSEVnD16Z3uyEF7J9JGM=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1 h1:FVJ0r5XTHSmIHJV6KuDmdYhEpvlHpiSd38RQWhut5J4=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1/go.mod h1:zusuAeqezXzAB24LGuzuekqMAEgWkVYukBec3kr3jUg=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.15 h1:7Zwtt/lP3KNRkeZre7soMELMGNoBrutx8nobg1jKWmo=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.15/go.mod h1:436h2adoHb57yd+8W+gYPrrA9U/R/SuAuOO42Ushzhw=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.33/go.mod h1:7i0PF1ME/2eUPFcjkVIwq+DOygHEoK92t5cDqNgYbIw=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5 h1:aw39xVGeRWlWx9EzGVnhOR4yOjQDHPQ6o6NmBlscyQg=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5/go.mod h1:FSaRudD0dXiMPK2UjknVwwTYyZMRsHv3TtkabsZih5I=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.16 h1:TNyt/+X43KJ9IJJMjKfa3bNTiZbUP7DeCxfbTROESwY=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.16/go.mod h1:2DwJF39FlNAUiX5pAc0UNeiz16lK2t7IaFcm0LFHEgc=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.27/go.mod h1:UrHnn3QV/d0pBZ6QBAEQcqFLf8FAzLmoUfPVIueOvoM=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5 h1:PG1F3OD1szkuQPzDw3CIQsRIrtTlUC3lP84taWzHlq0=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5/go.mod h1:jU1li6RFryMz+so64PpKtudI+QzbKoIEivqdf6LNpOc=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.16 h1:jYfy8UPmd+6kJW5YhY0L1/KftReOGxI/4NtVSTh9O/I=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.16/go.mod h1:7ZfEPZxkW42Afq4uQB8H2E2e6ebh6mXTueEpYzjCzcs=
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.34/go.mod h1:Etz2dj6UHYuw+Xw830KfzCfWGMzqvUTCjUj5b76GVDc=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.5 h1:81KE7vaZzrl7yHBYHVEzYB8sypz11NMOZ40YlWvPxsU=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.5/go.mod h1:LIt2rg7Mcgn09Ygbdh/RdIm0rQ+3BNkbP1gyVMFtRK0=
+github.com/aws/aws-sdk-go-v2/service/applicationautoscaling v1.31.1 h1:k9Naq87QuDek6vxzZBAElQxVD6r+Zw1EuHUn5OnId6Q=
+github.com/aws/aws-sdk-go-v2/service/applicationautoscaling v1.31.1/go.mod h1:YzuGTQ6UfGKCrzpSDF62R0OzvcXBy8zTK7R9ILU44Iw=
github.com/aws/aws-sdk-go-v2/service/athena v1.40.4 h1:tiHIjFXSyb5DbNfnu3ql2r86s6llLdzwWAVJkPgw/I0=
github.com/aws/aws-sdk-go-v2/service/athena v1.40.4/go.mod h1:6OHesqDfYPNzYI+VaXtmylYLyppuUy9SwRk4CH/pQA4=
-github.com/aws/aws-sdk-go-v2/service/dynamodb v1.32.0 h1:tGV+9T7NwSJNky5tGLh6/i7CoIkd9fPiGWDn9u4PWgI=
-github.com/aws/aws-sdk-go-v2/service/dynamodb v1.32.0/go.mod h1:lVLqEtX+ezgtfalyJs7Peb0uv9dEpAQP5yuq2O26R44=
-github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.20.4 h1:hSwDD19/e01z3pfyx+hDeX5T/0Sn+ZEnnTO5pVWKWx8=
-github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.20.4/go.mod h1:61CuGwE7jYn0g2gl7K3qoT4vCY59ZQEixkPu8PN5IrE=
+github.com/aws/aws-sdk-go-v2/service/dynamodb v1.34.5 h1:Cm77yt+/CV7A6DglkENsWA3H1hq8+4ItJnFKrhxHkvg=
+github.com/aws/aws-sdk-go-v2/service/dynamodb v1.34.5/go.mod h1:s2fYaueBuCnwv1XQn6T8TfShxJWusv5tWPMcL+GY6+g=
+github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.22.4 h1:qOvCqaiLTc0MnIdZr0LbdtJKetiRscHxi+9XjjtlEAs=
+github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.22.4/go.mod h1:3YxVsEoCNYOLIbdA+cCXSp1fom9hrhyB1DsCiYryCaQ=
github.com/aws/aws-sdk-go-v2/service/ec2 v1.160.0 h1:ooy0OFbrdSwgk32OFGPnvBwry5ySYCKkgTEbQ2hejs8=
github.com/aws/aws-sdk-go-v2/service/ec2 v1.160.0/go.mod h1:xejKuuRDjz6z5OqyeLsz01MlOqqW7CqpAB4PabNvpu8=
github.com/aws/aws-sdk-go-v2/service/ec2instanceconnect v1.23.2 h1:TX5ZoNUPAV7lt6+K9E3av+8lLh3yQfiFY78I0NDBMrQ=
@@ -879,12 +883,12 @@ github.com/aws/aws-sdk-go-v2/service/glue v1.80.0 h1:Br4I/7NPRMLQiMjumZHG42ORoli
github.com/aws/aws-sdk-go-v2/service/glue v1.80.0/go.mod h1:maQT+ebL6UAFXYp8fJlK2Dv/s42LZuggi2l6pVeE2B4=
github.com/aws/aws-sdk-go-v2/service/iam v1.32.0 h1:ZNlfPdw849gBo/lvLFbEEvpTJMij0LXqiNWZ+lIamlU=
github.com/aws/aws-sdk-go-v2/service/iam v1.32.0/go.mod h1:aXWImQV0uTW35LM0A/T4wEg6R1/ReXUu4SM6/lUHYK0=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 h1:Ji0DY1xUsUr3I8cHps0G+XM3WWU16lP6yG8qu1GAZAs=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2/go.mod h1:5CsjAbs3NlGQyZNFACh+zztPDI7fU6eW9QsxjfnuBKg=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4 h1:KypMCbLPPHEmf9DgMGw51jMj77VfGPAN2Kv4cfhlfgI=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4/go.mod h1:Vz1JQXliGcQktFTN/LN6uGppAIRoLBR2bMvIMP0gOjc=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7 h1:ZMeFZ5yk+Ek+jNr1+uwCd2tG89t6oTS5yVWpa6yy2es=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7/go.mod h1:mxV05U+4JiHqIpGqqYXOHLPKUC6bDXC44bsUhNjOEwY=
-github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.9.6 h1:6tayEze2Y+hiL3kdnEUxSPsP+pJsUfwLSFspFl1ru9Q=
-github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.9.6/go.mod h1:qVNb/9IOVsLCZh0x2lnagrBwQ9fxajUpXS7OZfIsKn0=
+github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.9.17 h1:HDJGz1jlV7RokVgTPfx1UHBHANC0N5Uk++xgyYgz5E0=
+github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.9.17/go.mod h1:5szDu6TWdRDytfDxUQVv2OYfpTQMKApVFyqpm+TcA98=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.27/go.mod h1:EOwBD4J4S5qYszS5/3DpkejfuK+Z5/1uzICfPaZLtqw=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7 h1:ogRAwT1/gxJBcSWDMZlgyFUM962F51A5CRhDLbxLdmo=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7/go.mod h1:YCsIZhXfRPLFFCl5xxY+1T9RKzOKjCut+28JSX2DnAk=
@@ -918,8 +922,8 @@ github.com/aws/aws-sdk-go-v2/service/sts v1.28.6/go.mod h1:FZf1/nKNEkHdGGJP/cI2M
github.com/aws/aws-sigv4-auth-cassandra-gocql-driver-plugin v1.1.0 h1:EJsHUYgFBV7/N1YtL73lsfZODAOU+CnNSZfEAlqqQaA=
github.com/aws/aws-sigv4-auth-cassandra-gocql-driver-plugin v1.1.0/go.mod h1:AxKuXHc0zv2yYaeueUG7R3ONbcnQIuDj0bkdFmPVRzU=
github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
-github.com/aws/smithy-go v1.20.2 h1:tbp628ireGtzcHDDmLT/6ADHidqnwgF57XOXZe6tp4Q=
-github.com/aws/smithy-go v1.20.2/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E=
+github.com/aws/smithy-go v1.20.4 h1:2HK1zBdPgRbjFOHlfeQZfpC4r72MOb9bZkiFwggKO+4=
+github.com/aws/smithy-go v1.20.4/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg=
github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20240323062759-1fd604ae58de h1:GpfVZg7GMoefvIxKI+BC+4Fap8eCAAQ7a6Ww2l9HRnM=
github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20240323062759-1fd604ae58de/go.mod h1:pJQomIo4A5X9k8E30Q4A2BAJckIwQhC1TAFN/WXCkdk=
github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k=
diff --git a/integrations/event-handler/go.mod b/integrations/event-handler/go.mod
index 4fccd9253e139..b242031e54a7f 100644
--- a/integrations/event-handler/go.mod
+++ b/integrations/event-handler/go.mod
@@ -62,14 +62,14 @@ require (
github.com/armon/go-radix v1.0.0 // indirect
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
github.com/aws/aws-sdk-go v1.52.2 // indirect
- github.com/aws/aws-sdk-go-v2 v1.26.1 // indirect
+ github.com/aws/aws-sdk-go-v2 v1.30.4 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 // indirect
github.com/aws/aws-sdk-go-v2/config v1.27.11 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.17.11 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1 // indirect
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.15 // indirect
- github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5 // indirect
- github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.16 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.16 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.5 // indirect
github.com/aws/aws-sdk-go-v2/service/athena v1.40.4 // indirect
@@ -79,7 +79,7 @@ require (
github.com/aws/aws-sdk-go-v2/service/eks v1.42.1 // indirect
github.com/aws/aws-sdk-go-v2/service/glue v1.80.0 // indirect
github.com/aws/aws-sdk-go-v2/service/iam v1.32.0 // indirect
- github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.5 // indirect
@@ -89,7 +89,7 @@ require (
github.com/aws/aws-sdk-go-v2/service/sso v1.20.5 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.4 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.28.6 // indirect
- github.com/aws/smithy-go v1.20.2 // indirect
+ github.com/aws/smithy-go v1.20.4 // indirect
github.com/beevik/etree v1.3.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
diff --git a/integrations/event-handler/go.sum b/integrations/event-handler/go.sum
index 7f68ed0a2a181..62609415e25d0 100644
--- a/integrations/event-handler/go.sum
+++ b/integrations/event-handler/go.sum
@@ -719,8 +719,8 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3d
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
github.com/aws/aws-sdk-go v1.52.2 h1:l4g9wBXRBlvCtScvv4iLZCzLCtR7BFJcXOnOGQ20orw=
github.com/aws/aws-sdk-go v1.52.2/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
-github.com/aws/aws-sdk-go-v2 v1.26.1 h1:5554eUqIYVWpU0YmeeYZ0wU64H2VLBs8TlhRB2L+EkA=
-github.com/aws/aws-sdk-go-v2 v1.26.1/go.mod h1:ffIFB97e2yNsv4aTSGkqtHnppsIJzw7G7BReUZ3jCXM=
+github.com/aws/aws-sdk-go-v2 v1.30.4 h1:frhcagrVNrzmT95RJImMHgabt99vkXGslubDaDagTk8=
+github.com/aws/aws-sdk-go-v2 v1.30.4/go.mod h1:CT+ZPWXbYrci8chcARI3OmI/qgd+f6WtuLOoaIA8PR0=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 h1:x6xsQXGSmW6frevwDA+vi/wqhp1ct18mVXYN08/93to=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2/go.mod h1:lPprDr1e6cJdyYeGXnRaJoP4Md+cDBvi2eOj00BlGmg=
github.com/aws/aws-sdk-go-v2/config v1.27.11 h1:f47rANd2LQEYHda2ddSCKYId18/8BhSRM4BULGmfgNA=
@@ -731,10 +731,10 @@ github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1 h1:FVJ0r5XTHSmIHJV6KuDmdYh
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1/go.mod h1:zusuAeqezXzAB24LGuzuekqMAEgWkVYukBec3kr3jUg=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.15 h1:7Zwtt/lP3KNRkeZre7soMELMGNoBrutx8nobg1jKWmo=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.15/go.mod h1:436h2adoHb57yd+8W+gYPrrA9U/R/SuAuOO42Ushzhw=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5 h1:aw39xVGeRWlWx9EzGVnhOR4yOjQDHPQ6o6NmBlscyQg=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5/go.mod h1:FSaRudD0dXiMPK2UjknVwwTYyZMRsHv3TtkabsZih5I=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5 h1:PG1F3OD1szkuQPzDw3CIQsRIrtTlUC3lP84taWzHlq0=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5/go.mod h1:jU1li6RFryMz+so64PpKtudI+QzbKoIEivqdf6LNpOc=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.16 h1:TNyt/+X43KJ9IJJMjKfa3bNTiZbUP7DeCxfbTROESwY=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.16/go.mod h1:2DwJF39FlNAUiX5pAc0UNeiz16lK2t7IaFcm0LFHEgc=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.16 h1:jYfy8UPmd+6kJW5YhY0L1/KftReOGxI/4NtVSTh9O/I=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.16/go.mod h1:7ZfEPZxkW42Afq4uQB8H2E2e6ebh6mXTueEpYzjCzcs=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.5 h1:81KE7vaZzrl7yHBYHVEzYB8sypz11NMOZ40YlWvPxsU=
@@ -753,8 +753,8 @@ github.com/aws/aws-sdk-go-v2/service/glue v1.80.0 h1:Br4I/7NPRMLQiMjumZHG42ORoli
github.com/aws/aws-sdk-go-v2/service/glue v1.80.0/go.mod h1:maQT+ebL6UAFXYp8fJlK2Dv/s42LZuggi2l6pVeE2B4=
github.com/aws/aws-sdk-go-v2/service/iam v1.32.0 h1:ZNlfPdw849gBo/lvLFbEEvpTJMij0LXqiNWZ+lIamlU=
github.com/aws/aws-sdk-go-v2/service/iam v1.32.0/go.mod h1:aXWImQV0uTW35LM0A/T4wEg6R1/ReXUu4SM6/lUHYK0=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 h1:Ji0DY1xUsUr3I8cHps0G+XM3WWU16lP6yG8qu1GAZAs=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2/go.mod h1:5CsjAbs3NlGQyZNFACh+zztPDI7fU6eW9QsxjfnuBKg=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4 h1:KypMCbLPPHEmf9DgMGw51jMj77VfGPAN2Kv4cfhlfgI=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4/go.mod h1:Vz1JQXliGcQktFTN/LN6uGppAIRoLBR2bMvIMP0gOjc=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7 h1:ZMeFZ5yk+Ek+jNr1+uwCd2tG89t6oTS5yVWpa6yy2es=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7/go.mod h1:mxV05U+4JiHqIpGqqYXOHLPKUC6bDXC44bsUhNjOEwY=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7 h1:ogRAwT1/gxJBcSWDMZlgyFUM962F51A5CRhDLbxLdmo=
@@ -773,8 +773,8 @@ github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.4 h1:Jux+gDDyi1Lruk+KHF91tK2K
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.4/go.mod h1:mUYPBhaF2lGiukDEjJX2BLRRKTmoUSitGDUgM4tRxak=
github.com/aws/aws-sdk-go-v2/service/sts v1.28.6 h1:cwIxeBttqPN3qkaAjcEcsh8NYr8n2HZPkcKgPAi1phU=
github.com/aws/aws-sdk-go-v2/service/sts v1.28.6/go.mod h1:FZf1/nKNEkHdGGJP/cI2MoIMquumuRK6ol3QQJNDxmw=
-github.com/aws/smithy-go v1.20.2 h1:tbp628ireGtzcHDDmLT/6ADHidqnwgF57XOXZe6tp4Q=
-github.com/aws/smithy-go v1.20.2/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E=
+github.com/aws/smithy-go v1.20.4 h1:2HK1zBdPgRbjFOHlfeQZfpC4r72MOb9bZkiFwggKO+4=
+github.com/aws/smithy-go v1.20.4/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg=
github.com/beevik/etree v1.1.0/go.mod h1:r8Aw8JqVegEf0w2fDnATrX9VpkMcyFeM0FhwO62wh+A=
github.com/beevik/etree v1.3.0 h1:hQTc+pylzIKDb23yYprodCWWTt+ojFfUZyzU09a/hmU=
github.com/beevik/etree v1.3.0/go.mod h1:aiPf89g/1k3AShMVAzriilpcE4R/Vuor90y83zVZWFc=
diff --git a/integrations/terraform/go.mod b/integrations/terraform/go.mod
index a5919a5630ee6..b8a8852997496 100644
--- a/integrations/terraform/go.mod
+++ b/integrations/terraform/go.mod
@@ -74,14 +74,14 @@ require (
github.com/armon/go-radix v1.0.0 // indirect
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
github.com/aws/aws-sdk-go v1.52.2 // indirect
- github.com/aws/aws-sdk-go-v2 v1.26.1 // indirect
+ github.com/aws/aws-sdk-go-v2 v1.30.4 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 // indirect
github.com/aws/aws-sdk-go-v2/config v1.27.11 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.17.11 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1 // indirect
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.15 // indirect
- github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5 // indirect
- github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.16 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.16 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.5 // indirect
github.com/aws/aws-sdk-go-v2/service/athena v1.40.4 // indirect
@@ -91,7 +91,7 @@ require (
github.com/aws/aws-sdk-go-v2/service/eks v1.42.1 // indirect
github.com/aws/aws-sdk-go-v2/service/glue v1.80.0 // indirect
github.com/aws/aws-sdk-go-v2/service/iam v1.32.0 // indirect
- github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.5 // indirect
@@ -101,7 +101,7 @@ require (
github.com/aws/aws-sdk-go-v2/service/sso v1.20.5 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.4 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.28.6 // indirect
- github.com/aws/smithy-go v1.20.2 // indirect
+ github.com/aws/smithy-go v1.20.4 // indirect
github.com/beevik/etree v1.3.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bgentry/speakeasy v0.1.0 // indirect
diff --git a/integrations/terraform/go.sum b/integrations/terraform/go.sum
index 45d48438228b8..44b1ce1ee2647 100644
--- a/integrations/terraform/go.sum
+++ b/integrations/terraform/go.sum
@@ -772,30 +772,38 @@ github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3A
github.com/aws/aws-sdk-go v1.25.3/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.52.2 h1:l4g9wBXRBlvCtScvv4iLZCzLCtR7BFJcXOnOGQ20orw=
github.com/aws/aws-sdk-go v1.52.2/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
-github.com/aws/aws-sdk-go-v2 v1.26.1 h1:5554eUqIYVWpU0YmeeYZ0wU64H2VLBs8TlhRB2L+EkA=
-github.com/aws/aws-sdk-go-v2 v1.26.1/go.mod h1:ffIFB97e2yNsv4aTSGkqtHnppsIJzw7G7BReUZ3jCXM=
+github.com/aws/aws-sdk-go-v2 v1.30.4 h1:frhcagrVNrzmT95RJImMHgabt99vkXGslubDaDagTk8=
+github.com/aws/aws-sdk-go-v2 v1.30.4/go.mod h1:CT+ZPWXbYrci8chcARI3OmI/qgd+f6WtuLOoaIA8PR0=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 h1:x6xsQXGSmW6frevwDA+vi/wqhp1ct18mVXYN08/93to=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2/go.mod h1:lPprDr1e6cJdyYeGXnRaJoP4Md+cDBvi2eOj00BlGmg=
github.com/aws/aws-sdk-go-v2/config v1.27.11 h1:f47rANd2LQEYHda2ddSCKYId18/8BhSRM4BULGmfgNA=
github.com/aws/aws-sdk-go-v2/config v1.27.11/go.mod h1:SMsV78RIOYdve1vf36z8LmnszlRWkwMQtomCAI0/mIE=
github.com/aws/aws-sdk-go-v2/credentials v1.17.11 h1:YuIB1dJNf1Re822rriUOTxopaHHvIq0l/pX3fwO+Tzs=
github.com/aws/aws-sdk-go-v2/credentials v1.17.11/go.mod h1:AQtFPsDH9bI2O+71anW6EKL+NcD7LG3dpKGMV4SShgo=
+github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.13.14 h1:MqN3V/VApAVAheStH43Dl3BWuGE712Cp5s97WmCMbYQ=
+github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.13.14/go.mod h1:WwwihVdoE2S7TTziJGvgWaHI8HlOt1DwO6DM338pkzo=
+github.com/aws/aws-sdk-go-v2/feature/dynamodbstreams/attributevalue v1.13.68 h1:F3MGCAxIo5mvwzcugKTiH6wOYbIN986EPpjx8DM/Kp4=
+github.com/aws/aws-sdk-go-v2/feature/dynamodbstreams/attributevalue v1.13.68/go.mod h1:tvEPA4Pmuwj37V2+ippUhPWr6aa7FjnE0O64yiPbKEE=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1 h1:FVJ0r5XTHSmIHJV6KuDmdYhEpvlHpiSd38RQWhut5J4=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1/go.mod h1:zusuAeqezXzAB24LGuzuekqMAEgWkVYukBec3kr3jUg=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.15 h1:7Zwtt/lP3KNRkeZre7soMELMGNoBrutx8nobg1jKWmo=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.15/go.mod h1:436h2adoHb57yd+8W+gYPrrA9U/R/SuAuOO42Ushzhw=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5 h1:aw39xVGeRWlWx9EzGVnhOR4yOjQDHPQ6o6NmBlscyQg=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5/go.mod h1:FSaRudD0dXiMPK2UjknVwwTYyZMRsHv3TtkabsZih5I=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5 h1:PG1F3OD1szkuQPzDw3CIQsRIrtTlUC3lP84taWzHlq0=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5/go.mod h1:jU1li6RFryMz+so64PpKtudI+QzbKoIEivqdf6LNpOc=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.16 h1:TNyt/+X43KJ9IJJMjKfa3bNTiZbUP7DeCxfbTROESwY=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.16/go.mod h1:2DwJF39FlNAUiX5pAc0UNeiz16lK2t7IaFcm0LFHEgc=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.16 h1:jYfy8UPmd+6kJW5YhY0L1/KftReOGxI/4NtVSTh9O/I=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.16/go.mod h1:7ZfEPZxkW42Afq4uQB8H2E2e6ebh6mXTueEpYzjCzcs=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.5 h1:81KE7vaZzrl7yHBYHVEzYB8sypz11NMOZ40YlWvPxsU=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.5/go.mod h1:LIt2rg7Mcgn09Ygbdh/RdIm0rQ+3BNkbP1gyVMFtRK0=
+github.com/aws/aws-sdk-go-v2/service/applicationautoscaling v1.31.1 h1:k9Naq87QuDek6vxzZBAElQxVD6r+Zw1EuHUn5OnId6Q=
+github.com/aws/aws-sdk-go-v2/service/applicationautoscaling v1.31.1/go.mod h1:YzuGTQ6UfGKCrzpSDF62R0OzvcXBy8zTK7R9ILU44Iw=
github.com/aws/aws-sdk-go-v2/service/athena v1.40.4 h1:tiHIjFXSyb5DbNfnu3ql2r86s6llLdzwWAVJkPgw/I0=
github.com/aws/aws-sdk-go-v2/service/athena v1.40.4/go.mod h1:6OHesqDfYPNzYI+VaXtmylYLyppuUy9SwRk4CH/pQA4=
-github.com/aws/aws-sdk-go-v2/service/dynamodb v1.32.0 h1:tGV+9T7NwSJNky5tGLh6/i7CoIkd9fPiGWDn9u4PWgI=
-github.com/aws/aws-sdk-go-v2/service/dynamodb v1.32.0/go.mod h1:lVLqEtX+ezgtfalyJs7Peb0uv9dEpAQP5yuq2O26R44=
+github.com/aws/aws-sdk-go-v2/service/dynamodb v1.34.5 h1:Cm77yt+/CV7A6DglkENsWA3H1hq8+4ItJnFKrhxHkvg=
+github.com/aws/aws-sdk-go-v2/service/dynamodb v1.34.5/go.mod h1:s2fYaueBuCnwv1XQn6T8TfShxJWusv5tWPMcL+GY6+g=
+github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.22.4 h1:qOvCqaiLTc0MnIdZr0LbdtJKetiRscHxi+9XjjtlEAs=
+github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.22.4/go.mod h1:3YxVsEoCNYOLIbdA+cCXSp1fom9hrhyB1DsCiYryCaQ=
github.com/aws/aws-sdk-go-v2/service/ec2 v1.160.0 h1:ooy0OFbrdSwgk32OFGPnvBwry5ySYCKkgTEbQ2hejs8=
github.com/aws/aws-sdk-go-v2/service/ec2 v1.160.0/go.mod h1:xejKuuRDjz6z5OqyeLsz01MlOqqW7CqpAB4PabNvpu8=
github.com/aws/aws-sdk-go-v2/service/ec2instanceconnect v1.23.2 h1:TX5ZoNUPAV7lt6+K9E3av+8lLh3yQfiFY78I0NDBMrQ=
@@ -808,12 +816,12 @@ github.com/aws/aws-sdk-go-v2/service/glue v1.80.0 h1:Br4I/7NPRMLQiMjumZHG42ORoli
github.com/aws/aws-sdk-go-v2/service/glue v1.80.0/go.mod h1:maQT+ebL6UAFXYp8fJlK2Dv/s42LZuggi2l6pVeE2B4=
github.com/aws/aws-sdk-go-v2/service/iam v1.32.0 h1:ZNlfPdw849gBo/lvLFbEEvpTJMij0LXqiNWZ+lIamlU=
github.com/aws/aws-sdk-go-v2/service/iam v1.32.0/go.mod h1:aXWImQV0uTW35LM0A/T4wEg6R1/ReXUu4SM6/lUHYK0=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 h1:Ji0DY1xUsUr3I8cHps0G+XM3WWU16lP6yG8qu1GAZAs=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2/go.mod h1:5CsjAbs3NlGQyZNFACh+zztPDI7fU6eW9QsxjfnuBKg=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4 h1:KypMCbLPPHEmf9DgMGw51jMj77VfGPAN2Kv4cfhlfgI=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4/go.mod h1:Vz1JQXliGcQktFTN/LN6uGppAIRoLBR2bMvIMP0gOjc=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7 h1:ZMeFZ5yk+Ek+jNr1+uwCd2tG89t6oTS5yVWpa6yy2es=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7/go.mod h1:mxV05U+4JiHqIpGqqYXOHLPKUC6bDXC44bsUhNjOEwY=
-github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.9.6 h1:6tayEze2Y+hiL3kdnEUxSPsP+pJsUfwLSFspFl1ru9Q=
-github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.9.6/go.mod h1:qVNb/9IOVsLCZh0x2lnagrBwQ9fxajUpXS7OZfIsKn0=
+github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.9.17 h1:HDJGz1jlV7RokVgTPfx1UHBHANC0N5Uk++xgyYgz5E0=
+github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.9.17/go.mod h1:5szDu6TWdRDytfDxUQVv2OYfpTQMKApVFyqpm+TcA98=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7 h1:ogRAwT1/gxJBcSWDMZlgyFUM962F51A5CRhDLbxLdmo=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7/go.mod h1:YCsIZhXfRPLFFCl5xxY+1T9RKzOKjCut+28JSX2DnAk=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.5 h1:f9RyWNtS8oH7cZlbn+/JNPpjUk5+5fLd5lM9M0i49Ys=
@@ -836,8 +844,8 @@ github.com/aws/aws-sdk-go-v2/service/sts v1.28.6 h1:cwIxeBttqPN3qkaAjcEcsh8NYr8n
github.com/aws/aws-sdk-go-v2/service/sts v1.28.6/go.mod h1:FZf1/nKNEkHdGGJP/cI2MoIMquumuRK6ol3QQJNDxmw=
github.com/aws/aws-sigv4-auth-cassandra-gocql-driver-plugin v1.1.0 h1:EJsHUYgFBV7/N1YtL73lsfZODAOU+CnNSZfEAlqqQaA=
github.com/aws/aws-sigv4-auth-cassandra-gocql-driver-plugin v1.1.0/go.mod h1:AxKuXHc0zv2yYaeueUG7R3ONbcnQIuDj0bkdFmPVRzU=
-github.com/aws/smithy-go v1.20.2 h1:tbp628ireGtzcHDDmLT/6ADHidqnwgF57XOXZe6tp4Q=
-github.com/aws/smithy-go v1.20.2/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E=
+github.com/aws/smithy-go v1.20.4 h1:2HK1zBdPgRbjFOHlfeQZfpC4r72MOb9bZkiFwggKO+4=
+github.com/aws/smithy-go v1.20.4/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg=
github.com/beevik/etree v1.1.0/go.mod h1:r8Aw8JqVegEf0w2fDnATrX9VpkMcyFeM0FhwO62wh+A=
github.com/beevik/etree v1.3.0 h1:hQTc+pylzIKDb23yYprodCWWTt+ojFfUZyzU09a/hmU=
github.com/beevik/etree v1.3.0/go.mod h1:aiPf89g/1k3AShMVAzriilpcE4R/Vuor90y83zVZWFc=
diff --git a/lib/backend/dynamo/atomicwrite.go b/lib/backend/dynamo/atomicwrite.go
index 78a506a1a5b13..5fda59441c2bd 100644
--- a/lib/backend/dynamo/atomicwrite.go
+++ b/lib/backend/dynamo/atomicwrite.go
@@ -26,9 +26,10 @@ import (
"strings"
"time"
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/service/dynamodb"
- "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute"
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
"github.com/gravitational/trace"
"github.com/gravitational/teleport"
@@ -58,12 +59,12 @@ func (b *Backend) AtomicWrite(ctx context.Context, condacts []backend.Conditiona
tableName := aws.String(b.TableName)
- var txnItems []*dynamodb.TransactWriteItem
+ var txnItems []types.TransactWriteItem
var includesPut bool
for _, ca := range condacts {
var condExpr *string
- var exprAttrValues map[string]*dynamodb.AttributeValue
+ var exprAttrValues map[string]types.AttributeValue
switch ca.Condition.Kind {
case backend.KindWhatever:
@@ -83,8 +84,8 @@ func (b *Backend) AtomicWrite(ctx context.Context, condacts []backend.Conditiona
default:
// revision is expected to be present and well-defined
condExpr = &revisionExpr
- exprAttrValues = map[string]*dynamodb.AttributeValue{
- ":rev": {S: aws.String(ca.Condition.Revision)},
+ exprAttrValues = map[string]types.AttributeValue{
+ ":rev": &types.AttributeValueMemberS{Value: ca.Condition.Revision},
}
}
default:
@@ -93,11 +94,11 @@ func (b *Backend) AtomicWrite(ctx context.Context, condacts []backend.Conditiona
fullPath := prependPrefix(ca.Key)
- var txnItem dynamodb.TransactWriteItem
+ var txnItem types.TransactWriteItem
switch ca.Action.Kind {
case backend.KindNop:
- av, err := dynamodbattribute.MarshalMap(keyLookup{
+ av, err := attributevalue.MarshalMap(keyLookup{
HashKey: hashKey,
FullPath: fullPath,
})
@@ -105,7 +106,7 @@ func (b *Backend) AtomicWrite(ctx context.Context, condacts []backend.Conditiona
return "", trace.Wrap(err)
}
- txnItem.ConditionCheck = &dynamodb.ConditionCheck{
+ txnItem.ConditionCheck = &types.ConditionCheck{
ConditionExpression: condExpr,
ExpressionAttributeValues: exprAttrValues,
Key: av,
@@ -125,19 +126,19 @@ func (b *Backend) AtomicWrite(ctx context.Context, condacts []backend.Conditiona
r.Expires = aws.Int64(ca.Action.Item.Expires.UTC().Unix())
}
- av, err := dynamodbattribute.MarshalMap(r)
+ av, err := attributevalue.MarshalMap(r)
if err != nil {
return "", trace.Wrap(err)
}
- txnItem.Put = &dynamodb.Put{
+ txnItem.Put = &types.Put{
ConditionExpression: condExpr,
ExpressionAttributeValues: exprAttrValues,
Item: av,
TableName: tableName,
}
case backend.KindDelete:
- av, err := dynamodbattribute.MarshalMap(keyLookup{
+ av, err := attributevalue.MarshalMap(keyLookup{
HashKey: hashKey,
FullPath: fullPath,
})
@@ -145,7 +146,7 @@ func (b *Backend) AtomicWrite(ctx context.Context, condacts []backend.Conditiona
return "", trace.Wrap(err)
}
- txnItem.Delete = &dynamodb.Delete{
+ txnItem.Delete = &types.Delete{
ConditionExpression: condExpr,
ExpressionAttributeValues: exprAttrValues,
Key: av,
@@ -156,7 +157,7 @@ func (b *Backend) AtomicWrite(ctx context.Context, condacts []backend.Conditiona
return "", trace.BadParameter("unexpected action kind %v in conditional action against key %q", ca.Action.Kind, ca.Key)
}
- txnItems = append(txnItems, &txnItem)
+ txnItems = append(txnItems, txnItem)
}
// dynamo cancels overlapping transactions without evaluating their conditions. the AtomicWrite API is expected to only fail
@@ -193,11 +194,11 @@ TxnLoop:
}
// execute the transaction
- _, err = b.svc.TransactWriteItemsWithContext(ctx, &dynamodb.TransactWriteItemsInput{
+ _, err = b.svc.TransactWriteItems(ctx, &dynamodb.TransactWriteItemsInput{
TransactItems: txnItems,
})
if err != nil {
- txnErr := &dynamodb.TransactionCanceledException{}
+ var txnErr *types.TransactionCanceledException
if !errors.As(err, &txnErr) {
if s := err.Error(); strings.Contains(s, "AccessDenied") && strings.Contains(s, "dynamodb:ConditionCheckItem") {
b.Warnf("AtomicWrite failed with error that may indicate dynamodb is missing the required dynamodb:ConditionCheckItem permission (this permission is now required for teleport v16 and later). Consider updating your IAM policy to include this permission. Original error: %v", err)
@@ -213,15 +214,14 @@ TxnLoop:
var conditionFailed bool
var txnConflict bool
for _, reason := range txnErr.CancellationReasons {
- if reason.Code == nil {
- continue
- }
-
- switch *reason.Code {
- case dynamodb.BatchStatementErrorCodeEnumConditionalCheckFailed:
+ code := aws.ToString(reason.Code)
+ switch types.BatchStatementErrorCodeEnum(code) {
+ case types.BatchStatementErrorCodeEnumConditionalCheckFailed:
conditionFailed = true
- case dynamodb.BatchStatementErrorCodeEnumTransactionConflict:
+ case types.BatchStatementErrorCodeEnumTransactionConflict:
txnConflict = true
+ case "":
+ continue
}
}
diff --git a/lib/backend/dynamo/configure.go b/lib/backend/dynamo/configure.go
deleted file mode 100644
index ef933056527d7..0000000000000
--- a/lib/backend/dynamo/configure.go
+++ /dev/null
@@ -1,195 +0,0 @@
-/*
- * Teleport
- * Copyright (C) 2023 Gravitational, Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see .
- */
-
-package dynamo
-
-import (
- "context"
- "fmt"
- "strings"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/service/applicationautoscaling"
- "github.com/aws/aws-sdk-go/service/dynamodb"
- "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface"
-)
-
-// SetContinuousBackups enables continuous backups.
-func SetContinuousBackups(ctx context.Context, svc dynamodbiface.DynamoDBAPI, tableName string) error {
- // Make request to AWS to update continuous backups settings.
- _, err := svc.UpdateContinuousBackupsWithContext(ctx, &dynamodb.UpdateContinuousBackupsInput{
- PointInTimeRecoverySpecification: &dynamodb.PointInTimeRecoverySpecification{
- PointInTimeRecoveryEnabled: aws.Bool(true),
- },
- TableName: aws.String(tableName),
- })
- if err != nil {
- return convertError(err)
- }
-
- return nil
-}
-
-// AutoScalingParams defines auto scaling parameters for DynamoDB.
-type AutoScalingParams struct {
- // ReadMaxCapacity is the maximum provisioned read capacity.
- ReadMaxCapacity int64
- // ReadMinCapacity is the minimum provisioned read capacity.
- ReadMinCapacity int64
- // ReadTargetValue is the ratio of consumed read to provisioned capacity.
- ReadTargetValue float64
- // WriteMaxCapacity is the maximum provisioned write capacity.
- WriteMaxCapacity int64
- // WriteMinCapacity is the minimum provisioned write capacity.
- WriteMinCapacity int64
- // WriteTargetValue is the ratio of consumed write to provisioned capacity.
- WriteTargetValue float64
-}
-
-// SetAutoScaling enables auto-scaling for the specified table with given configuration.
-func SetAutoScaling(ctx context.Context, svc *applicationautoscaling.ApplicationAutoScaling, resourceID string, params AutoScalingParams) error {
- readDimension := applicationautoscaling.ScalableDimensionDynamodbTableReadCapacityUnits
- writeDimension := applicationautoscaling.ScalableDimensionDynamodbTableWriteCapacityUnits
-
- // Check if the resource ID refers to an index - those IDs have the following form:
- // 'table//index/'
- //
- // Indices use a slightly different scaling dimension than tables
- if strings.Contains(resourceID, "/index/") {
- readDimension = applicationautoscaling.ScalableDimensionDynamodbIndexReadCapacityUnits
- writeDimension = applicationautoscaling.ScalableDimensionDynamodbIndexWriteCapacityUnits
- }
-
- // Define scaling targets. Defines minimum and maximum {read,write} capacity.
- if _, err := svc.RegisterScalableTargetWithContext(ctx, &applicationautoscaling.RegisterScalableTargetInput{
- MinCapacity: aws.Int64(params.ReadMinCapacity),
- MaxCapacity: aws.Int64(params.ReadMaxCapacity),
- ResourceId: aws.String(resourceID),
- ScalableDimension: aws.String(readDimension),
- ServiceNamespace: aws.String(applicationautoscaling.ServiceNamespaceDynamodb),
- }); err != nil {
- return convertError(err)
- }
- if _, err := svc.RegisterScalableTargetWithContext(ctx, &applicationautoscaling.RegisterScalableTargetInput{
- MinCapacity: aws.Int64(params.WriteMinCapacity),
- MaxCapacity: aws.Int64(params.WriteMaxCapacity),
- ResourceId: aws.String(resourceID),
- ScalableDimension: aws.String(writeDimension),
- ServiceNamespace: aws.String(applicationautoscaling.ServiceNamespaceDynamodb),
- }); err != nil {
- return convertError(err)
- }
-
- // Define scaling policy. Defines the ratio of {read,write} consumed capacity to
- // provisioned capacity DynamoDB will try and maintain.
- if _, err := svc.PutScalingPolicyWithContext(ctx, &applicationautoscaling.PutScalingPolicyInput{
- PolicyName: aws.String(getReadScalingPolicyName(resourceID)),
- PolicyType: aws.String(applicationautoscaling.PolicyTypeTargetTrackingScaling),
- ResourceId: aws.String(resourceID),
- ScalableDimension: aws.String(readDimension),
- ServiceNamespace: aws.String(applicationautoscaling.ServiceNamespaceDynamodb),
- TargetTrackingScalingPolicyConfiguration: &applicationautoscaling.TargetTrackingScalingPolicyConfiguration{
- PredefinedMetricSpecification: &applicationautoscaling.PredefinedMetricSpecification{
- PredefinedMetricType: aws.String(applicationautoscaling.MetricTypeDynamoDbreadCapacityUtilization),
- },
- TargetValue: aws.Float64(params.ReadTargetValue),
- },
- }); err != nil {
- return convertError(err)
- }
- if _, err := svc.PutScalingPolicyWithContext(ctx, &applicationautoscaling.PutScalingPolicyInput{
- PolicyName: aws.String(getWriteScalingPolicyName(resourceID)),
- PolicyType: aws.String(applicationautoscaling.PolicyTypeTargetTrackingScaling),
- ResourceId: aws.String(resourceID),
- ScalableDimension: aws.String(writeDimension),
- ServiceNamespace: aws.String(applicationautoscaling.ServiceNamespaceDynamodb),
- TargetTrackingScalingPolicyConfiguration: &applicationautoscaling.TargetTrackingScalingPolicyConfiguration{
- PredefinedMetricSpecification: &applicationautoscaling.PredefinedMetricSpecification{
- PredefinedMetricType: aws.String(applicationautoscaling.MetricTypeDynamoDbwriteCapacityUtilization),
- },
- TargetValue: aws.Float64(params.WriteTargetValue),
- },
- }); err != nil {
- return convertError(err)
- }
-
- return nil
-}
-
-// GetTableID returns the resourceID of a table based on its table name
-func GetTableID(tableName string) string {
- return fmt.Sprintf("table/%s", tableName)
-}
-
-// GetIndexID returns the resourceID of an index, based on the table & index name
-func GetIndexID(tableName, indexName string) string {
- return fmt.Sprintf("table/%s/index/%s", tableName, indexName)
-}
-
-// getWriteScalingPolicyName returns the policy name for our write scaling policy
-func getWriteScalingPolicyName(resourceID string) string {
- // We're trimming the "table/" prefix since policies before 6.1.0 didn't contain it. By referencing an existing
- // policy name in 'PutScalingPolicy', AWS will update that one instead of creating a new resource.
- return fmt.Sprintf("%s-write-target-tracking-scaling-policy", strings.TrimPrefix(resourceID, "table/"))
-}
-
-// getWriteScalingPolicyName returns the policy name for our read scaling policy
-func getReadScalingPolicyName(resourceID string) string {
- return fmt.Sprintf("%s-read-target-tracking-scaling-policy", strings.TrimPrefix(resourceID, "table/"))
-}
-
-func TurnOnTimeToLive(ctx context.Context, svc dynamodbiface.DynamoDBAPI, tableName string, ttlKey string) error {
- status, err := svc.DescribeTimeToLiveWithContext(ctx, &dynamodb.DescribeTimeToLiveInput{
- TableName: aws.String(tableName),
- })
- if err != nil {
- return convertError(err)
- }
- switch aws.StringValue(status.TimeToLiveDescription.TimeToLiveStatus) {
- case dynamodb.TimeToLiveStatusEnabled, dynamodb.TimeToLiveStatusEnabling:
- return nil
- }
- _, err = svc.UpdateTimeToLiveWithContext(ctx, &dynamodb.UpdateTimeToLiveInput{
- TableName: aws.String(tableName),
- TimeToLiveSpecification: &dynamodb.TimeToLiveSpecification{
- AttributeName: aws.String(ttlKey),
- Enabled: aws.Bool(true),
- },
- })
- return convertError(err)
-}
-
-func TurnOnStreams(ctx context.Context, svc dynamodbiface.DynamoDBAPI, tableName string) error {
- status, err := svc.DescribeTableWithContext(ctx, &dynamodb.DescribeTableInput{
- TableName: aws.String(tableName),
- })
- if err != nil {
- return convertError(err)
- }
- if status.Table.StreamSpecification != nil && aws.BoolValue(status.Table.StreamSpecification.StreamEnabled) {
- return nil
- }
- _, err = svc.UpdateTableWithContext(ctx, &dynamodb.UpdateTableInput{
- TableName: aws.String(tableName),
- StreamSpecification: &dynamodb.StreamSpecification{
- StreamEnabled: aws.Bool(true),
- StreamViewType: aws.String(dynamodb.StreamViewTypeNewImage),
- },
- })
- return convertError(err)
-}
diff --git a/lib/backend/dynamo/configure_test.go b/lib/backend/dynamo/configure_test.go
deleted file mode 100644
index 98713d53c5c7d..0000000000000
--- a/lib/backend/dynamo/configure_test.go
+++ /dev/null
@@ -1,173 +0,0 @@
-//go:build dynamodb
-// +build dynamodb
-
-/*
- * Teleport
- * Copyright (C) 2023 Gravitational, Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see .
- */
-
-package dynamo
-
-import (
- "context"
- "fmt"
- "testing"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/service/applicationautoscaling"
- "github.com/aws/aws-sdk-go/service/dynamodb"
- "github.com/google/uuid"
- "github.com/gravitational/trace"
- "github.com/stretchr/testify/require"
-)
-
-// TestContinuousBackups verifies that the continuous backup state is set upon
-// startup of DynamoDB.
-func TestContinuousBackups(t *testing.T) {
- // Create new backend with continuous backups enabled.
- b, err := New(context.Background(), map[string]interface{}{
- "table_name": uuid.New() + "-test",
- "continuous_backups": true,
- })
- require.NoError(t, err)
-
- // Remove table after tests are done.
- t.Cleanup(func() {
- require.NoError(t, deleteTable(context.Background(), b.svc, b.Config.TableName))
- })
-
- // Check status of continuous backups.
- ok, err := getContinuousBackups(context.Background(), b.svc, b.Config.TableName)
- require.NoError(t, err)
- require.True(t, ok)
-}
-
-// TestAutoScaling verifies that auto scaling is enabled upon startup of DynamoDB.
-func TestAutoScaling(t *testing.T) {
- // Create new backend with auto scaling enabled.
- b, err := New(context.Background(), map[string]interface{}{
- "table_name": uuid.New() + "-test",
- "auto_scaling": true,
- "read_min_capacity": 10,
- "read_max_capacity": 20,
- "read_target_value": 50.0,
- "write_min_capacity": 10,
- "write_max_capacity": 20,
- "write_target_value": 50.0,
- })
- require.NoError(t, err)
-
- // Remove table after tests are done.
- t.Cleanup(func() {
- require.NoError(t, deleteTable(context.Background(), b.svc, b.Config.TableName))
- })
-
- // Check auto scaling values match.
- resp, err := getAutoScaling(context.Background(), applicationautoscaling.New(b.session), b.Config.TableName)
- require.NoError(t, err)
- require.Equal(t, resp, &AutoScalingParams{
- ReadMinCapacity: 10,
- ReadMaxCapacity: 20,
- ReadTargetValue: 50.0,
- WriteMinCapacity: 10,
- WriteMaxCapacity: 20,
- WriteTargetValue: 50.0,
- })
-}
-
-// getContinuousBackups gets the state of continuous backups.
-func getContinuousBackups(ctx context.Context, svc *dynamodb.DynamoDB, tableName string) (bool, error) {
- resp, err := svc.DescribeContinuousBackupsWithContext(ctx, &dynamodb.DescribeContinuousBackupsInput{
- TableName: aws.String(tableName),
- })
- if err != nil {
- return false, convertError(err)
- }
-
- switch *resp.ContinuousBackupsDescription.PointInTimeRecoveryDescription.PointInTimeRecoveryStatus {
- case string(dynamodb.ContinuousBackupsStatusEnabled):
- return true, nil
- case string(dynamodb.ContinuousBackupsStatusDisabled):
- return false, nil
- default:
- return false, trace.BadParameter("dynamo returned unknown state for continuous backups: %v",
- *resp.ContinuousBackupsDescription.PointInTimeRecoveryDescription.PointInTimeRecoveryStatus)
- }
-}
-
-// getAutoScaling gets the state of auto scaling.
-func getAutoScaling(ctx context.Context, svc *applicationautoscaling.ApplicationAutoScaling, tableName string) (*AutoScalingParams, error) {
- var resp AutoScalingParams
-
- // Get scaling targets.
- targetResponse, err := svc.DescribeScalableTargets(&applicationautoscaling.DescribeScalableTargetsInput{
- ServiceNamespace: aws.String(applicationautoscaling.ServiceNamespaceDynamodb),
- })
- if err != nil {
- return nil, convertError(err)
- }
- for _, target := range targetResponse.ScalableTargets {
- switch *target.ScalableDimension {
- case applicationautoscaling.ScalableDimensionDynamodbTableReadCapacityUnits:
- resp.ReadMinCapacity = *target.MinCapacity
- resp.ReadMaxCapacity = *target.MaxCapacity
- case applicationautoscaling.ScalableDimensionDynamodbTableWriteCapacityUnits:
- resp.WriteMinCapacity = *target.MinCapacity
- resp.WriteMaxCapacity = *target.MaxCapacity
- }
- }
-
- // Get scaling policies.
- policyResponse, err := svc.DescribeScalingPolicies(&applicationautoscaling.DescribeScalingPoliciesInput{
- ServiceNamespace: aws.String(applicationautoscaling.ServiceNamespaceDynamodb),
- })
- if err != nil {
- return nil, convertError(err)
- }
- for _, policy := range policyResponse.ScalingPolicies {
- switch *policy.PolicyName {
- case fmt.Sprintf("%v-%v", tableName, readScalingPolicySuffix):
- resp.ReadTargetValue = *policy.TargetTrackingScalingPolicyConfiguration.TargetValue
- case fmt.Sprintf("%v-%v", tableName, writeScalingPolicySuffix):
- resp.WriteTargetValue = *policy.TargetTrackingScalingPolicyConfiguration.TargetValue
- }
- }
-
- return &resp, nil
-}
-
-// deleteTable will remove a table.
-func deleteTable(ctx context.Context, svc *dynamodb.DynamoDB, tableName string) error {
- _, err := svc.DeleteTableWithContext(ctx, &dynamodb.DeleteTableInput{
- TableName: aws.String(tableName),
- })
- if err != nil {
- return convertError(err)
- }
- err = svc.WaitUntilTableNotExistsWithContext(ctx, &dynamodb.DescribeTableInput{
- TableName: aws.String(tableName),
- })
- if err != nil {
- return convertError(err)
- }
- return nil
-}
-
-const (
- readScalingPolicySuffix = "read-target-tracking-scaling-policy"
- writeScalingPolicySuffix = "write-target-tracking-scaling-policy"
- resourcePrefix = "table"
-)
diff --git a/lib/backend/dynamo/dynamodbbk.go b/lib/backend/dynamo/dynamodbbk.go
index b0b711fd20123..80095069c715d 100644
--- a/lib/backend/dynamo/dynamodbbk.go
+++ b/lib/backend/dynamo/dynamodbbk.go
@@ -29,26 +29,27 @@ import (
"sync/atomic"
"time"
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/aws/aws-sdk-go/aws/credentials"
- "github.com/aws/aws-sdk-go/aws/endpoints"
- "github.com/aws/aws-sdk-go/aws/session"
- "github.com/aws/aws-sdk-go/service/applicationautoscaling"
- "github.com/aws/aws-sdk-go/service/dynamodb"
- "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute"
- "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface"
- "github.com/aws/aws-sdk-go/service/dynamodbstreams"
- "github.com/aws/aws-sdk-go/service/dynamodbstreams/dynamodbstreamsiface"
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/config"
+ "github.com/aws/aws-sdk-go-v2/credentials"
+ "github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue"
+ "github.com/aws/aws-sdk-go-v2/service/applicationautoscaling"
+ autoscalingtypes "github.com/aws/aws-sdk-go-v2/service/applicationautoscaling/types"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodbstreams"
+ streamtypes "github.com/aws/aws-sdk-go-v2/service/dynamodbstreams/types"
"github.com/gravitational/trace"
"github.com/jonboulle/clockwork"
log "github.com/sirupsen/logrus"
+ "go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws"
"github.com/gravitational/teleport"
"github.com/gravitational/teleport/api/utils"
"github.com/gravitational/teleport/lib/backend"
"github.com/gravitational/teleport/lib/defaults"
"github.com/gravitational/teleport/lib/modules"
+ awsmetrics "github.com/gravitational/teleport/lib/observability/metrics/aws"
dynamometrics "github.com/gravitational/teleport/lib/observability/metrics/dynamo"
)
@@ -69,44 +70,41 @@ type Config struct {
SecretKey string `json:"secret_key,omitempty"`
// TableName where to store K/V in DynamoDB
TableName string `json:"table_name,omitempty"`
- // ReadCapacityUnits is Dynamodb read capacity units
- ReadCapacityUnits int64 `json:"read_capacity_units"`
- // WriteCapacityUnits is Dynamodb write capacity units
- WriteCapacityUnits int64 `json:"write_capacity_units"`
+ // BillingMode sets on-demand capacity to the DynamoDB tables
+ BillingMode billingMode `json:"billing_mode,omitempty"`
+ // RetryPeriod is a period between dynamo backend retries on failures
+ RetryPeriod time.Duration `json:"retry_period"`
// BufferSize is a default buffer size
// used to pull events
BufferSize int `json:"buffer_size,omitempty"`
// PollStreamPeriod is a polling period for event stream
PollStreamPeriod time.Duration `json:"poll_stream_period,omitempty"`
- // RetryPeriod is a period between dynamo backend retries on failures
- RetryPeriod time.Duration `json:"retry_period"`
-
- // EnableContinuousBackups is used to enables PITR (Point-In-Time Recovery).
- EnableContinuousBackups bool `json:"continuous_backups,omitempty"`
-
- // EnableAutoScaling is used to enable auto scaling policy.
- EnableAutoScaling bool `json:"auto_scaling,omitempty"`
+ // WriteCapacityUnits is Dynamodb write capacity units
+ WriteCapacityUnits int64 `json:"write_capacity_units"`
+ // ReadTargetValue is the ratio of consumed read capacity to provisioned
+ // capacity. Required to be set if auto scaling is enabled.
+ ReadTargetValue float64 `json:"read_target_value,omitempty"`
+ // WriteTargetValue is the ratio of consumed write capacity to provisioned
+ // capacity. Required to be set if auto scaling is enabled.
+ WriteTargetValue float64 `json:"write_target_value,omitempty"`
+ // ReadCapacityUnits is Dynamodb read capacity units
+ ReadCapacityUnits int64 `json:"read_capacity_units"`
// ReadMaxCapacity is the maximum provisioned read capacity. Required to be
// set if auto scaling is enabled.
- ReadMaxCapacity int64 `json:"read_max_capacity,omitempty"`
+ ReadMaxCapacity int32 `json:"read_max_capacity,omitempty"`
// ReadMinCapacity is the minimum provisioned read capacity. Required to be
// set if auto scaling is enabled.
- ReadMinCapacity int64 `json:"read_min_capacity,omitempty"`
- // ReadTargetValue is the ratio of consumed read capacity to provisioned
- // capacity. Required to be set if auto scaling is enabled.
- ReadTargetValue float64 `json:"read_target_value,omitempty"`
+ ReadMinCapacity int32 `json:"read_min_capacity,omitempty"`
// WriteMaxCapacity is the maximum provisioned write capacity. Required to
// be set if auto scaling is enabled.
- WriteMaxCapacity int64 `json:"write_max_capacity,omitempty"`
+ WriteMaxCapacity int32 `json:"write_max_capacity,omitempty"`
// WriteMinCapacity is the minimum provisioned write capacity. Required to
// be set if auto scaling is enabled.
- WriteMinCapacity int64 `json:"write_min_capacity,omitempty"`
- // WriteTargetValue is the ratio of consumed write capacity to provisioned
- // capacity. Required to be set if auto scaling is enabled.
- WriteTargetValue float64 `json:"write_target_value,omitempty"`
-
- // BillingMode sets on-demand capacity to the DynamoDB tables
- BillingMode billingMode `json:"billing_mode,omitempty"`
+ WriteMinCapacity int32 `json:"write_min_capacity,omitempty"`
+ // EnableContinuousBackups is used to enables PITR (Point-In-Time Recovery).
+ EnableContinuousBackups bool `json:"continuous_backups,omitempty"`
+ // EnableAutoScaling is used to enable auto scaling policy.
+ EnableAutoScaling bool `json:"auto_scaling,omitempty"`
}
type billingMode string
@@ -147,28 +145,43 @@ func (cfg *Config) CheckAndSetDefaults() error {
return nil
}
+type dynamoClient interface {
+ DescribeTimeToLive(ctx context.Context, params *dynamodb.DescribeTimeToLiveInput, optFns ...func(*dynamodb.Options)) (*dynamodb.DescribeTimeToLiveOutput, error)
+ UpdateTimeToLive(ctx context.Context, params *dynamodb.UpdateTimeToLiveInput, optFns ...func(*dynamodb.Options)) (*dynamodb.UpdateTimeToLiveOutput, error)
+ DescribeTable(ctx context.Context, params *dynamodb.DescribeTableInput, optFns ...func(*dynamodb.Options)) (*dynamodb.DescribeTableOutput, error)
+ UpdateTable(ctx context.Context, params *dynamodb.UpdateTableInput, optFns ...func(*dynamodb.Options)) (*dynamodb.UpdateTableOutput, error)
+ DeleteTable(ctx context.Context, params *dynamodb.DeleteTableInput, optFns ...func(*dynamodb.Options)) (*dynamodb.DeleteTableOutput, error)
+ UpdateContinuousBackups(ctx context.Context, params *dynamodb.UpdateContinuousBackupsInput, optFns ...func(*dynamodb.Options)) (*dynamodb.UpdateContinuousBackupsOutput, error)
+ DescribeContinuousBackups(ctx context.Context, params *dynamodb.DescribeContinuousBackupsInput, optFns ...func(*dynamodb.Options)) (*dynamodb.DescribeContinuousBackupsOutput, error)
+ BatchWriteItem(ctx context.Context, params *dynamodb.BatchWriteItemInput, optFns ...func(*dynamodb.Options)) (*dynamodb.BatchWriteItemOutput, error)
+ PutItem(ctx context.Context, params *dynamodb.PutItemInput, optFns ...func(*dynamodb.Options)) (*dynamodb.PutItemOutput, error)
+ DeleteItem(ctx context.Context, params *dynamodb.DeleteItemInput, optFns ...func(*dynamodb.Options)) (*dynamodb.DeleteItemOutput, error)
+ UpdateItem(ctx context.Context, params *dynamodb.UpdateItemInput, optFns ...func(*dynamodb.Options)) (*dynamodb.UpdateItemOutput, error)
+ GetItem(ctx context.Context, params *dynamodb.GetItemInput, optFns ...func(*dynamodb.Options)) (*dynamodb.GetItemOutput, error)
+ CreateTable(ctx context.Context, params *dynamodb.CreateTableInput, optFns ...func(*dynamodb.Options)) (*dynamodb.CreateTableOutput, error)
+ Query(ctx context.Context, params *dynamodb.QueryInput, optFns ...func(*dynamodb.Options)) (*dynamodb.QueryOutput, error)
+ TransactWriteItems(ctx context.Context, params *dynamodb.TransactWriteItemsInput, optFns ...func(*dynamodb.Options)) (*dynamodb.TransactWriteItemsOutput, error)
+}
+
// Backend is a DynamoDB-backed key value backend implementation.
type Backend struct {
+ svc dynamoClient
+ clock clockwork.Clock
*log.Entry
- Config
- svc dynamodbiface.DynamoDBAPI
- streams dynamodbstreamsiface.DynamoDBStreamsAPI
- clock clockwork.Clock
+ streams *dynamodbstreams.Client
buf *backend.CircularBuffer
+ Config
// closedFlag is set to indicate that the database is closed
closedFlag int32
-
- // session holds the AWS client.
- session *session.Session
}
type record struct {
+ Expires *int64 `json:"Expires,omitempty" dynamodbav:",omitempty"`
HashKey string
FullPath string
+ Revision string
Value []byte
Timestamp int64
- Expires *int64 `json:"Expires,omitempty"`
- Revision string
}
type keyLookup struct {
@@ -223,8 +236,7 @@ func New(ctx context.Context, params backend.Params) (*Backend, error) {
l := log.WithFields(log.Fields{teleport.ComponentKey: BackendName})
var cfg *Config
- err := utils.ObjectToStruct(params, &cfg)
- if err != nil {
+ if err := utils.ObjectToStruct(params, &cfg); err != nil {
return nil, trace.BadParameter("DynamoDB configuration is invalid: %v", err)
}
@@ -236,132 +248,211 @@ func New(ctx context.Context, params backend.Params) (*Backend, error) {
l.Infof("Initializing backend. Table: %q, poll streams every %v.", cfg.TableName, cfg.PollStreamPeriod)
- buf := backend.NewCircularBuffer(
- backend.BufferCapacity(cfg.BufferSize),
- )
- b := &Backend{
- Entry: l,
- Config: *cfg,
- clock: clockwork.NewRealClock(),
- buf: buf,
- }
-
- // determine if the FIPS endpoints should be used
- useFIPSEndpoint := endpoints.FIPSEndpointStateUnset
- if modules.GetModules().IsBoringBinary() {
- useFIPSEndpoint = endpoints.FIPSEndpointStateEnabled
+ opts := []func(*config.LoadOptions) error{
+ config.WithRegion(cfg.Region),
+ config.WithHTTPClient(&http.Client{
+ Transport: &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ MaxIdleConns: defaults.HTTPMaxIdleConns,
+ MaxIdleConnsPerHost: defaults.HTTPMaxIdleConnsPerHost,
+ },
+ }),
+ config.WithAPIOptions(awsmetrics.MetricsMiddleware()),
+ config.WithAPIOptions(dynamometrics.MetricsMiddleware(dynamometrics.Backend)),
}
- awsConfig := aws.Config{}
- if cfg.Region != "" {
- awsConfig.Region = aws.String(cfg.Region)
- }
if cfg.AccessKey != "" || cfg.SecretKey != "" {
- awsConfig.Credentials = credentials.NewStaticCredentials(cfg.AccessKey, cfg.SecretKey, "")
+ opts = append(opts, config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(cfg.AccessKey, cfg.SecretKey, "")))
}
- b.session, err = session.NewSessionWithOptions(session.Options{
- SharedConfigState: session.SharedConfigEnable,
- Config: awsConfig,
- })
+ awsConfig, err := config.LoadDefaultConfig(ctx, opts...)
if err != nil {
return nil, trace.Wrap(err)
}
- // Increase the size of the connection pool. This substantially improves the
- // performance of Teleport under load as it reduces the number of TLS
- // handshakes performed.
- httpClient := &http.Client{
- Transport: &http.Transport{
- Proxy: http.ProxyFromEnvironment,
- MaxIdleConns: defaults.HTTPMaxIdleConns,
- MaxIdleConnsPerHost: defaults.HTTPMaxIdleConnsPerHost,
- },
+ var dynamoOpts []func(*dynamodb.Options)
+
+ // FIPS settings are applied on the individual service instead of the aws config,
+ // as DynamoDB Streams and Application Auto Scaling do not yet have FIPS endpoints in non-GovCloud.
+ // See also: https://aws.amazon.com/compliance/fips/#FIPS_Endpoints_by_Service
+ if modules.GetModules().IsBoringBinary() {
+ dynamoOpts = append(dynamoOpts, func(o *dynamodb.Options) {
+ o.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateEnabled
+ })
}
- b.session.Config.HTTPClient = httpClient
- // Create DynamoDB service.
- svc, err := dynamometrics.NewAPIMetrics(dynamometrics.Backend, dynamodb.New(b.session, &aws.Config{
- // Setting this on the individual service instead of the session, as DynamoDB Streams
- // and Application Auto Scaling do not yet have FIPS endpoints in non-GovCloud.
- // See also: https://aws.amazon.com/compliance/fips/#FIPS_Endpoints_by_Service
- UseFIPSEndpoint: useFIPSEndpoint,
- }))
- if err != nil {
- return nil, trace.Wrap(err)
+ otelaws.AppendMiddlewares(&awsConfig.APIOptions, otelaws.WithAttributeSetter(otelaws.DynamoDBAttributeSetter))
+
+ b := &Backend{
+ Entry: l,
+ Config: *cfg,
+ clock: clockwork.NewRealClock(),
+ buf: backend.NewCircularBuffer(backend.BufferCapacity(cfg.BufferSize)),
+ svc: dynamodb.NewFromConfig(awsConfig, dynamoOpts...),
+ streams: dynamodbstreams.NewFromConfig(awsConfig),
}
- b.svc = svc
- streams, err := dynamometrics.NewStreamsMetricsAPI(dynamometrics.Backend, dynamodbstreams.New(b.session))
- if err != nil {
+
+ if err := b.configureTable(ctx, applicationautoscaling.NewFromConfig(awsConfig)); err != nil {
return nil, trace.Wrap(err)
}
- b.streams = streams
+ go func() {
+ if err := b.asyncPollStreams(ctx); err != nil {
+ b.Errorf("Stream polling loop exited: %v", err)
+ }
+ }()
+
+ return b, nil
+}
+
+func (b *Backend) configureTable(ctx context.Context, svc *applicationautoscaling.Client) error {
+ tableName := aws.String(b.TableName)
// check if the table exists?
- ts, tableBillingMode, err := b.getTableStatus(ctx, b.TableName)
+ ts, tableBillingMode, err := b.getTableStatus(ctx, tableName)
if err != nil {
- return nil, trace.Wrap(err)
+ return trace.Wrap(err)
}
+
switch ts {
case tableStatusOK:
- if tableBillingMode == dynamodb.BillingModePayPerRequest {
- cfg.EnableAutoScaling = false
- l.Info("Ignoring auto_scaling setting as table is in on-demand mode.")
+ if tableBillingMode == types.BillingModePayPerRequest {
+ b.Config.EnableAutoScaling = false
+ b.Logger.Info("Ignoring auto_scaling setting as table is in on-demand mode.")
}
case tableStatusMissing:
- if cfg.BillingMode == billingModePayPerRequest {
- cfg.EnableAutoScaling = false
- l.Info("Ignoring auto_scaling setting as table is being created in on-demand mode.")
+ if b.Config.BillingMode == billingModePayPerRequest {
+ b.Config.EnableAutoScaling = false
+ b.Logger.Info("Ignoring auto_scaling setting as table is being created in on-demand mode.")
}
- err = b.createTable(ctx, b.TableName, fullPathKey)
+ err = b.createTable(ctx, tableName, fullPathKey)
case tableStatusNeedsMigration:
- return nil, trace.BadParameter("unsupported schema")
+ return trace.BadParameter("unsupported schema")
}
if err != nil {
- return nil, trace.Wrap(err)
+ return trace.Wrap(err)
}
// Enable TTL on table.
- err = TurnOnTimeToLive(ctx, b.svc, b.TableName, ttlKey)
+ ttlStatus, err := b.svc.DescribeTimeToLive(ctx, &dynamodb.DescribeTimeToLiveInput{
+ TableName: tableName,
+ })
if err != nil {
- return nil, trace.Wrap(err)
+ return trace.Wrap(convertError(err))
+ }
+ switch ttlStatus.TimeToLiveDescription.TimeToLiveStatus {
+ case types.TimeToLiveStatusEnabled, types.TimeToLiveStatusEnabling:
+ default:
+ _, err = b.svc.UpdateTimeToLive(ctx, &dynamodb.UpdateTimeToLiveInput{
+ TableName: tableName,
+ TimeToLiveSpecification: &types.TimeToLiveSpecification{
+ AttributeName: aws.String(ttlKey),
+ Enabled: aws.Bool(true),
+ },
+ })
+ if err != nil {
+ return trace.Wrap(convertError(err))
+ }
}
// Turn on DynamoDB streams, needed to implement events.
- err = TurnOnStreams(ctx, b.svc, b.TableName)
+ tableStatus, err := b.svc.DescribeTable(ctx, &dynamodb.DescribeTableInput{
+ TableName: tableName,
+ })
if err != nil {
- return nil, trace.Wrap(err)
+ return trace.Wrap(convertError(err))
+ }
+
+	// Enable the stream unless it is already on. The nil check short-circuits,
+	// so no inner != nil re-check is needed before dereferencing.
+	if tableStatus.Table.StreamSpecification == nil || !aws.ToBool(tableStatus.Table.StreamSpecification.StreamEnabled) {
+ _, err = b.svc.UpdateTable(ctx, &dynamodb.UpdateTableInput{
+ TableName: tableName,
+ StreamSpecification: &types.StreamSpecification{
+ StreamEnabled: aws.Bool(true),
+ StreamViewType: types.StreamViewTypeNewImage,
+ },
+ })
+ if err != nil {
+ return trace.Wrap(convertError(err))
+ }
}
// Enable continuous backups if requested.
if b.Config.EnableContinuousBackups {
- if err := SetContinuousBackups(ctx, b.svc, b.TableName); err != nil {
- return nil, trace.Wrap(err)
+ // Make request to AWS to update continuous backups settings.
+ _, err := b.svc.UpdateContinuousBackups(ctx, &dynamodb.UpdateContinuousBackupsInput{
+ PointInTimeRecoverySpecification: &types.PointInTimeRecoverySpecification{
+ PointInTimeRecoveryEnabled: aws.Bool(true),
+ },
+ TableName: tableName,
+ })
+ if err != nil {
+ return trace.Wrap(convertError(err))
}
}
// Enable auto scaling if requested.
if b.Config.EnableAutoScaling {
- if err := SetAutoScaling(ctx, applicationautoscaling.New(b.session), GetTableID(b.TableName), AutoScalingParams{
- ReadMinCapacity: b.Config.ReadMinCapacity,
- ReadMaxCapacity: b.Config.ReadMaxCapacity,
- ReadTargetValue: b.Config.ReadTargetValue,
- WriteMinCapacity: b.Config.WriteMinCapacity,
- WriteMaxCapacity: b.Config.WriteMaxCapacity,
- WriteTargetValue: b.Config.WriteTargetValue,
+ readDimension := autoscalingtypes.ScalableDimensionDynamoDBTableReadCapacityUnits
+ writeDimension := autoscalingtypes.ScalableDimensionDynamoDBTableWriteCapacityUnits
+ resourceID := "table/" + b.TableName
+
+ // Define scaling targets. Defines minimum and maximum {read,write} capacity.
+ if _, err := svc.RegisterScalableTarget(ctx, &applicationautoscaling.RegisterScalableTargetInput{
+ MinCapacity: aws.Int32(b.ReadMinCapacity),
+ MaxCapacity: aws.Int32(b.ReadMaxCapacity),
+ ResourceId: aws.String(resourceID),
+ ScalableDimension: readDimension,
+ ServiceNamespace: autoscalingtypes.ServiceNamespaceDynamodb,
}); err != nil {
- return nil, trace.Wrap(err)
+ return trace.Wrap(convertError(err))
+ }
+ if _, err := svc.RegisterScalableTarget(ctx, &applicationautoscaling.RegisterScalableTargetInput{
+ MinCapacity: aws.Int32(b.WriteMinCapacity),
+ MaxCapacity: aws.Int32(b.WriteMaxCapacity),
+ ResourceId: aws.String(resourceID),
+ ScalableDimension: writeDimension,
+ ServiceNamespace: autoscalingtypes.ServiceNamespaceDynamodb,
+ }); err != nil {
+ return trace.Wrap(convertError(err))
}
- }
- go func() {
- if err := b.asyncPollStreams(ctx); err != nil {
- b.Errorf("Stream polling loop exited: %v", err)
+ // Define scaling policy. Defines the ratio of {read,write} consumed capacity to
+ // provisioned capacity DynamoDB will try and maintain.
+ readPolicy := b.TableName + "-read-target-tracking-scaling-policy"
+ if _, err := svc.PutScalingPolicy(ctx, &applicationautoscaling.PutScalingPolicyInput{
+ PolicyName: aws.String(readPolicy),
+ PolicyType: autoscalingtypes.PolicyTypeTargetTrackingScaling,
+ ResourceId: aws.String(resourceID),
+ ScalableDimension: readDimension,
+ ServiceNamespace: autoscalingtypes.ServiceNamespaceDynamodb,
+ TargetTrackingScalingPolicyConfiguration: &autoscalingtypes.TargetTrackingScalingPolicyConfiguration{
+ PredefinedMetricSpecification: &autoscalingtypes.PredefinedMetricSpecification{
+ PredefinedMetricType: autoscalingtypes.MetricTypeDynamoDBReadCapacityUtilization,
+ },
+ TargetValue: aws.Float64(b.ReadTargetValue),
+ },
+ }); err != nil {
+ return trace.Wrap(convertError(err))
}
- }()
- // Wrap backend in a input sanitizer and return it.
- return b, nil
+ writePolicy := b.TableName + "-write-target-tracking-scaling-policy"
+ if _, err := svc.PutScalingPolicy(ctx, &applicationautoscaling.PutScalingPolicyInput{
+ PolicyName: aws.String(writePolicy),
+ PolicyType: autoscalingtypes.PolicyTypeTargetTrackingScaling,
+ ResourceId: aws.String(resourceID),
+ ScalableDimension: writeDimension,
+ ServiceNamespace: autoscalingtypes.ServiceNamespaceDynamodb,
+ TargetTrackingScalingPolicyConfiguration: &autoscalingtypes.TargetTrackingScalingPolicyConfiguration{
+ PredefinedMetricSpecification: &autoscalingtypes.PredefinedMetricSpecification{
+ PredefinedMetricType: autoscalingtypes.MetricTypeDynamoDBWriteCapacityUtilization,
+ },
+ TargetValue: aws.Float64(b.WriteTargetValue),
+ },
+ }); err != nil {
+ return trace.Wrap(convertError(err))
+ }
+ }
+
+ return nil
}
func (b *Backend) GetName() string {
@@ -491,28 +582,24 @@ func (b *Backend) DeleteRange(ctx context.Context, startKey, endKey []byte) erro
if len(result.records) == 0 {
return nil
}
- requests := make([]*dynamodb.WriteRequest, 0, len(result.records))
+ requests := make([]types.WriteRequest, 0, len(result.records))
for _, record := range result.records {
- requests = append(requests, &dynamodb.WriteRequest{
- DeleteRequest: &dynamodb.DeleteRequest{
- Key: map[string]*dynamodb.AttributeValue{
- hashKeyKey: {
- S: aws.String(hashKey),
- },
- fullPathKey: {
- S: aws.String(record.FullPath),
- },
+ requests = append(requests, types.WriteRequest{
+ DeleteRequest: &types.DeleteRequest{
+ Key: map[string]types.AttributeValue{
+ hashKeyKey: &types.AttributeValueMemberS{Value: hashKey},
+ fullPathKey: &types.AttributeValueMemberS{Value: record.FullPath},
},
},
})
}
input := dynamodb.BatchWriteItemInput{
- RequestItems: map[string][]*dynamodb.WriteRequest{
+ RequestItems: map[string][]types.WriteRequest{
b.TableName: requests,
},
}
- if _, err = b.svc.BatchWriteItemWithContext(ctx, &input); err != nil {
+ if _, err = b.svc.BatchWriteItem(ctx, &input); err != nil {
return trace.Wrap(err)
}
}
@@ -566,24 +653,23 @@ func (b *Backend) CompareAndSwap(ctx context.Context, expected backend.Item, rep
if !replaceWith.Expires.IsZero() {
r.Expires = aws.Int64(replaceWith.Expires.UTC().Unix())
}
- av, err := dynamodbattribute.MarshalMap(r)
+ av, err := attributevalue.MarshalMap(r)
if err != nil {
return nil, trace.Wrap(err)
}
input := dynamodb.PutItemInput{
- Item: av,
- TableName: aws.String(b.TableName),
- }
- input.SetConditionExpression("#v = :prev")
- input.SetExpressionAttributeNames(map[string]*string{
- "#v": aws.String("Value"),
- })
- input.SetExpressionAttributeValues(map[string]*dynamodb.AttributeValue{
- ":prev": {
- B: expected.Value,
+ Item: av,
+ TableName: aws.String(b.TableName),
+ ConditionExpression: aws.String("#v = :prev"),
+ ExpressionAttributeNames: map[string]string{
+ "#v": "Value",
},
- })
- _, err = b.svc.PutItemWithContext(ctx, &input)
+ ExpressionAttributeValues: map[string]types.AttributeValue{
+ ":prev": &types.AttributeValueMemberB{Value: expected.Value},
+ },
+ }
+
+ _, err = b.svc.PutItem(ctx, &input)
err = convertError(err)
if err != nil {
// in this case let's use more specific compare failed error
@@ -630,7 +716,7 @@ func (b *Backend) ConditionalDelete(ctx context.Context, key []byte, rev string)
return trace.Wrap(backend.ErrIncorrectRevision)
}
- av, err := dynamodbattribute.MarshalMap(keyLookup{
+ av, err := attributevalue.MarshalMap(keyLookup{
HashKey: hashKey,
FullPath: prependPrefix(key),
})
@@ -638,16 +724,19 @@ func (b *Backend) ConditionalDelete(ctx context.Context, key []byte, rev string)
return trace.Wrap(err)
}
- input := dynamodb.DeleteItemInput{Key: av, TableName: aws.String(b.TableName)}
+ input := dynamodb.DeleteItemInput{
+ Key: av,
+ TableName: aws.String(b.TableName),
+ }
if rev == backend.BlankRevision {
- input.SetConditionExpression("attribute_not_exists(Revision) AND attribute_exists(FullPath)")
+ input.ConditionExpression = aws.String("attribute_not_exists(Revision) AND attribute_exists(FullPath)")
} else {
- input.SetExpressionAttributeValues(map[string]*dynamodb.AttributeValue{":rev": {S: aws.String(rev)}})
- input.SetConditionExpression("Revision = :rev AND attribute_exists(FullPath)")
+ input.ExpressionAttributeValues = map[string]types.AttributeValue{":rev": &types.AttributeValueMemberS{Value: rev}}
+ input.ConditionExpression = aws.String("Revision = :rev AND attribute_exists(FullPath)")
}
- if _, err = b.svc.DeleteItemWithContext(ctx, &input); err != nil {
+ if _, err = b.svc.DeleteItem(ctx, &input); err != nil {
err = convertError(err)
if trace.IsCompareFailed(err) {
return trace.Wrap(backend.ErrIncorrectRevision)
@@ -671,27 +760,19 @@ func (b *Backend) KeepAlive(ctx context.Context, lease backend.Lease, expires ti
return trace.BadParameter("lease is missing key")
}
input := &dynamodb.UpdateItemInput{
- ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
- ":expires": {
- N: aws.String(strconv.FormatInt(expires.UTC().Unix(), 10)),
- },
- ":timestamp": {
- N: aws.String(strconv.FormatInt(b.clock.Now().UTC().Unix(), 10)),
- },
+ ExpressionAttributeValues: map[string]types.AttributeValue{
+ ":expires": &types.AttributeValueMemberN{Value: strconv.FormatInt(expires.UTC().Unix(), 10)},
+ ":timestamp": &types.AttributeValueMemberN{Value: strconv.FormatInt(b.clock.Now().UTC().Unix(), 10)},
},
TableName: aws.String(b.TableName),
- Key: map[string]*dynamodb.AttributeValue{
- hashKeyKey: {
- S: aws.String(hashKey),
- },
- fullPathKey: {
- S: aws.String(prependPrefix(lease.Key)),
- },
+ Key: map[string]types.AttributeValue{
+ hashKeyKey: &types.AttributeValueMemberS{Value: hashKey},
+ fullPathKey: &types.AttributeValueMemberS{Value: prependPrefix(lease.Key)},
},
- UpdateExpression: aws.String("SET Expires = :expires"),
+ UpdateExpression: aws.String("SET Expires = :expires"),
+ ConditionExpression: aws.String("attribute_exists(FullPath) AND (attribute_not_exists(Expires) OR Expires >= :timestamp)"),
}
- input.SetConditionExpression("attribute_exists(FullPath) AND (attribute_not_exists(Expires) OR Expires >= :timestamp)")
- _, err := b.svc.UpdateItemWithContext(ctx, input)
+ _, err := b.svc.UpdateItem(ctx, input)
err = convertError(err)
if trace.IsCompareFailed(err) {
err = trace.NotFound(err.Error())
@@ -735,9 +816,9 @@ func (b *Backend) Clock() clockwork.Clock {
}
// getTableStatus checks if a given table exists
-func (b *Backend) getTableStatus(ctx context.Context, tableName string) (tableStatus, string, error) {
- td, err := b.svc.DescribeTableWithContext(ctx, &dynamodb.DescribeTableInput{
- TableName: aws.String(tableName),
+func (b *Backend) getTableStatus(ctx context.Context, tableName *string) (tableStatus, types.BillingMode, error) {
+ td, err := b.svc.DescribeTable(ctx, &dynamodb.DescribeTableInput{
+ TableName: tableName,
})
err = convertError(err)
if err != nil {
@@ -756,9 +837,9 @@ func (b *Backend) getTableStatus(ctx context.Context, tableName string) (tableSt
// PROVISIONED, if unspecified.
// https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_BillingModeSummary.html
if td.Table.BillingModeSummary == nil {
- return tableStatusOK, dynamodb.BillingModeProvisioned, nil
+ return tableStatusOK, types.BillingModeProvisioned, nil
}
- return tableStatusOK, aws.StringValue(td.Table.BillingModeSummary.BillingMode), nil
+ return tableStatusOK, td.Table.BillingModeSummary.BillingMode, nil
}
// createTable creates a DynamoDB table with a requested name and applies
@@ -772,68 +853,72 @@ func (b *Backend) getTableStatus(ctx context.Context, tableName string) (tableSt
// documentation in case users want to set up DynamoDB tables manually. Edit the
// following docs partial:
// docs/pages/includes/dynamodb-iam-policy.mdx
-func (b *Backend) createTable(ctx context.Context, tableName string, rangeKey string) error {
- billingMode := aws.String(dynamodb.BillingModeProvisioned)
- pThroughput := &dynamodb.ProvisionedThroughput{
+func (b *Backend) createTable(ctx context.Context, tableName *string, rangeKey string) error {
+ billingMode := types.BillingModeProvisioned
+ pThroughput := &types.ProvisionedThroughput{
ReadCapacityUnits: aws.Int64(b.ReadCapacityUnits),
WriteCapacityUnits: aws.Int64(b.WriteCapacityUnits),
}
if b.BillingMode == billingModePayPerRequest {
- billingMode = aws.String(dynamodb.BillingModePayPerRequest)
+ billingMode = types.BillingModePayPerRequest
pThroughput = nil
}
- def := []*dynamodb.AttributeDefinition{
+ def := []types.AttributeDefinition{
{
AttributeName: aws.String(hashKeyKey),
- AttributeType: aws.String("S"),
+ AttributeType: types.ScalarAttributeTypeS,
},
{
AttributeName: aws.String(rangeKey),
- AttributeType: aws.String("S"),
+ AttributeType: types.ScalarAttributeTypeS,
},
}
- elems := []*dynamodb.KeySchemaElement{
+ elems := []types.KeySchemaElement{
{
AttributeName: aws.String(hashKeyKey),
- KeyType: aws.String("HASH"),
+ KeyType: types.KeyTypeHash,
},
{
AttributeName: aws.String(rangeKey),
- KeyType: aws.String("RANGE"),
+ KeyType: types.KeyTypeRange,
},
}
c := dynamodb.CreateTableInput{
- TableName: aws.String(tableName),
+ TableName: tableName,
AttributeDefinitions: def,
KeySchema: elems,
ProvisionedThroughput: pThroughput,
BillingMode: billingMode,
}
- _, err := b.svc.CreateTableWithContext(ctx, &c)
+ _, err := b.svc.CreateTable(ctx, &c)
if err != nil {
return trace.Wrap(err)
}
- b.Infof("Waiting until table %q is created.", tableName)
- err = b.svc.WaitUntilTableExistsWithContext(ctx, &dynamodb.DescribeTableInput{
- TableName: aws.String(tableName),
- })
+ b.Infof("Waiting until table %q is created.", aws.ToString(tableName))
+ waiter := dynamodb.NewTableExistsWaiter(b.svc)
+
+ err = waiter.Wait(ctx,
+ &dynamodb.DescribeTableInput{TableName: tableName},
+ 10*time.Minute,
+ )
if err == nil {
- b.Infof("Table %q has been created.", tableName)
+ b.Infof("Table %q has been created.", aws.ToString(tableName))
}
+
return trace.Wrap(err)
}
type getResult struct {
- records []record
// lastEvaluatedKey is the primary key of the item where the operation stopped, inclusive of the
// previous result set. Use this value to start a new operation, excluding this
// value in the new request.
- lastEvaluatedKey map[string]*dynamodb.AttributeValue
+ lastEvaluatedKey map[string]types.AttributeValue
+ records []record
}
// getRecords retrieves all keys by path
-func (b *Backend) getRecords(ctx context.Context, startKey, endKey string, limit int, lastEvaluatedKey map[string]*dynamodb.AttributeValue) (*getResult, error) {
+func (b *Backend) getRecords(ctx context.Context, startKey, endKey string, limit int, lastEvaluatedKey map[string]types.AttributeValue) (*getResult, error) {
query := "HashKey = :hashKey AND FullPath BETWEEN :fullPath AND :rangeEnd"
attrV := map[string]interface{}{
":fullPath": startKey,
@@ -845,7 +930,7 @@ func (b *Backend) getRecords(ctx context.Context, startKey, endKey string, limit
// filter out expired items, otherwise they might show up in the query
// http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/howitworks-ttl.html
filter := "attribute_not_exists(Expires) OR Expires >= :timestamp"
- av, err := dynamodbattribute.MarshalMap(attrV)
+ av, err := attributevalue.MarshalMap(attrV)
if err != nil {
return nil, convertError(err)
}
@@ -858,16 +943,16 @@ func (b *Backend) getRecords(ctx context.Context, startKey, endKey string, limit
ExclusiveStartKey: lastEvaluatedKey,
}
if limit > 0 {
- input.Limit = aws.Int64(int64(limit))
+ input.Limit = aws.Int32(int32(limit))
}
- out, err := b.svc.QueryWithContext(ctx, &input)
+ out, err := b.svc.Query(ctx, &input)
if err != nil {
return nil, trace.Wrap(err)
}
var result getResult
for _, item := range out.Items {
var r record
- if err := dynamodbattribute.UnmarshalMap(item, &r); err != nil {
+ if err := attributevalue.UnmarshalMap(item, &r); err != nil {
return nil, trace.Wrap(err)
}
result.records = append(result.records, r)
@@ -891,12 +976,10 @@ func (r *record) isExpired(now time.Time) bool {
func removeDuplicates(elements []record) []record {
// Use map to record duplicates as we find them.
encountered := map[string]bool{}
- result := []record{}
+ var result []record
for v := range elements {
- if encountered[elements[v].FullPath] {
- // Do not add duplicate.
- } else {
+ if !encountered[elements[v].FullPath] {
// Record this element as an encountered element.
encountered[elements[v].FullPath] = true
// Append to result slice.
@@ -940,7 +1023,7 @@ func (b *Backend) create(ctx context.Context, item backend.Item, mode int) (stri
if !item.Expires.IsZero() {
r.Expires = aws.Int64(item.Expires.UTC().Unix())
}
- av, err := dynamodbattribute.MarshalMap(r)
+ av, err := attributevalue.MarshalMap(r)
if err != nil {
return "", trace.Wrap(err)
}
@@ -951,24 +1034,24 @@ func (b *Backend) create(ctx context.Context, item backend.Item, mode int) (stri
switch mode {
case modeCreate:
- input.SetConditionExpression("attribute_not_exists(FullPath)")
+ input.ConditionExpression = aws.String("attribute_not_exists(FullPath)")
case modeUpdate:
- input.SetConditionExpression("attribute_exists(FullPath)")
+ input.ConditionExpression = aws.String("attribute_exists(FullPath)")
case modePut:
case modeConditionalUpdate:
// If the revision is empty, then the resource existed prior to revision support. Instead of validating that
// the revisions match, validate that the revision attribute does not exist. Otherwise, validate that the revision
// attribute matches the item revision.
if item.Revision == "" {
- input.SetConditionExpression("attribute_not_exists(Revision) AND attribute_exists(FullPath)")
+ input.ConditionExpression = aws.String("attribute_not_exists(Revision) AND attribute_exists(FullPath)")
} else {
- input.SetExpressionAttributeValues(map[string]*dynamodb.AttributeValue{":rev": {S: aws.String(item.Revision)}})
- input.SetConditionExpression("Revision = :rev AND attribute_exists(FullPath)")
+ input.ExpressionAttributeValues = map[string]types.AttributeValue{":rev": &types.AttributeValueMemberS{Value: item.Revision}}
+ input.ConditionExpression = aws.String("Revision = :rev AND attribute_exists(FullPath)")
}
default:
return "", trace.BadParameter("unrecognized mode")
}
- _, err = b.svc.PutItemWithContext(ctx, &input)
+ _, err = b.svc.PutItem(ctx, &input)
err = convertError(err)
if err != nil {
if mode == modeConditionalUpdate && trace.IsCompareFailed(err) {
@@ -982,7 +1065,7 @@ func (b *Backend) create(ctx context.Context, item backend.Item, mode int) (stri
}
func (b *Backend) deleteKey(ctx context.Context, key []byte) error {
- av, err := dynamodbattribute.MarshalMap(keyLookup{
+ av, err := attributevalue.MarshalMap(keyLookup{
HashKey: hashKey,
FullPath: prependPrefix(key),
})
@@ -990,14 +1073,14 @@ func (b *Backend) deleteKey(ctx context.Context, key []byte) error {
return trace.Wrap(err)
}
input := dynamodb.DeleteItemInput{Key: av, TableName: aws.String(b.TableName)}
- if _, err = b.svc.DeleteItemWithContext(ctx, &input); err != nil {
+ if _, err = b.svc.DeleteItem(ctx, &input); err != nil {
return trace.Wrap(err)
}
return nil
}
func (b *Backend) deleteKeyIfExpired(ctx context.Context, key []byte) error {
- _, err := b.svc.DeleteItemWithContext(ctx, &dynamodb.DeleteItemInput{
+ _, err := b.svc.DeleteItem(ctx, &dynamodb.DeleteItemInput{
TableName: aws.String(b.TableName),
Key: keyToAttributeValueMap(key),
@@ -1005,7 +1088,7 @@ func (b *Backend) deleteKeyIfExpired(ctx context.Context, key []byte) error {
ConditionExpression: aws.String(
"attribute_not_exists(FullPath) OR (attribute_exists(Expires) AND Expires <= :timestamp)",
),
- ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
+ ExpressionAttributeValues: map[string]types.AttributeValue{
":timestamp": timeToAttributeValue(b.clock.Now()),
},
})
@@ -1013,7 +1096,7 @@ func (b *Backend) deleteKeyIfExpired(ctx context.Context, key []byte) error {
}
func (b *Backend) getKey(ctx context.Context, key []byte) (*record, error) {
- av, err := dynamodbattribute.MarshalMap(keyLookup{
+ av, err := attributevalue.MarshalMap(keyLookup{
HashKey: hashKey,
FullPath: prependPrefix(key),
})
@@ -1025,7 +1108,7 @@ func (b *Backend) getKey(ctx context.Context, key []byte) (*record, error) {
TableName: aws.String(b.TableName),
ConsistentRead: aws.Bool(true),
}
- out, err := b.svc.GetItemWithContext(ctx, &input)
+ out, err := b.svc.GetItem(ctx, &input)
if err != nil {
// we deliberately use a "generic" trace error here, since we don't want
// callers to make assumptions about the nature of the failure.
@@ -1035,7 +1118,7 @@ func (b *Backend) getKey(ctx context.Context, key []byte) (*record, error) {
return nil, trace.NotFound("%q is not found", string(key))
}
var r record
- if err := dynamodbattribute.UnmarshalMap(out.Item, &r); err != nil {
+ if err := attributevalue.UnmarshalMap(out.Item, &r); err != nil {
return nil, trace.WrapWithMessage(err, "failed to unmarshal dynamo item %q", string(key))
}
// Check if key expired, if expired delete it
@@ -1052,26 +1135,52 @@ func convertError(err error) error {
if err == nil {
return nil
}
- var aerr awserr.Error
- if !errors.As(err, &aerr) {
- return err
+
+ var conditionalCheckFailedError *types.ConditionalCheckFailedException
+ if errors.As(err, &conditionalCheckFailedError) {
+ return trace.CompareFailed(conditionalCheckFailedError.ErrorMessage())
}
- switch aerr.Code() {
- case dynamodb.ErrCodeConditionalCheckFailedException:
- return trace.CompareFailed(aerr.Error())
- case dynamodb.ErrCodeProvisionedThroughputExceededException:
- return trace.ConnectionProblem(aerr, aerr.Error())
- case dynamodb.ErrCodeResourceNotFoundException, applicationautoscaling.ErrCodeObjectNotFoundException:
- return trace.NotFound(aerr.Error())
- case dynamodb.ErrCodeItemCollectionSizeLimitExceededException:
- return trace.BadParameter(aerr.Error())
- case dynamodb.ErrCodeInternalServerError:
- return trace.BadParameter(aerr.Error())
- case dynamodbstreams.ErrCodeExpiredIteratorException, dynamodbstreams.ErrCodeLimitExceededException, dynamodbstreams.ErrCodeTrimmedDataAccessException:
- return trace.ConnectionProblem(aerr, aerr.Error())
- default:
- return err
+
+ var throughputExceededError *types.ProvisionedThroughputExceededException
+ if errors.As(err, &throughputExceededError) {
+ return trace.ConnectionProblem(throughputExceededError, throughputExceededError.ErrorMessage())
+ }
+
+ var notFoundError *types.ResourceNotFoundException
+ if errors.As(err, ¬FoundError) {
+ return trace.NotFound(notFoundError.ErrorMessage())
+ }
+
+	// BUG FIX: the errors.As target must be the collection-size error, not
+	// notFoundError (copy-paste from the branch above). With &notFoundError this
+	// branch was unreachable — a notFoundError match already returned NotFound —
+	// and, if reached, would nil-deref collectionLimitExceededError.
+	var collectionLimitExceededError *types.ItemCollectionSizeLimitExceededException
+	if errors.As(err, &collectionLimitExceededError) {
+		return trace.BadParameter(collectionLimitExceededError.ErrorMessage())
+	}
+
+ var internalError *types.InternalServerError
+ if errors.As(err, &internalError) {
+ return trace.BadParameter(internalError.ErrorMessage())
}
+
+ var expiredIteratorError *streamtypes.ExpiredIteratorException
+ if errors.As(err, &expiredIteratorError) {
+ return trace.ConnectionProblem(expiredIteratorError, expiredIteratorError.ErrorMessage())
+ }
+
+ var limitExceededError *streamtypes.LimitExceededException
+ if errors.As(err, &limitExceededError) {
+ return trace.ConnectionProblem(limitExceededError, limitExceededError.ErrorMessage())
+ }
+ var trimmedAccessError *streamtypes.TrimmedDataAccessException
+ if errors.As(err, &trimmedAccessError) {
+ return trace.ConnectionProblem(trimmedAccessError, trimmedAccessError.ErrorMessage())
+ }
+
+ var scalingObjectNotFoundError *autoscalingtypes.ObjectNotFoundException
+ if errors.As(err, &scalingObjectNotFoundError) {
+ return trace.NotFound(scalingObjectNotFoundError.ErrorMessage())
+ }
+
+ return err
}
type records []record
@@ -1091,19 +1200,19 @@ func (r records) Less(i, j int) bool {
return r[i].FullPath < r[j].FullPath
}
-func fullPathToAttributeValueMap(fullPath string) map[string]*dynamodb.AttributeValue {
- return map[string]*dynamodb.AttributeValue{
- hashKeyKey: {S: aws.String(hashKey)},
- fullPathKey: {S: aws.String(fullPath)},
+func fullPathToAttributeValueMap(fullPath string) map[string]types.AttributeValue {
+ return map[string]types.AttributeValue{
+ hashKeyKey: &types.AttributeValueMemberS{Value: hashKey},
+ fullPathKey: &types.AttributeValueMemberS{Value: fullPath},
}
}
-func keyToAttributeValueMap(key []byte) map[string]*dynamodb.AttributeValue {
+func keyToAttributeValueMap(key []byte) map[string]types.AttributeValue {
return fullPathToAttributeValueMap(prependPrefix(key))
}
-func timeToAttributeValue(t time.Time) *dynamodb.AttributeValue {
- return &dynamodb.AttributeValue{
- N: aws.String(strconv.FormatInt(t.Unix(), 10)),
+func timeToAttributeValue(t time.Time) types.AttributeValue {
+ return &types.AttributeValueMemberN{
+ Value: strconv.FormatInt(t.Unix(), 10),
}
}
diff --git a/lib/backend/dynamo/dynamodbbk_test.go b/lib/backend/dynamo/dynamodbbk_test.go
index 0b5c05897bfcb..213d2c957fb19 100644
--- a/lib/backend/dynamo/dynamodbbk_test.go
+++ b/lib/backend/dynamo/dynamodbbk_test.go
@@ -20,17 +20,23 @@ package dynamo
import (
"context"
+ "fmt"
"os"
"testing"
"time"
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/request"
- "github.com/aws/aws-sdk-go/service/dynamodb"
- "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface"
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/config"
+ "github.com/aws/aws-sdk-go-v2/service/applicationautoscaling"
+ autoscalingtypes "github.com/aws/aws-sdk-go-v2/service/applicationautoscaling/types"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/google/uuid"
"github.com/gravitational/trace"
"github.com/jonboulle/clockwork"
log "github.com/sirupsen/logrus"
+ "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/gravitational/teleport/lib/backend"
@@ -44,6 +50,7 @@ func TestMain(m *testing.M) {
}
func ensureTestsEnabled(t *testing.T) {
+ t.Helper()
const varName = "TELEPORT_DYNAMODB_TEST"
if os.Getenv(varName) == "" {
t.Skipf("DynamoDB tests are disabled. Enable by defining the %v environment variable", varName)
@@ -94,30 +101,28 @@ func TestDynamoDB(t *testing.T) {
}
type dynamoDBAPIMock struct {
- dynamodbiface.DynamoDBAPI
-
+ dynamoClient
+ expectedProvisionedthroughput *types.ProvisionedThroughput
expectedTableName string
- expectedBillingMode string
- expectedProvisionedthroughput *dynamodb.ProvisionedThroughput
+ expectedBillingMode types.BillingMode
}
-func (d *dynamoDBAPIMock) CreateTableWithContext(_ aws.Context, input *dynamodb.CreateTableInput, opts ...request.Option) (*dynamodb.CreateTableOutput, error) {
-
- if d.expectedTableName != aws.StringValue(input.TableName) {
+func (d *dynamoDBAPIMock) CreateTable(ctx context.Context, input *dynamodb.CreateTableInput, optFns ...func(*dynamodb.Options)) (*dynamodb.CreateTableOutput, error) {
+ if d.expectedTableName != aws.ToString(input.TableName) {
return nil, trace.BadParameter("table names do not match")
}
- if d.expectedBillingMode != aws.StringValue(input.BillingMode) {
+ if d.expectedBillingMode != input.BillingMode {
return nil, trace.BadParameter("billing mode does not match")
}
if d.expectedProvisionedthroughput != nil {
- if aws.StringValue(input.BillingMode) == dynamodb.BillingModePayPerRequest {
+ if input.BillingMode == types.BillingModePayPerRequest {
return nil, trace.BadParameter("pthroughput should be nil if on demand is true")
}
- if aws.Int64Value(d.expectedProvisionedthroughput.ReadCapacityUnits) != aws.Int64Value(input.ProvisionedThroughput.ReadCapacityUnits) ||
- aws.Int64Value(d.expectedProvisionedthroughput.WriteCapacityUnits) != aws.Int64Value(input.ProvisionedThroughput.WriteCapacityUnits) {
+ if aws.ToInt64(d.expectedProvisionedthroughput.ReadCapacityUnits) != aws.ToInt64(input.ProvisionedThroughput.ReadCapacityUnits) ||
+ aws.ToInt64(d.expectedProvisionedthroughput.WriteCapacityUnits) != aws.ToInt64(input.ProvisionedThroughput.WriteCapacityUnits) {
return nil, trace.BadParameter("pthroughput values were not equal")
}
@@ -126,11 +131,17 @@ func (d *dynamoDBAPIMock) CreateTableWithContext(_ aws.Context, input *dynamodb.
return nil, nil
}
-func (d *dynamoDBAPIMock) WaitUntilTableExistsWithContext(_ aws.Context, input *dynamodb.DescribeTableInput, _ ...request.WaiterOption) error {
- if d.expectedTableName != aws.StringValue(input.TableName) {
- return trace.BadParameter("table names do not match")
+func (d *dynamoDBAPIMock) DescribeTable(ctx context.Context, input *dynamodb.DescribeTableInput, optFns ...func(*dynamodb.Options)) (*dynamodb.DescribeTableOutput, error) {
+ if d.expectedTableName != aws.ToString(input.TableName) {
+ return nil, trace.BadParameter("table names do not match")
}
- return nil
+ return &dynamodb.DescribeTableOutput{
+ Table: &types.TableDescription{
+ TableName: input.TableName,
+ TableStatus: types.TableStatusActive,
+ },
+ ResultMetadata: middleware.Metadata{},
+ }, nil
}
func TestCreateTable(t *testing.T) {
@@ -139,19 +150,19 @@ func TestCreateTable(t *testing.T) {
errIsNil := func(err error) bool { return err == nil }
for _, tc := range []struct {
- name string
errorIsFn func(error) bool
+ expectedProvisionedThroughput *types.ProvisionedThroughput
+ name string
+ expectedBillingMode types.BillingMode
+ billingMode billingMode
readCapacityUnits int
writeCapacityUnits int
- expectedProvisionedThroughput *dynamodb.ProvisionedThroughput
- expectedBillingMode string
- billingMode billingMode
}{
{
name: "table creation succeeds",
errorIsFn: errIsNil,
billingMode: billingModePayPerRequest,
- expectedBillingMode: dynamodb.BillingModePayPerRequest,
+ expectedBillingMode: types.BillingModePayPerRequest,
},
{
name: "read/write capacity units are ignored if on demand is on",
@@ -159,43 +170,43 @@ func TestCreateTable(t *testing.T) {
writeCapacityUnits: 10,
errorIsFn: errIsNil,
billingMode: billingModePayPerRequest,
- expectedBillingMode: dynamodb.BillingModePayPerRequest,
+ expectedBillingMode: types.BillingModePayPerRequest,
},
{
name: "bad parameter when provisioned throughput is set",
readCapacityUnits: 10,
writeCapacityUnits: 10,
errorIsFn: trace.IsBadParameter,
- expectedProvisionedThroughput: &dynamodb.ProvisionedThroughput{
+ expectedProvisionedThroughput: &types.ProvisionedThroughput{
ReadCapacityUnits: aws.Int64(10),
WriteCapacityUnits: aws.Int64(10),
},
billingMode: billingModePayPerRequest,
- expectedBillingMode: dynamodb.BillingModePayPerRequest,
+ expectedBillingMode: types.BillingModePayPerRequest,
},
{
name: "bad parameter when the incorrect billing mode is set",
readCapacityUnits: 10,
writeCapacityUnits: 10,
errorIsFn: trace.IsBadParameter,
- expectedProvisionedThroughput: &dynamodb.ProvisionedThroughput{
+ expectedProvisionedThroughput: &types.ProvisionedThroughput{
ReadCapacityUnits: aws.Int64(10),
WriteCapacityUnits: aws.Int64(10),
},
billingMode: billingModePayPerRequest,
- expectedBillingMode: dynamodb.BillingModePayPerRequest,
+ expectedBillingMode: types.BillingModePayPerRequest,
},
{
name: "create table succeeds",
readCapacityUnits: 10,
writeCapacityUnits: 10,
errorIsFn: errIsNil,
- expectedProvisionedThroughput: &dynamodb.ProvisionedThroughput{
+ expectedProvisionedThroughput: &types.ProvisionedThroughput{
ReadCapacityUnits: aws.Int64(10),
WriteCapacityUnits: aws.Int64(10),
},
billingMode: billingModeProvisioned,
- expectedBillingMode: dynamodb.BillingModeProvisioned,
+ expectedBillingMode: types.BillingModeProvisioned,
},
} {
@@ -206,7 +217,7 @@ func TestCreateTable(t *testing.T) {
expectedTableName: tableName,
expectedProvisionedthroughput: tc.expectedProvisionedThroughput,
}
- backend := &Backend{
+ b := &Backend{
Entry: log.NewEntry(log.New()),
Config: Config{
BillingMode: tc.billingMode,
@@ -217,8 +228,178 @@ func TestCreateTable(t *testing.T) {
svc: &mock,
}
- err := backend.createTable(ctx, tableName, "_")
+ err := b.createTable(ctx, aws.String(tableName), "_")
require.True(t, tc.errorIsFn(err), err)
})
}
}
+
+// TestContinuousBackups verifies that the continuous backup state is set upon
+// startup of DynamoDB.
+func TestContinuousBackups(t *testing.T) {
+ ensureTestsEnabled(t)
+
+ // Create new backend with continuous backups enabled.
+ b, err := New(context.Background(), map[string]interface{}{
+ "table_name": uuid.NewString() + "-test",
+ "continuous_backups": true,
+ })
+ require.NoError(t, err)
+
+ // Remove table after tests are done.
+ t.Cleanup(func() {
+ require.NoError(t, deleteTable(context.Background(), b.svc, b.Config.TableName))
+ })
+
+ // Check status of continuous backups.
+ ok, err := getContinuousBackups(context.Background(), b.svc, b.Config.TableName)
+ require.NoError(t, err)
+ require.True(t, ok)
+}
+
+// TestAutoScaling verifies that auto scaling is enabled upon startup of DynamoDB.
+func TestAutoScaling(t *testing.T) {
+ ensureTestsEnabled(t)
+
+ // Create new backend with auto scaling enabled.
+ b, err := New(context.Background(), map[string]interface{}{
+ "table_name": uuid.NewString() + "-test",
+ "auto_scaling": true,
+ "read_min_capacity": 10,
+ "read_max_capacity": 20,
+ "read_target_value": 50.0,
+ "write_min_capacity": 10,
+ "write_max_capacity": 20,
+ "write_target_value": 50.0,
+ // Billing mode must be set to provisioned mode for the
+ // auto-scaling option to be respected.
+ "billing_mode": billingModeProvisioned,
+ })
+ require.NoError(t, err)
+
+ // Remove table after tests are done.
+ t.Cleanup(func() {
+ require.NoError(t, deleteTable(context.Background(), b.svc, b.Config.TableName))
+ })
+
+ awsConfig, err := config.LoadDefaultConfig(context.Background())
+ require.NoError(t, err)
+
+ expected := &AutoScalingParams{
+ ReadMinCapacity: 10,
+ ReadMaxCapacity: 20,
+ ReadTargetValue: 50.0,
+ WriteMinCapacity: 10,
+ WriteMaxCapacity: 20,
+ WriteTargetValue: 50.0,
+ }
+ // Check auto scaling values match.
+ require.EventuallyWithT(t, func(t *assert.CollectT) {
+ resp, err := getAutoScaling(context.Background(), applicationautoscaling.NewFromConfig(awsConfig), b.Config.TableName)
+ assert.NoError(t, err)
+ assert.Equal(t, expected, resp)
+ }, 10*time.Second, 500*time.Millisecond)
+}
+
+// getContinuousBackups gets the state of continuous backups.
+func getContinuousBackups(ctx context.Context, svc dynamoClient, tableName string) (bool, error) {
+ resp, err := svc.DescribeContinuousBackups(ctx, &dynamodb.DescribeContinuousBackupsInput{
+ TableName: aws.String(tableName),
+ })
+ if err != nil {
+ return false, convertError(err)
+ }
+
+ switch resp.ContinuousBackupsDescription.PointInTimeRecoveryDescription.PointInTimeRecoveryStatus {
+ case types.PointInTimeRecoveryStatusEnabled:
+ return true, nil
+ case types.PointInTimeRecoveryStatusDisabled:
+ return false, nil
+ default:
+ return false, trace.BadParameter("dynamo returned unknown state for continuous backups: %v",
+ resp.ContinuousBackupsDescription.PointInTimeRecoveryDescription.PointInTimeRecoveryStatus)
+ }
+}
+
+type AutoScalingParams struct {
+ // ReadMaxCapacity is the maximum provisioned read capacity.
+ ReadMaxCapacity int32
+ // ReadMinCapacity is the minimum provisioned read capacity.
+ ReadMinCapacity int32
+ // ReadTargetValue is the ratio of consumed read to provisioned capacity.
+ ReadTargetValue float64
+ // WriteMaxCapacity is the maximum provisioned write capacity.
+ WriteMaxCapacity int32
+ // WriteMinCapacity is the minimum provisioned write capacity.
+ WriteMinCapacity int32
+ // WriteTargetValue is the ratio of consumed write to provisioned capacity.
+ WriteTargetValue float64
+}
+
+// getAutoScaling gets the state of auto scaling.
+func getAutoScaling(ctx context.Context, svc *applicationautoscaling.Client, tableName string) (*AutoScalingParams, error) {
+ var resp AutoScalingParams
+
+ // Get scaling targets.
+ targetResponse, err := svc.DescribeScalableTargets(ctx, &applicationautoscaling.DescribeScalableTargetsInput{
+ ServiceNamespace: autoscalingtypes.ServiceNamespaceDynamodb,
+ })
+ if err != nil {
+ return nil, convertError(err)
+ }
+ for _, target := range targetResponse.ScalableTargets {
+ switch target.ScalableDimension {
+ case autoscalingtypes.ScalableDimensionDynamoDBTableReadCapacityUnits:
+ resp.ReadMinCapacity = aws.ToInt32(target.MinCapacity)
+ resp.ReadMaxCapacity = aws.ToInt32(target.MaxCapacity)
+ case autoscalingtypes.ScalableDimensionDynamoDBTableWriteCapacityUnits:
+ resp.WriteMinCapacity = aws.ToInt32(target.MinCapacity)
+ resp.WriteMaxCapacity = aws.ToInt32(target.MaxCapacity)
+ }
+ }
+
+ // Get scaling policies.
+ policyResponse, err := svc.DescribeScalingPolicies(ctx, &applicationautoscaling.DescribeScalingPoliciesInput{
+ ServiceNamespace: autoscalingtypes.ServiceNamespaceDynamodb,
+ })
+ if err != nil {
+ return nil, convertError(err)
+ }
+ for i := 0; i < len(policyResponse.ScalingPolicies); i++ {
+ policy := policyResponse.ScalingPolicies[i]
+ switch aws.ToString(policy.PolicyName) {
+ case fmt.Sprintf("%v-%v", tableName, readScalingPolicySuffix):
+ resp.ReadTargetValue = aws.ToFloat64(policy.TargetTrackingScalingPolicyConfiguration.TargetValue)
+ case fmt.Sprintf("%v-%v", tableName, writeScalingPolicySuffix):
+ resp.WriteTargetValue = aws.ToFloat64(policy.TargetTrackingScalingPolicyConfiguration.TargetValue)
+ }
+ }
+
+ return &resp, nil
+}
+
+// deleteTable will remove a table.
+func deleteTable(ctx context.Context, svc dynamoClient, tableName string) error {
+ _, err := svc.DeleteTable(ctx, &dynamodb.DeleteTableInput{
+ TableName: aws.String(tableName),
+ })
+ if err != nil {
+ return convertError(err)
+ }
+
+ waiter := dynamodb.NewTableNotExistsWaiter(svc)
+ if err := waiter.Wait(ctx,
+ &dynamodb.DescribeTableInput{
+ TableName: aws.String(tableName),
+ },
+ 10*time.Minute,
+ ); err != nil {
+ return convertError(err)
+ }
+ return nil
+}
+
+const (
+ readScalingPolicySuffix = "read-target-tracking-scaling-policy"
+ writeScalingPolicySuffix = "write-target-tracking-scaling-policy"
+)
diff --git a/lib/backend/dynamo/shards.go b/lib/backend/dynamo/shards.go
index 52641b6208dde..8ee52ba2f1d91 100644
--- a/lib/backend/dynamo/shards.go
+++ b/lib/backend/dynamo/shards.go
@@ -24,10 +24,11 @@ import (
"io"
"time"
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/service/dynamodb"
- "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute"
- "github.com/aws/aws-sdk-go/service/dynamodbstreams"
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/feature/dynamodbstreams/attributevalue"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodbstreams"
+ streamtypes "github.com/aws/aws-sdk-go-v2/service/dynamodbstreams/types"
"github.com/gravitational/trace"
"github.com/gravitational/teleport/api/types"
@@ -36,9 +37,9 @@ import (
)
type shardEvent struct {
- events []backend.Event
- shardID string
err error
+ shardID string
+ events []backend.Event
}
func (b *Backend) asyncPollStreams(ctx context.Context) error {
@@ -81,19 +82,19 @@ func (b *Backend) pollStreams(externalCtx context.Context) error {
if err != nil {
return trace.Wrap(err)
}
- b.Debugf("Found latest event stream %v.", aws.StringValue(streamArn))
+ b.Debugf("Found latest event stream %v.", aws.ToString(streamArn))
set := make(map[string]struct{})
eventsC := make(chan shardEvent)
- shouldStartPoll := func(shard *dynamodbstreams.Shard) bool {
- sid := aws.StringValue(shard.ShardId)
+ shouldStartPoll := func(shard streamtypes.Shard) bool {
+ sid := aws.ToString(shard.ShardId)
if _, ok := set[sid]; ok {
// already being polled
return false
}
- if _, ok := set[aws.StringValue(shard.ParentShardId)]; ok {
- b.Tracef("Skipping child shard: %s, still polling parent %s", sid, aws.StringValue(shard.ParentShardId))
+ if _, ok := set[aws.ToString(shard.ParentShardId)]; ok {
+ b.Tracef("Skipping child shard: %s, still polling parent %s", sid, aws.ToString(shard.ParentShardId))
// still processing parent
return false
}
@@ -118,7 +119,7 @@ func (b *Backend) pollStreams(externalCtx context.Context) error {
if !shouldStartPoll(shards[i]) {
continue
}
- shardID := aws.StringValue(shards[i].ShardId)
+ shardID := aws.ToString(shards[i].ShardId)
b.Tracef("Adding active shard %v.", shardID)
set[shardID] = struct{}{}
go b.asyncPollShard(ctx, streamArn, shards[i], eventsC, initC)
@@ -184,7 +185,7 @@ func (b *Backend) pollStreams(externalCtx context.Context) error {
}
func (b *Backend) findStream(ctx context.Context) (*string, error) {
- status, err := b.svc.DescribeTableWithContext(ctx, &dynamodb.DescribeTableInput{
+ status, err := b.svc.DescribeTable(ctx, &dynamodb.DescribeTableInput{
TableName: aws.String(b.TableName),
})
if err != nil {
@@ -196,10 +197,10 @@ func (b *Backend) findStream(ctx context.Context) (*string, error) {
return status.Table.LatestStreamArn, nil
}
-func (b *Backend) pollShard(ctx context.Context, streamArn *string, shard *dynamodbstreams.Shard, eventsC chan shardEvent, initC chan<- error) error {
- shardIterator, err := b.streams.GetShardIteratorWithContext(ctx, &dynamodbstreams.GetShardIteratorInput{
+func (b *Backend) pollShard(ctx context.Context, streamArn *string, shard streamtypes.Shard, eventsC chan shardEvent, initC chan<- error) error {
+ shardIterator, err := b.streams.GetShardIterator(ctx, &dynamodbstreams.GetShardIteratorInput{
ShardId: shard.ShardId,
- ShardIteratorType: aws.String(dynamodbstreams.ShardIteratorTypeLatest),
+ ShardIteratorType: streamtypes.ShardIteratorTypeLatest,
StreamArn: streamArn,
})
@@ -217,13 +218,13 @@ func (b *Backend) pollShard(ctx context.Context, streamArn *string, shard *dynam
ticker := time.NewTicker(b.PollStreamPeriod)
defer ticker.Stop()
iterator := shardIterator.ShardIterator
- shardID := aws.StringValue(shard.ShardId)
+ shardID := aws.ToString(shard.ShardId)
for {
select {
case <-ctx.Done():
return trace.ConnectionProblem(ctx.Err(), "context is closing")
case <-ticker.C:
- out, err := b.streams.GetRecordsWithContext(ctx, &dynamodbstreams.GetRecordsInput{
+ out, err := b.streams.GetRecords(ctx, &dynamodbstreams.GetRecordsInput{
ShardIterator: iterator,
})
if err != nil {
@@ -234,14 +235,14 @@ func (b *Backend) pollShard(ctx context.Context, streamArn *string, shard *dynam
}
if len(out.Records) == 0 {
if out.NextShardIterator == nil {
- b.Tracef("Shard is closed: %v.", aws.StringValue(shard.ShardId))
+ b.Tracef("Shard is closed: %v.", aws.ToString(shard.ShardId))
return io.EOF
}
iterator = out.NextShardIterator
continue
}
if out.NextShardIterator == nil {
- b.Tracef("Shard is closed: %v.", aws.StringValue(shard.ShardId))
+ b.Tracef("Shard is closed: %v.", aws.ToString(shard.ShardId))
return io.EOF
}
events := make([]backend.Event, 0, len(out.Records))
@@ -263,14 +264,14 @@ func (b *Backend) pollShard(ctx context.Context, streamArn *string, shard *dynam
}
// collectActiveShards collects shards
-func (b *Backend) collectActiveShards(ctx context.Context, streamArn *string) ([]*dynamodbstreams.Shard, error) {
- var out []*dynamodbstreams.Shard
+func (b *Backend) collectActiveShards(ctx context.Context, streamArn *string) ([]streamtypes.Shard, error) {
+ var out []streamtypes.Shard
input := &dynamodbstreams.DescribeStreamInput{
StreamArn: streamArn,
}
for {
- streamInfo, err := b.streams.DescribeStreamWithContext(ctx, input)
+ streamInfo, err := b.streams.DescribeStream(ctx, input)
if err != nil {
return nil, convertError(err)
}
@@ -282,8 +283,8 @@ func (b *Backend) collectActiveShards(ctx context.Context, streamArn *string) ([
}
}
-func filterActiveShards(shards []*dynamodbstreams.Shard) []*dynamodbstreams.Shard {
- var active []*dynamodbstreams.Shard
+func filterActiveShards(shards []streamtypes.Shard) []streamtypes.Shard {
+ var active []streamtypes.Shard
for i := range shards {
if shards[i].SequenceNumberRange.EndingSequenceNumber == nil {
active = append(active, shards[i])
@@ -292,18 +293,18 @@ func filterActiveShards(shards []*dynamodbstreams.Shard) []*dynamodbstreams.Shar
return active
}
-func toOpType(rec *dynamodbstreams.Record) (types.OpType, error) {
- switch aws.StringValue(rec.EventName) {
- case dynamodbstreams.OperationTypeInsert, dynamodbstreams.OperationTypeModify:
+func toOpType(rec streamtypes.Record) (types.OpType, error) {
+ switch rec.EventName {
+ case streamtypes.OperationTypeInsert, streamtypes.OperationTypeModify:
return types.OpPut, nil
- case dynamodbstreams.OperationTypeRemove:
+ case streamtypes.OperationTypeRemove:
return types.OpDelete, nil
default:
- return -1, trace.BadParameter("unsupported DynamodDB operation: %v", aws.StringValue(rec.EventName))
+ return -1, trace.BadParameter("unsupported DynamoDB operation: %v", rec.EventName)
}
}
-func toEvent(rec *dynamodbstreams.Record) (*backend.Event, error) {
+func toEvent(rec streamtypes.Record) (*backend.Event, error) {
op, err := toOpType(rec)
if err != nil {
return nil, trace.Wrap(err)
@@ -311,7 +312,7 @@ func toEvent(rec *dynamodbstreams.Record) (*backend.Event, error) {
switch op {
case types.OpPut:
var r record
- if err := dynamodbattribute.UnmarshalMap(rec.Dynamodb.NewImage, &r); err != nil {
+ if err := attributevalue.UnmarshalMap(rec.Dynamodb.NewImage, &r); err != nil {
return nil, trace.Wrap(err)
}
var expires time.Time
@@ -329,7 +330,7 @@ func toEvent(rec *dynamodbstreams.Record) (*backend.Event, error) {
}, nil
case types.OpDelete:
var r record
- if err := dynamodbattribute.UnmarshalMap(rec.Dynamodb.Keys, &r); err != nil {
+ if err := attributevalue.UnmarshalMap(rec.Dynamodb.Keys, &r); err != nil {
return nil, trace.Wrap(err)
}
return &backend.Event{
@@ -343,9 +344,9 @@ func toEvent(rec *dynamodbstreams.Record) (*backend.Event, error) {
}
}
-func (b *Backend) asyncPollShard(ctx context.Context, streamArn *string, shard *dynamodbstreams.Shard, eventsC chan shardEvent, initC chan<- error) {
+func (b *Backend) asyncPollShard(ctx context.Context, streamArn *string, shard streamtypes.Shard, eventsC chan shardEvent, initC chan<- error) {
var err error
- shardID := aws.StringValue(shard.ShardId)
+ shardID := aws.ToString(shard.ShardId)
defer func() {
if err == nil {
err = trace.BadParameter("shard %q exited unexpectedly", shardID)
diff --git a/lib/events/dynamoevents/dynamoevents.go b/lib/events/dynamoevents/dynamoevents.go
index 5a7cf589bda1b..d2383930794e9 100644
--- a/lib/events/dynamoevents/dynamoevents.go
+++ b/lib/events/dynamoevents/dynamoevents.go
@@ -27,31 +27,36 @@ import (
"fmt"
"maps"
"math"
+ "net/http"
"net/url"
"sort"
"strconv"
"strings"
"time"
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/aws/aws-sdk-go/aws/request"
- awssession "github.com/aws/aws-sdk-go/aws/session"
- "github.com/aws/aws-sdk-go/service/applicationautoscaling"
- "github.com/aws/aws-sdk-go/service/dynamodb"
- "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute"
- "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface"
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/config"
+ "github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue"
+ "github.com/aws/aws-sdk-go-v2/service/applicationautoscaling"
+ autoscalingtypes "github.com/aws/aws-sdk-go-v2/service/applicationautoscaling/types"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb"
+ dynamodbtypes "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+ "github.com/aws/smithy-go"
+ smithyendpoints "github.com/aws/smithy-go/endpoints"
"github.com/google/uuid"
"github.com/gravitational/trace"
"github.com/jonboulle/clockwork"
log "github.com/sirupsen/logrus"
+ "go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws"
"github.com/gravitational/teleport"
apidefaults "github.com/gravitational/teleport/api/defaults"
"github.com/gravitational/teleport/api/types"
apievents "github.com/gravitational/teleport/api/types/events"
- "github.com/gravitational/teleport/lib/backend/dynamo"
+ "github.com/gravitational/teleport/lib/defaults"
"github.com/gravitational/teleport/lib/events"
+ "github.com/gravitational/teleport/lib/modules"
+ awsmetrics "github.com/gravitational/teleport/lib/observability/metrics/aws"
dynamometrics "github.com/gravitational/teleport/lib/observability/metrics/dynamo"
"github.com/gravitational/teleport/lib/utils"
)
@@ -72,24 +77,24 @@ const (
)
// Defines the attribute schema for the DynamoDB event table and index.
-var tableSchema = []*dynamodb.AttributeDefinition{
+var tableSchema = []dynamodbtypes.AttributeDefinition{
// Existing attributes pre RFD 24.
{
AttributeName: aws.String(keySessionID),
- AttributeType: aws.String("S"),
+ AttributeType: dynamodbtypes.ScalarAttributeTypeS,
},
{
AttributeName: aws.String(keyEventIndex),
- AttributeType: aws.String("N"),
+ AttributeType: dynamodbtypes.ScalarAttributeTypeN,
},
{
AttributeName: aws.String(keyCreatedAt),
- AttributeType: aws.String("N"),
+ AttributeType: dynamodbtypes.ScalarAttributeTypeN,
},
// New attribute in RFD 24.
{
AttributeName: aws.String(keyDate),
- AttributeType: aws.String("S"),
+ AttributeType: dynamodbtypes.ScalarAttributeTypeS,
},
}
@@ -117,15 +122,15 @@ type Config struct {
DisableConflictCheck bool
// ReadMaxCapacity is the maximum provisioned read capacity.
- ReadMaxCapacity int64
+ ReadMaxCapacity int32
// ReadMinCapacity is the minimum provisioned read capacity.
- ReadMinCapacity int64
+ ReadMinCapacity int32
// ReadTargetValue is the ratio of consumed read to provisioned capacity.
ReadTargetValue float64
// WriteMaxCapacity is the maximum provisioned write capacity.
- WriteMaxCapacity int64
+ WriteMaxCapacity int32
// WriteMinCapacity is the minimum provisioned write capacity.
- WriteMinCapacity int64
+ WriteMinCapacity int32
// WriteTargetValue is the ratio of consumed write to provisioned capacity.
WriteTargetValue float64
@@ -204,10 +209,7 @@ type Log struct {
*log.Entry
// Config is a backend configuration
Config
- svc dynamodbiface.DynamoDBAPI
-
- // session holds the AWS client.
- session *awssession.Session
+ svc *dynamodb.Client
}
type event struct {
@@ -215,7 +217,7 @@ type event struct {
EventIndex int64
EventType string
CreatedAt int64
- Expires *int64 `json:"Expires,omitempty"`
+ Expires *int64 `json:"Expires,omitempty" dynamodbav:",omitempty"`
FieldsMap events.EventFields
EventNamespace string
CreatedAtDate string
@@ -266,108 +268,218 @@ func New(ctx context.Context, cfg Config) (*Log, error) {
if err != nil {
return nil, trace.Wrap(err)
}
- b := &Log{
- Entry: l,
- Config: cfg,
- }
- awsConfig := aws.Config{}
+ opts := []func(*config.LoadOptions) error{
+ config.WithRegion(cfg.Region),
+ config.WithHTTPClient(&http.Client{
+ Transport: &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ MaxIdleConns: defaults.HTTPMaxIdleConns,
+ MaxIdleConnsPerHost: defaults.HTTPMaxIdleConnsPerHost,
+ },
+ }),
+ config.WithAPIOptions(awsmetrics.MetricsMiddleware()),
+ config.WithAPIOptions(dynamometrics.MetricsMiddleware(dynamometrics.Backend)),
+ }
- // Override the default environment's region if value set in YAML file:
- if cfg.Region != "" {
- awsConfig.Region = aws.String(cfg.Region)
+ awsConfig, err := config.LoadDefaultConfig(ctx, opts...)
+ if err != nil {
+ return nil, trace.Wrap(err)
}
+ otelaws.AppendMiddlewares(&awsConfig.APIOptions, otelaws.WithAttributeSetter(otelaws.DynamoDBAttributeSetter))
+
+ var dynamoOpts []func(*dynamodb.Options)
+
// Override the service endpoint using the "endpoint" query parameter from
// "audit_events_uri". This is for non-AWS DynamoDB-compatible backends.
if cfg.Endpoint != "" {
- awsConfig.Endpoint = aws.String(cfg.Endpoint)
+ u, err := url.Parse(cfg.Endpoint)
+ if err != nil {
+ return nil, trace.BadParameter("configured DynamoDB events endpoint is invalid: %s", err.Error())
+ }
+
+ dynamoOpts = append(dynamoOpts, dynamodb.WithEndpointResolverV2(&staticResolver{endpoint: u}))
}
- b.session, err = awssession.NewSessionWithOptions(awssession.Options{
- SharedConfigState: awssession.SharedConfigEnable,
- Config: awsConfig,
- })
- if err != nil {
- return nil, trace.Wrap(err)
+ // FIPS settings are applied on the individual service instead of the aws config,
+ // as DynamoDB Streams and Application Auto Scaling do not yet have FIPS endpoints in non-GovCloud.
+ // See also: https://aws.amazon.com/compliance/fips/#FIPS_Endpoints_by_Service
+ if modules.GetModules().IsBoringBinary() && cfg.UseFIPSEndpoint == types.ClusterAuditConfigSpecV2_FIPS_ENABLED {
+ dynamoOpts = append(dynamoOpts, func(o *dynamodb.Options) {
+ o.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateEnabled
+ })
}
- // Create DynamoDB service.
- svc, err := dynamometrics.NewAPIMetrics(dynamometrics.Events, dynamodb.New(b.session, &aws.Config{
- // Setting this on the individual service instead of the session, as DynamoDB Streams
- // and Application Auto Scaling do not yet have FIPS endpoints in non-GovCloud.
- // See also: https://aws.amazon.com/compliance/fips/#FIPS_Endpoints_by_Service
- UseFIPSEndpoint: events.FIPSProtoStateToAWSState(cfg.UseFIPSEndpoint),
- }))
- if err != nil {
+ b := &Log{
+ Entry: l,
+ Config: cfg,
+ svc: dynamodb.NewFromConfig(awsConfig, dynamoOpts...),
+ }
+
+ if err := b.configureTable(ctx, applicationautoscaling.NewFromConfig(awsConfig)); err != nil {
return nil, trace.Wrap(err)
}
- b.svc = svc
+ return b, nil
+}
+
+type staticResolver struct {
+ endpoint *url.URL
+}
+
+func (s *staticResolver) ResolveEndpoint(ctx context.Context, params dynamodb.EndpointParameters) (smithyendpoints.Endpoint, error) {
+ return smithyendpoints.Endpoint{URI: *s.endpoint}, nil
+}
+
+type tableStatus int
+
+const (
+ tableStatusError = iota
+ tableStatusMissing
+ tableStatusNeedsMigration
+ tableStatusOK
+)
+
+func (l *Log) configureTable(ctx context.Context, svc *applicationautoscaling.Client) error {
// check if the table exists?
- ts, err := b.getTableStatus(ctx, b.Tablename)
+ ts, err := l.getTableStatus(ctx, l.Tablename)
if err != nil {
- return nil, trace.Wrap(err)
+ return trace.Wrap(err)
}
switch ts {
case tableStatusOK:
break
case tableStatusMissing:
- err = b.createTable(ctx, b.Tablename)
+ err = l.createTable(ctx, l.Tablename)
case tableStatusNeedsMigration:
- return nil, trace.BadParameter("unsupported schema")
+ return trace.BadParameter("unsupported schema")
}
if err != nil {
- return nil, trace.Wrap(err)
+ return trace.Wrap(err)
}
- err = dynamo.TurnOnTimeToLive(ctx, b.svc, b.Tablename, keyExpires)
+ tableName := aws.String(l.Tablename)
+ ttlStatus, err := l.svc.DescribeTimeToLive(ctx, &dynamodb.DescribeTimeToLiveInput{
+ TableName: tableName,
+ })
if err != nil {
- return nil, trace.Wrap(err)
+ return trace.Wrap(convertError(err))
+ }
+ switch ttlStatus.TimeToLiveDescription.TimeToLiveStatus {
+ case dynamodbtypes.TimeToLiveStatusEnabled, dynamodbtypes.TimeToLiveStatusEnabling:
+ default:
+ _, err = l.svc.UpdateTimeToLive(ctx, &dynamodb.UpdateTimeToLiveInput{
+ TableName: tableName,
+ TimeToLiveSpecification: &dynamodbtypes.TimeToLiveSpecification{
+ AttributeName: aws.String(keyExpires),
+ Enabled: aws.Bool(true),
+ },
+ })
+ if err != nil {
+ return trace.Wrap(convertError(err))
+ }
}
// Enable continuous backups if requested.
- if b.Config.EnableContinuousBackups {
- if err := dynamo.SetContinuousBackups(ctx, b.svc, b.Tablename); err != nil {
- return nil, trace.Wrap(err)
+ if l.Config.EnableContinuousBackups {
+ // Make request to AWS to update continuous backups settings.
+ _, err := l.svc.UpdateContinuousBackups(ctx, &dynamodb.UpdateContinuousBackupsInput{
+ PointInTimeRecoverySpecification: &dynamodbtypes.PointInTimeRecoverySpecification{
+ PointInTimeRecoveryEnabled: aws.Bool(true),
+ },
+ TableName: tableName,
+ })
+ if err != nil {
+ return trace.Wrap(convertError(err))
}
}
// Enable auto scaling if requested.
- if b.Config.EnableAutoScaling {
- if err := dynamo.SetAutoScaling(ctx, applicationautoscaling.New(b.session), dynamo.GetTableID(b.Tablename), dynamo.AutoScalingParams{
- ReadMinCapacity: b.Config.ReadMinCapacity,
- ReadMaxCapacity: b.Config.ReadMaxCapacity,
- ReadTargetValue: b.Config.ReadTargetValue,
- WriteMinCapacity: b.Config.WriteMinCapacity,
- WriteMaxCapacity: b.Config.WriteMaxCapacity,
- WriteTargetValue: b.Config.WriteTargetValue,
- }); err != nil {
- return nil, trace.Wrap(err)
+ if l.Config.EnableAutoScaling {
+ type autoscalingParams struct {
+ readDimension autoscalingtypes.ScalableDimension
+ writeDimension autoscalingtypes.ScalableDimension
+ resourceID string
+ readPolicy string
+ writePolicy string
}
- if err := dynamo.SetAutoScaling(ctx, applicationautoscaling.New(b.session), dynamo.GetIndexID(b.Tablename, indexTimeSearchV2), dynamo.AutoScalingParams{
- ReadMinCapacity: b.Config.ReadMinCapacity,
- ReadMaxCapacity: b.Config.ReadMaxCapacity,
- ReadTargetValue: b.Config.ReadTargetValue,
- WriteMinCapacity: b.Config.WriteMinCapacity,
- WriteMaxCapacity: b.Config.WriteMaxCapacity,
- WriteTargetValue: b.Config.WriteTargetValue,
- }); err != nil {
- return nil, trace.Wrap(err)
+ params := []autoscalingParams{
+ {
+ readDimension: autoscalingtypes.ScalableDimensionDynamoDBTableReadCapacityUnits,
+ writeDimension: autoscalingtypes.ScalableDimensionDynamoDBTableWriteCapacityUnits,
+ resourceID: fmt.Sprintf("table/%s", l.Tablename),
+ readPolicy: fmt.Sprintf("%s-read-target-tracking-scaling-policy", l.Tablename),
+ writePolicy: fmt.Sprintf("%s-write-target-tracking-scaling-policy", l.Tablename),
+ },
+ {
+ readDimension: autoscalingtypes.ScalableDimensionDynamoDBIndexReadCapacityUnits,
+ writeDimension: autoscalingtypes.ScalableDimensionDynamoDBIndexWriteCapacityUnits,
+ resourceID: fmt.Sprintf("table/%s/index/%s", l.Tablename, indexTimeSearchV2),
+ readPolicy: fmt.Sprintf("%s/index/%s-read-target-tracking-scaling-policy", l.Tablename, indexTimeSearchV2),
+ writePolicy: fmt.Sprintf("%s/index/%s-write-target-tracking-scaling-policy", l.Tablename, indexTimeSearchV2),
+ },
}
- }
- return b, nil
-}
+ for _, p := range params {
+ // Define scaling targets. Defines minimum and maximum {read,write} capacity.
+ if _, err := svc.RegisterScalableTarget(ctx, &applicationautoscaling.RegisterScalableTargetInput{
+ MinCapacity: aws.Int32(l.ReadMinCapacity),
+ MaxCapacity: aws.Int32(l.ReadMaxCapacity),
+ ResourceId: aws.String(p.resourceID),
+ ScalableDimension: p.readDimension,
+ ServiceNamespace: autoscalingtypes.ServiceNamespaceDynamodb,
+ }); err != nil {
+ return trace.Wrap(convertError(err))
+ }
+ if _, err := svc.RegisterScalableTarget(ctx, &applicationautoscaling.RegisterScalableTargetInput{
+ MinCapacity: aws.Int32(l.WriteMinCapacity),
+ MaxCapacity: aws.Int32(l.WriteMaxCapacity),
+ ResourceId: aws.String(p.resourceID),
+ ScalableDimension: p.writeDimension,
+ ServiceNamespace: autoscalingtypes.ServiceNamespaceDynamodb,
+ }); err != nil {
+ return trace.Wrap(convertError(err))
+ }
-type tableStatus int
+ // Define scaling policy. Defines the ratio of {read,write} consumed capacity to
+ // provisioned capacity DynamoDB will try and maintain.
+ if _, err := svc.PutScalingPolicy(ctx, &applicationautoscaling.PutScalingPolicyInput{
+ PolicyName: aws.String(p.readPolicy),
+ PolicyType: autoscalingtypes.PolicyTypeTargetTrackingScaling,
+ ResourceId: aws.String(p.resourceID),
+ ScalableDimension: p.readDimension,
+ ServiceNamespace: autoscalingtypes.ServiceNamespaceDynamodb,
+ TargetTrackingScalingPolicyConfiguration: &autoscalingtypes.TargetTrackingScalingPolicyConfiguration{
+ PredefinedMetricSpecification: &autoscalingtypes.PredefinedMetricSpecification{
+ PredefinedMetricType: autoscalingtypes.MetricTypeDynamoDBReadCapacityUtilization,
+ },
+ TargetValue: aws.Float64(l.ReadTargetValue),
+ },
+ }); err != nil {
+ return trace.Wrap(convertError(err))
+ }
-const (
- tableStatusError = iota
- tableStatusMissing
- tableStatusNeedsMigration
- tableStatusOK
-)
+ if _, err := svc.PutScalingPolicy(ctx, &applicationautoscaling.PutScalingPolicyInput{
+ PolicyName: aws.String(p.writePolicy),
+ PolicyType: autoscalingtypes.PolicyTypeTargetTrackingScaling,
+ ResourceId: aws.String(p.resourceID),
+ ScalableDimension: p.writeDimension,
+ ServiceNamespace: autoscalingtypes.ServiceNamespaceDynamodb,
+ TargetTrackingScalingPolicyConfiguration: &autoscalingtypes.TargetTrackingScalingPolicyConfiguration{
+ PredefinedMetricSpecification: &autoscalingtypes.PredefinedMetricSpecification{
+ PredefinedMetricType: autoscalingtypes.MetricTypeDynamoDBWriteCapacityUtilization,
+ },
+ TargetValue: aws.Float64(l.WriteTargetValue),
+ },
+ }); err != nil {
+ return trace.Wrap(convertError(err))
+ }
+ }
+ }
+
+ return nil
+}
// EmitAuditEvent emits audit event
func (l *Log) EmitAuditEvent(ctx context.Context, in apievents.AuditEvent) error {
@@ -451,7 +563,7 @@ func (l *Log) putAuditEvent(ctx context.Context, sessionID string, in apievents.
return trace.Wrap(err)
}
- if _, err = l.svc.PutItemWithContext(ctx, input); err != nil {
+ if _, err = l.svc.PutItem(ctx, input); err != nil {
err = convertError(err)
switch {
@@ -492,7 +604,7 @@ func (l *Log) createPutItem(sessionID string, in apievents.AuditEvent) (*dynamod
CreatedAtDate: in.GetTime().Format(iso8601DateFormat),
}
l.setExpiry(&e)
- av, err := dynamodbattribute.MarshalMap(e)
+ av, err := attributevalue.MarshalMap(e)
if err != nil {
return nil, trace.Wrap(err)
}
@@ -546,7 +658,7 @@ type checkpointKey struct {
Date string `json:"date,omitempty"`
// A DynamoDB query iterator. Allows us to resume a partial query.
- Iterator map[string]*dynamodb.AttributeValue `json:"iterator,omitempty"`
+ Iterator string `json:"iterator,omitempty"`
// EventKey is a derived identifier for an event used for resuming
// sub-page breaks due to size constraints.
@@ -676,11 +788,11 @@ func (l *Log) searchEventsRaw(ctx context.Context, fromUTC, toUTC time.Time, nam
}
indexName := aws.String(indexTimeSearchV2)
- var left int64
+ var left int32
if limit != 0 {
- left = int64(limit)
+ left = int32(limit)
} else {
- left = math.MaxInt64
+ left = math.MaxInt32
}
// Resume scanning at the correct date. We need to do this because we send individual queries per date
@@ -769,11 +881,11 @@ func GetCreatedAtFromStartKey(startKey string) (time.Time, error) {
if err != nil {
return time.Time{}, trace.Wrap(err)
}
- if checkpoint.Iterator == nil {
+ if checkpoint.Iterator == "" {
return time.Time{}, errors.New("missing iterator")
}
var e event
- if err := dynamodbattribute.UnmarshalMap(checkpoint.Iterator, &e); err != nil {
+ if err := json.Unmarshal([]byte(checkpoint.Iterator), &e); err != nil {
return time.Time{}, trace.Wrap(err)
}
if e.CreatedAt <= 0 {
@@ -792,7 +904,7 @@ func getCheckpointFromStartKey(startKey string) (checkpointKey, error) {
}
// If a checkpoint key is provided, unmarshal it so we can work with it's parts.
if err := json.Unmarshal([]byte(startKey), &checkpoint); err != nil {
- return checkpoint, trace.Wrap(err)
+ return checkpointKey{}, trace.Wrap(err)
}
return checkpoint, nil
}
@@ -923,7 +1035,7 @@ func fromWhereExpr(cond *types.WhereExpr, params *condFilterParams) (string, err
// getTableStatus checks if a given table exists
func (l *Log) getTableStatus(ctx context.Context, tableName string) (tableStatus, error) {
- _, err := l.svc.DescribeTableWithContext(ctx, &dynamodb.DescribeTableInput{
+ _, err := l.svc.DescribeTable(ctx, &dynamodb.DescribeTableInput{
TableName: aws.String(tableName),
})
err = convertError(err)
@@ -938,7 +1050,7 @@ func (l *Log) getTableStatus(ctx context.Context, tableName string) (tableStatus
// indexExists checks if a given index exists on a given table and that it is active or updating.
func (l *Log) indexExists(ctx context.Context, tableName, indexName string) (bool, error) {
- tableDescription, err := l.svc.DescribeTableWithContext(ctx, &dynamodb.DescribeTableInput{
+ tableDescription, err := l.svc.DescribeTable(ctx, &dynamodb.DescribeTableInput{
TableName: aws.String(tableName),
})
if err != nil {
@@ -946,7 +1058,7 @@ func (l *Log) indexExists(ctx context.Context, tableName, indexName string) (boo
}
for _, gsi := range tableDescription.Table.GlobalSecondaryIndexes {
- if *gsi.IndexName == indexName && (*gsi.IndexStatus == dynamodb.IndexStatusActive || *gsi.IndexStatus == dynamodb.IndexStatusUpdating) {
+ if *gsi.IndexName == indexName && (gsi.IndexStatus == dynamodbtypes.IndexStatusActive || gsi.IndexStatus == dynamodbtypes.IndexStatusUpdating) {
return true, nil
}
}
@@ -960,18 +1072,18 @@ func (l *Log) indexExists(ctx context.Context, tableName, indexName string) (boo
// currently is always set to "FullPath" (used to be something else, that's
// why it's a parameter for migration purposes)
func (l *Log) createTable(ctx context.Context, tableName string) error {
- provisionedThroughput := dynamodb.ProvisionedThroughput{
+ provisionedThroughput := dynamodbtypes.ProvisionedThroughput{
ReadCapacityUnits: aws.Int64(l.ReadCapacityUnits),
WriteCapacityUnits: aws.Int64(l.WriteCapacityUnits),
}
- elems := []*dynamodb.KeySchemaElement{
+ elems := []dynamodbtypes.KeySchemaElement{
{
AttributeName: aws.String(keySessionID),
- KeyType: aws.String("HASH"),
+ KeyType: dynamodbtypes.KeyTypeHash,
},
{
AttributeName: aws.String(keyEventIndex),
- KeyType: aws.String("RANGE"),
+ KeyType: dynamodbtypes.KeyTypeRange,
},
}
c := dynamodb.CreateTableInput{
@@ -979,38 +1091,41 @@ func (l *Log) createTable(ctx context.Context, tableName string) error {
AttributeDefinitions: tableSchema,
KeySchema: elems,
ProvisionedThroughput: &provisionedThroughput,
- GlobalSecondaryIndexes: []*dynamodb.GlobalSecondaryIndex{
+ GlobalSecondaryIndexes: []dynamodbtypes.GlobalSecondaryIndex{
{
IndexName: aws.String(indexTimeSearchV2),
- KeySchema: []*dynamodb.KeySchemaElement{
+ KeySchema: []dynamodbtypes.KeySchemaElement{
{
// Partition by date instead of namespace.
AttributeName: aws.String(keyDate),
- KeyType: aws.String("HASH"),
+ KeyType: dynamodbtypes.KeyTypeHash,
},
{
AttributeName: aws.String(keyCreatedAt),
- KeyType: aws.String("RANGE"),
+ KeyType: dynamodbtypes.KeyTypeRange,
},
},
- Projection: &dynamodb.Projection{
- ProjectionType: aws.String("ALL"),
+ Projection: &dynamodbtypes.Projection{
+ ProjectionType: dynamodbtypes.ProjectionTypeAll,
},
ProvisionedThroughput: &provisionedThroughput,
},
},
}
- _, err := l.svc.CreateTableWithContext(ctx, &c)
+ _, err := l.svc.CreateTable(ctx, &c)
if err != nil {
return trace.Wrap(err)
}
log.Infof("Waiting until table %q is created", tableName)
- err = l.svc.WaitUntilTableExistsWithContext(ctx, &dynamodb.DescribeTableInput{
- TableName: aws.String(tableName),
- })
+ waiter := dynamodb.NewTableExistsWaiter(l.svc)
+ err = waiter.Wait(ctx,
+ &dynamodb.DescribeTableInput{TableName: aws.String(tableName)},
+ 10*time.Minute,
+ )
if err == nil {
log.Infof("Table %q has been created", tableName)
}
+
return trace.Wrap(err)
}
@@ -1021,15 +1136,15 @@ func (l *Log) Close() error {
// deleteAllItems deletes all items from the database, used in tests
func (l *Log) deleteAllItems(ctx context.Context) error {
- out, err := l.svc.ScanWithContext(ctx, &dynamodb.ScanInput{TableName: aws.String(l.Tablename)})
+ out, err := l.svc.Scan(ctx, &dynamodb.ScanInput{TableName: aws.String(l.Tablename)})
if err != nil {
return trace.Wrap(err)
}
- var requests []*dynamodb.WriteRequest
+ var requests []dynamodbtypes.WriteRequest
for _, item := range out.Items {
- requests = append(requests, &dynamodb.WriteRequest{
- DeleteRequest: &dynamodb.DeleteRequest{
- Key: map[string]*dynamodb.AttributeValue{
+ requests = append(requests, dynamodbtypes.WriteRequest{
+ DeleteRequest: &dynamodbtypes.DeleteRequest{
+ Key: map[string]dynamodbtypes.AttributeValue{
keySessionID: item[keySessionID],
keyEventIndex: item[keyEventIndex],
},
@@ -1045,8 +1160,8 @@ func (l *Log) deleteAllItems(ctx context.Context) error {
chunk := requests[:top]
requests = requests[top:]
- _, err := l.svc.BatchWriteItemWithContext(ctx, &dynamodb.BatchWriteItemInput{
- RequestItems: map[string][]*dynamodb.WriteRequest{
+ _, err := l.svc.BatchWriteItem(ctx, &dynamodb.BatchWriteItemInput{
+ RequestItems: map[string][]dynamodbtypes.WriteRequest{
l.Tablename: chunk,
},
})
@@ -1062,15 +1177,20 @@ func (l *Log) deleteAllItems(ctx context.Context) error {
// deleteTable deletes DynamoDB table with a given name
func (l *Log) deleteTable(ctx context.Context, tableName string, wait bool) error {
tn := aws.String(tableName)
- _, err := l.svc.DeleteTableWithContext(ctx, &dynamodb.DeleteTableInput{TableName: tn})
+ _, err := l.svc.DeleteTable(ctx, &dynamodb.DeleteTableInput{TableName: tn})
if err != nil {
return trace.Wrap(err)
}
- if wait {
- return trace.Wrap(
- l.svc.WaitUntilTableNotExistsWithContext(ctx, &dynamodb.DescribeTableInput{TableName: tn}))
+ if !wait {
+ return nil
}
- return nil
+
+ waiter := dynamodb.NewTableNotExistsWaiter(l.svc)
+
+ return trace.Wrap(waiter.Wait(ctx,
+ &dynamodb.DescribeTableInput{TableName: tn},
+ 10*time.Minute,
+ ))
}
var errAWSValidation = errors.New("aws validation error")
@@ -1079,34 +1199,47 @@ func convertError(err error) error {
if err == nil {
return nil
}
- var aerr awserr.Error
- if !errors.As(err, &aerr) {
- return err
+
+ var conditionalCheckFailedError *dynamodbtypes.ConditionalCheckFailedException
+ if errors.As(err, &conditionalCheckFailedError) {
+ return trace.AlreadyExists(conditionalCheckFailedError.ErrorMessage())
}
- switch aerr.Code() {
- case dynamodb.ErrCodeConditionalCheckFailedException:
- return trace.AlreadyExists(aerr.Error())
- case dynamodb.ErrCodeProvisionedThroughputExceededException:
- return trace.ConnectionProblem(aerr, aerr.Error())
- case dynamodb.ErrCodeResourceNotFoundException:
- return trace.NotFound(aerr.Error())
- case dynamodb.ErrCodeItemCollectionSizeLimitExceededException:
- return trace.BadParameter(aerr.Error())
- case dynamodb.ErrCodeInternalServerError:
- return trace.BadParameter(aerr.Error())
- case ErrValidationException:
- // A ValidationException type is missing from AWS SDK.
- // Use errAWSValidation that for most cases will contain:
- // "Item size has exceeded the maximum allowed size" AWS validation error.
- return trace.Wrap(errAWSValidation, aerr.Error())
- default:
- return err
+ var throughputExceededError *dynamodbtypes.ProvisionedThroughputExceededException
+ if errors.As(err, &throughputExceededError) {
+ return trace.ConnectionProblem(throughputExceededError, throughputExceededError.ErrorMessage())
+ }
+
+ var notFoundError *dynamodbtypes.ResourceNotFoundException
+	if errors.As(err, &notFoundError) {
+ return trace.NotFound(notFoundError.ErrorMessage())
+ }
+
+ var collectionLimitExceededError *dynamodbtypes.ItemCollectionSizeLimitExceededException
+	if errors.As(err, &collectionLimitExceededError) {
+ return trace.BadParameter(collectionLimitExceededError.ErrorMessage())
+ }
+
+ var internalError *dynamodbtypes.InternalServerError
+ if errors.As(err, &internalError) {
+ return trace.BadParameter(internalError.ErrorMessage())
}
+
+ var ae smithy.APIError
+ if errors.As(err, &ae) {
+ if ae.ErrorCode() == ErrValidationException {
+ // A ValidationException type is missing from AWS SDK.
+ // Use errAWSValidation that for most cases will contain:
+ // "Item size has exceeded the maximum allowed size" AWS validation error.
+ return trace.Wrap(errAWSValidation, ae.Error())
+ }
+ }
+
+ return err
}
type query interface {
- QueryWithContext(ctx context.Context, input *dynamodb.QueryInput, opts ...request.Option) (*dynamodb.QueryOutput, error)
+ Query(ctx context.Context, params *dynamodb.QueryInput, optFns ...func(*dynamodb.Options)) (*dynamodb.QueryOutput, error)
}
type eventsFetcher struct {
@@ -1118,7 +1251,7 @@ type eventsFetcher struct {
checkpoint *checkpointKey
foundStart bool
dates []string
- left int64
+ left int32
fromUTC time.Time
toUTC time.Time
@@ -1129,13 +1262,26 @@ type eventsFetcher struct {
}
func (l *eventsFetcher) processQueryOutput(output *dynamodb.QueryOutput, hasLeftFun func() bool) ([]event, bool, error) {
- var out []event
oldIterator := l.checkpoint.Iterator
- l.checkpoint.Iterator = output.LastEvaluatedKey
+ l.checkpoint.Iterator = ""
+ if output.LastEvaluatedKey != nil {
+ m := make(map[string]any)
+ if err := attributevalue.UnmarshalMap(output.LastEvaluatedKey, &m); err != nil {
+ return nil, false, trace.Wrap(err)
+ }
+
+ iter, err := json.Marshal(&m)
+ if err != nil {
+ return nil, false, err
+ }
+ l.checkpoint.Iterator = string(iter)
+ }
+
+ var out []event
for _, item := range output.Items {
var e event
- if err := dynamodbattribute.UnmarshalMap(item, &e); err != nil {
+ if err := attributevalue.UnmarshalMap(item, &e); err != nil {
return nil, false, trace.WrapWithMessage(err, "failed to unmarshal event")
}
data, err := json.Marshal(e.FieldsMap)
@@ -1180,7 +1326,7 @@ func (l *eventsFetcher) processQueryOutput(output *dynamodb.QueryOutput, hasLeft
if hasLeftFun != nil {
hf = hasLeftFun()
}
- l.hasLeft = hf || len(l.checkpoint.Iterator) != 0
+ l.hasLeft = hf || l.checkpoint.Iterator != ""
l.checkpoint.EventKey = ""
return out, true, nil
}
@@ -1190,10 +1336,6 @@ func (l *eventsFetcher) processQueryOutput(output *dynamodb.QueryOutput, hasLeft
func (l *eventsFetcher) QueryByDateIndex(ctx context.Context, filterExpr *string) (values []event, err error) {
query := "CreatedAtDate = :date AND CreatedAt BETWEEN :start and :end"
- var attributeNames map[string]*string
- if len(l.filter.condParams.attrNames) > 0 {
- attributeNames = aws.StringMap(l.filter.condParams.attrNames)
- }
dateLoop:
for i, date := range l.dates {
@@ -1208,7 +1350,7 @@ dateLoop:
attributes[fmt.Sprintf(":eventType%d", i)] = eventType
}
maps.Copy(attributes, l.filter.condParams.attrValues)
- attributeValues, err := dynamodbattribute.MarshalMap(attributes)
+ attributeValues, err := attributevalue.MarshalMap(attributes)
if err != nil {
return nil, trace.Wrap(err)
}
@@ -1216,16 +1358,29 @@ dateLoop:
input := dynamodb.QueryInput{
KeyConditionExpression: aws.String(query),
TableName: aws.String(l.tableName),
- ExpressionAttributeNames: attributeNames,
+ ExpressionAttributeNames: l.filter.condParams.attrNames,
ExpressionAttributeValues: attributeValues,
IndexName: aws.String(indexTimeSearchV2),
- ExclusiveStartKey: l.checkpoint.Iterator,
- Limit: aws.Int64(l.left),
+ Limit: aws.Int32(l.left),
FilterExpression: filterExpr,
ScanIndexForward: aws.Bool(l.forward),
}
+
+ if l.checkpoint.Iterator != "" {
+ m := make(map[string]any)
+ err = json.Unmarshal([]byte(l.checkpoint.Iterator), &m)
+ if err != nil {
+ return nil, trace.Wrap(err)
+ }
+
+ input.ExclusiveStartKey, err = attributevalue.MarshalMap(&m)
+ if err != nil {
+ return nil, trace.Wrap(err)
+ }
+ }
+
start := time.Now()
- out, err := l.api.QueryWithContext(ctx, &input)
+ out, err := l.api.Query(ctx, &input)
if err != nil {
return nil, trace.Wrap(err)
}
@@ -1254,12 +1409,12 @@ dateLoop:
// from the same date and the request's iterator to fetch the remainder of the page.
// If the input iterator is empty but the EventKey is not, we need to resume the query from the same date
// and we shouldn't move to the next date.
- if i < len(l.dates)-1 && len(l.checkpoint.Iterator) == 0 && l.checkpoint.EventKey == "" {
+ if i < len(l.dates)-1 && l.checkpoint.Iterator == "" && l.checkpoint.EventKey == "" {
l.checkpoint.Date = l.dates[i+1]
}
return values, nil
}
- if len(l.checkpoint.Iterator) == 0 {
+ if l.checkpoint.Iterator == "" {
continue dateLoop
}
}
@@ -1269,10 +1424,6 @@ dateLoop:
func (l *eventsFetcher) QueryBySessionIDIndex(ctx context.Context, sessionID string, filterExpr *string) (values []event, err error) {
query := "SessionID = :id"
- var attributeNames map[string]*string
- if len(l.filter.condParams.attrNames) > 0 {
- attributeNames = aws.StringMap(l.filter.condParams.attrNames)
- }
attributes := map[string]interface{}{
":id": sessionID,
@@ -1282,23 +1433,35 @@ func (l *eventsFetcher) QueryBySessionIDIndex(ctx context.Context, sessionID str
}
maps.Copy(attributes, l.filter.condParams.attrValues)
- attributeValues, err := dynamodbattribute.MarshalMap(attributes)
+ attributeValues, err := attributevalue.MarshalMap(attributes)
if err != nil {
return nil, trace.Wrap(err)
}
input := dynamodb.QueryInput{
KeyConditionExpression: aws.String(query),
TableName: aws.String(l.tableName),
- ExpressionAttributeNames: attributeNames,
+ ExpressionAttributeNames: l.filter.condParams.attrNames,
ExpressionAttributeValues: attributeValues,
IndexName: nil, // Use primary SessionID index.
- ExclusiveStartKey: l.checkpoint.Iterator,
- Limit: aws.Int64(l.left),
+ Limit: aws.Int32(l.left),
FilterExpression: filterExpr,
ScanIndexForward: aws.Bool(l.forward),
}
+
+ if l.checkpoint.Iterator != "" {
+		m := make(map[string]any)
+ if err = json.Unmarshal([]byte(l.checkpoint.Iterator), &m); err != nil {
+ return nil, trace.Wrap(err)
+ }
+
+ input.ExclusiveStartKey, err = attributevalue.MarshalMap(&m)
+ if err != nil {
+ return nil, trace.Wrap(err)
+ }
+ }
+
start := time.Now()
- out, err := l.api.QueryWithContext(ctx, &input)
+ out, err := l.api.Query(ctx, &input)
if err != nil {
return nil, trace.Wrap(err)
}
diff --git a/lib/events/s3sessions/s3handler.go b/lib/events/s3sessions/s3handler.go
index a83d30b890de9..3c4d878a1d10d 100644
--- a/lib/events/s3sessions/s3handler.go
+++ b/lib/events/s3sessions/s3handler.go
@@ -20,8 +20,10 @@ package s3sessions
import (
"context"
+ "crypto/tls"
"fmt"
"io"
+ "net/http"
"net/url"
"path"
"sort"
@@ -29,13 +31,11 @@ import (
"strings"
"time"
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/credentials"
- awssession "github.com/aws/aws-sdk-go/aws/session"
- "github.com/aws/aws-sdk-go/service/s3"
- "github.com/aws/aws-sdk-go/service/s3/s3iface"
- "github.com/aws/aws-sdk-go/service/s3/s3manager"
- "github.com/aws/aws-sdk-go/service/s3/s3manager/s3manageriface"
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/config"
+ "github.com/aws/aws-sdk-go-v2/feature/s3/manager"
+ "github.com/aws/aws-sdk-go-v2/service/s3"
+ awstypes "github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/gravitational/trace"
log "github.com/sirupsen/logrus"
@@ -43,7 +43,7 @@ import (
"github.com/gravitational/teleport/api/types"
"github.com/gravitational/teleport/lib/defaults"
"github.com/gravitational/teleport/lib/events"
- s3metrics "github.com/gravitational/teleport/lib/observability/metrics/s3"
+ awsmetrics "github.com/gravitational/teleport/lib/observability/metrics/aws"
"github.com/gravitational/teleport/lib/session"
awsutils "github.com/gravitational/teleport/lib/utils/aws"
)
@@ -77,10 +77,10 @@ type Config struct {
Endpoint string
// ACL is the canned ACL to send to S3
ACL string
- // Session is an optional existing AWS client session
- Session *awssession.Session
- // Credentials if supplied are used in tests or with External Audit Storage.
- Credentials *credentials.Credentials
+ // AWSConfig is an optional existing AWS client configuration
+ AWSConfig *aws.Config
+ // CredentialsProvider if supplied is used in tests or with External Audit Storage.
+ CredentialsProvider aws.CredentialsProvider
// SSEKMSKey specifies the optional custom CMK used for KMS SSE.
SSEKMSKey string
@@ -156,38 +156,40 @@ func (s *Config) CheckAndSetDefaults() error {
if s.Bucket == "" {
return trace.BadParameter("missing parameter Bucket")
}
- if s.Session == nil {
- awsConfig := aws.Config{
- UseFIPSEndpoint: events.FIPSProtoStateToAWSState(s.UseFIPSEndpoint),
- }
- if s.Region != "" {
- awsConfig.Region = aws.String(s.Region)
- }
- if s.Endpoint != "" {
- awsConfig.Endpoint = aws.String(s.Endpoint)
- awsConfig.S3ForcePathStyle = aws.Bool(true)
+
+ if s.AWSConfig == nil {
+ var err error
+ opts := []func(*config.LoadOptions) error{
+ config.WithRegion(s.Region),
}
+
if s.Insecure {
- awsConfig.DisableSSL = aws.Bool(s.Insecure)
- }
- if s.Credentials != nil {
- awsConfig.Credentials = s.Credentials
+ opts = append(opts, config.WithHTTPClient(&http.Client{
+ Transport: &http.Transport{
+ TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
+ },
+ }))
+ } else {
+ hc, err := defaults.HTTPClient()
+ if err != nil {
+ return trace.Wrap(err)
+ }
+
+ opts = append(opts, config.WithHTTPClient(hc))
}
- hc, err := defaults.HTTPClient()
- if err != nil {
- return trace.Wrap(err)
+
+ if s.CredentialsProvider != nil {
+ opts = append(opts, config.WithCredentialsProvider(s.CredentialsProvider))
}
- awsConfig.HTTPClient = hc
- sess, err := awssession.NewSessionWithOptions(awssession.Options{
- SharedConfigState: awssession.SharedConfigEnable,
- Config: awsConfig,
- })
+ opts = append(opts, config.WithAPIOptions(awsmetrics.MetricsMiddleware()))
+
+ awsConfig, err := config.LoadDefaultConfig(context.Background(), opts...)
if err != nil {
return trace.Wrap(err)
}
- s.Session = sess
+ s.AWSConfig = &awsConfig
}
return nil
}
@@ -198,20 +200,15 @@ func NewHandler(ctx context.Context, cfg Config) (*Handler, error) {
return nil, trace.Wrap(err)
}
- client, err := s3metrics.NewAPIMetrics(s3.New(cfg.Session))
- if err != nil {
- return nil, trace.Wrap(err)
- }
-
- uploader, err := s3metrics.NewUploadAPIMetrics(s3manager.NewUploader(cfg.Session))
- if err != nil {
- return nil, trace.Wrap(err)
- }
+ // Create S3 client with custom options
+	client := s3.NewFromConfig(*cfg.AWSConfig, func(o *s3.Options) {
+		if cfg.Endpoint != "" {
+			o.BaseEndpoint = aws.String(cfg.Endpoint)
+			o.UsePathStyle = true
+		}
+	})
- downloader, err := s3metrics.NewDownloadAPIMetrics(s3manager.NewDownloader(cfg.Session))
- if err != nil {
- return nil, trace.Wrap(err)
- }
+ uploader := manager.NewUploader(client)
+ downloader := manager.NewDownloader(client)
h := &Handler{
Entry: log.WithFields(log.Fields{
@@ -222,6 +219,7 @@ func NewHandler(ctx context.Context, cfg Config) (*Handler, error) {
downloader: downloader,
client: client,
}
+
start := time.Now()
h.Infof("Setting up bucket %q, sessions path %q in region %q.", h.Bucket, h.Path, h.Region)
if err := h.ensureBucket(ctx); err != nil {
@@ -237,9 +235,9 @@ type Handler struct {
Config
// Entry is a logging entry
*log.Entry
- uploader s3manageriface.UploaderAPI
- downloader s3manageriface.DownloaderAPI
- client s3iface.S3API
+ uploader *manager.Uploader
+ downloader *manager.Downloader
+ client *s3.Client
}
// Close releases connection and resources associated with log if any
@@ -250,25 +248,23 @@ func (h *Handler) Close() error {
// Upload uploads object to S3 bucket, reads the contents of the object from reader
// and returns the target S3 bucket path in case of successful upload.
func (h *Handler) Upload(ctx context.Context, sessionID session.ID, reader io.Reader) (string, error) {
- var err error
path := h.path(sessionID)
- uploadInput := &s3manager.UploadInput{
+ uploadInput := &s3.PutObjectInput{
Bucket: aws.String(h.Bucket),
Key: aws.String(path),
Body: reader,
}
if !h.Config.DisableServerSideEncryption {
- uploadInput.ServerSideEncryption = aws.String(s3.ServerSideEncryptionAwsKms)
-
+ uploadInput.ServerSideEncryption = awstypes.ServerSideEncryptionAwsKms
if h.Config.SSEKMSKey != "" {
uploadInput.SSEKMSKeyId = aws.String(h.Config.SSEKMSKey)
}
}
if h.Config.ACL != "" {
- uploadInput.ACL = aws.String(h.Config.ACL)
+ uploadInput.ACL = awstypes.ObjectCannedACL(h.Config.ACL)
}
- _, err = h.uploader.UploadWithContext(ctx, uploadInput)
+ _, err := h.uploader.Upload(ctx, uploadInput)
if err != nil {
return "", awsutils.ConvertS3Error(err)
}
@@ -288,7 +284,7 @@ func (h *Handler) Download(ctx context.Context, sessionID session.ID, writer io.
h.Debugf("Downloading %v/%v [%v].", h.Bucket, h.path(sessionID), versionID)
- written, err := h.downloader.DownloadWithContext(ctx, writer, &s3.GetObjectInput{
+ _, err = h.downloader.Download(ctx, writer, &s3.GetObjectInput{
Bucket: aws.String(h.Bucket),
Key: aws.String(h.path(sessionID)),
VersionId: aws.String(versionID),
@@ -296,9 +292,6 @@ func (h *Handler) Download(ctx context.Context, sessionID session.ID, writer io.
if err != nil {
return awsutils.ConvertS3Error(err)
}
- if written == 0 {
- return trace.NotFound("recording for %v is not found", sessionID)
- }
return nil
}
@@ -315,24 +308,24 @@ type versionID struct {
func (h *Handler) getOldestVersion(ctx context.Context, bucket string, prefix string) (string, error) {
var versions []versionID
- // Get all versions of this object.
- err := h.client.ListObjectVersionsPagesWithContext(ctx, &s3.ListObjectVersionsInput{
+ paginator := s3.NewListObjectVersionsPaginator(h.client, &s3.ListObjectVersionsInput{
Bucket: aws.String(bucket),
Prefix: aws.String(prefix),
- }, func(page *s3.ListObjectVersionsOutput, lastPage bool) bool {
+ })
+
+ for paginator.HasMorePages() {
+ page, err := paginator.NextPage(ctx)
+ if err != nil {
+ return "", awsutils.ConvertS3Error(err)
+ }
for _, v := range page.Versions {
versions = append(versions, versionID{
- ID: *v.VersionId,
+ ID: aws.ToString(v.VersionId),
Timestamp: *v.LastModified,
})
}
-
- // Returning false stops iteration, stop iteration upon last page.
- return !lastPage
- })
- if err != nil {
- return "", awsutils.ConvertS3Error(err)
}
+
if len(versions) == 0 {
return "", trace.NotFound("%v/%v not found", bucket, prefix)
}
@@ -347,23 +340,28 @@ func (h *Handler) getOldestVersion(ctx context.Context, bucket string, prefix st
// delete bucket deletes bucket and all it's contents and is used in tests
func (h *Handler) deleteBucket(ctx context.Context) error {
// first, list and delete all the objects in the bucket
- out, err := h.client.ListObjectVersionsWithContext(ctx, &s3.ListObjectVersionsInput{
+ paginator := s3.NewListObjectVersionsPaginator(h.client, &s3.ListObjectVersionsInput{
Bucket: aws.String(h.Bucket),
})
- if err != nil {
- return awsutils.ConvertS3Error(err)
- }
- for _, ver := range out.Versions {
- _, err := h.client.DeleteObjectWithContext(ctx, &s3.DeleteObjectInput{
- Bucket: aws.String(h.Bucket),
- Key: ver.Key,
- VersionId: ver.VersionId,
- })
+
+ for paginator.HasMorePages() {
+ page, err := paginator.NextPage(ctx)
if err != nil {
return awsutils.ConvertS3Error(err)
}
+ for _, ver := range page.Versions {
+ _, err := h.client.DeleteObject(ctx, &s3.DeleteObjectInput{
+ Bucket: aws.String(h.Bucket),
+ Key: ver.Key,
+ VersionId: ver.VersionId,
+ })
+ if err != nil {
+ return awsutils.ConvertS3Error(err)
+ }
+ }
}
- _, err = h.client.DeleteBucketWithContext(ctx, &s3.DeleteBucketInput{
+
+ _, err := h.client.DeleteBucket(ctx, &s3.DeleteBucketInput{
Bucket: aws.String(h.Bucket),
})
return awsutils.ConvertS3Error(err)
@@ -382,7 +380,7 @@ func (h *Handler) fromPath(p string) session.ID {
// ensureBucket makes sure bucket exists, and if it does not, creates it
func (h *Handler) ensureBucket(ctx context.Context) error {
- _, err := h.client.HeadBucketWithContext(ctx, &s3.HeadBucketInput{
+ _, err := h.client.HeadBucket(ctx, &s3.HeadBucketInput{
Bucket: aws.String(h.Bucket),
})
err = awsutils.ConvertS3Error(err)
@@ -396,26 +394,26 @@ func (h *Handler) ensureBucket(ctx context.Context) error {
}
input := &s3.CreateBucketInput{
Bucket: aws.String(h.Bucket),
- ACL: aws.String("private"),
+ ACL: awstypes.BucketCannedACLPrivate,
}
- _, err = h.client.CreateBucketWithContext(ctx, input)
+ _, err = h.client.CreateBucket(ctx, input)
err = awsutils.ConvertS3Error(err, fmt.Sprintf("bucket %v already exists", aws.String(h.Bucket)))
if err != nil {
if !trace.IsAlreadyExists(err) {
return trace.Wrap(err)
}
+
// if this client has not created the bucket, don't reconfigure it
return nil
}
// Turn on versioning.
- ver := &s3.PutBucketVersioningInput{
+ _, err = h.client.PutBucketVersioning(ctx, &s3.PutBucketVersioningInput{
Bucket: aws.String(h.Bucket),
- VersioningConfiguration: &s3.VersioningConfiguration{
- Status: aws.String("Enabled"),
+ VersioningConfiguration: &awstypes.VersioningConfiguration{
+ Status: awstypes.BucketVersioningStatusEnabled,
},
- }
- _, err = h.client.PutBucketVersioningWithContext(ctx, ver)
+ })
err = awsutils.ConvertS3Error(err, fmt.Sprintf("failed to set versioning state for bucket %q", h.Bucket))
if err != nil {
return trace.Wrap(err)
@@ -423,17 +421,19 @@ func (h *Handler) ensureBucket(ctx context.Context) error {
// Turn on server-side encryption for the bucket.
if !h.DisableServerSideEncryption {
- _, err = h.client.PutBucketEncryptionWithContext(ctx, &s3.PutBucketEncryptionInput{
+ _, err = h.client.PutBucketEncryption(ctx, &s3.PutBucketEncryptionInput{
Bucket: aws.String(h.Bucket),
- ServerSideEncryptionConfiguration: &s3.ServerSideEncryptionConfiguration{
- Rules: []*s3.ServerSideEncryptionRule{{
- ApplyServerSideEncryptionByDefault: &s3.ServerSideEncryptionByDefault{
- SSEAlgorithm: aws.String(s3.ServerSideEncryptionAwsKms),
+ ServerSideEncryptionConfiguration: &awstypes.ServerSideEncryptionConfiguration{
+ Rules: []awstypes.ServerSideEncryptionRule{
+ {
+ ApplyServerSideEncryptionByDefault: &awstypes.ServerSideEncryptionByDefault{
+ SSEAlgorithm: awstypes.ServerSideEncryptionAwsKms,
+ },
},
- }},
+ },
},
})
- err = awsutils.ConvertS3Error(err, fmt.Sprintf("failed to set versioning state for bucket %q", h.Bucket))
+ err = awsutils.ConvertS3Error(err, fmt.Sprintf("failed to set encryption state for bucket %q", h.Bucket))
if err != nil {
return trace.Wrap(err)
}
diff --git a/lib/events/s3sessions/s3handler_thirdparty_test.go b/lib/events/s3sessions/s3handler_thirdparty_test.go
index b379446974bec..7cbecb1e92ca8 100644
--- a/lib/events/s3sessions/s3handler_thirdparty_test.go
+++ b/lib/events/s3sessions/s3handler_thirdparty_test.go
@@ -24,7 +24,9 @@ import (
"net/http/httptest"
"testing"
- "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/credentials"
+ "github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/google/uuid"
"github.com/gravitational/trace"
"github.com/johannesboyne/gofakes3"
@@ -41,12 +43,31 @@ func TestThirdpartyStreams(t *testing.T) {
backend := s3mem.New(s3mem.WithTimeSource(timeSource))
faker := gofakes3.New(backend, gofakes3.WithLogger(gofakes3.GlobalLog()))
server := httptest.NewServer(faker.Server())
+ defer server.Close()
+
+ bucketName := fmt.Sprintf("teleport-test-%v", uuid.New().String())
+
+ config := aws.Config{
+ Credentials: credentials.NewStaticCredentialsProvider("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""),
+ Region: "us-west-1",
+ BaseEndpoint: aws.String(server.URL),
+ }
+
+ s3Client := s3.NewFromConfig(config, func(o *s3.Options) {
+ o.UsePathStyle = true
+ })
+
+ // Create the bucket.
+ _, err := s3Client.CreateBucket(context.Background(), &s3.CreateBucketInput{
+ Bucket: aws.String(bucketName),
+ })
+ require.NoError(t, err)
handler, err := NewHandler(context.Background(), Config{
- Credentials: credentials.NewStaticCredentials("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""),
+ AWSConfig: &config,
Region: "us-west-1",
Path: "/test/",
- Bucket: fmt.Sprintf("teleport-test-%v", uuid.New().String()),
+ Bucket: bucketName,
Endpoint: server.URL,
DisableServerSideEncryption: true,
})
diff --git a/lib/events/s3sessions/s3stream.go b/lib/events/s3sessions/s3stream.go
index ec04d7ae9d761..3ae8cbdea8f87 100644
--- a/lib/events/s3sessions/s3stream.go
+++ b/lib/events/s3sessions/s3stream.go
@@ -20,6 +20,8 @@ package s3sessions
import (
"context"
+ "crypto/md5"
+ "encoding/base64"
"fmt"
"io"
"net/url"
@@ -27,14 +29,14 @@ import (
"strings"
"time"
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/service/s3"
- "github.com/aws/aws-sdk-go/service/s3/s3manager"
+ "github.com/aws/aws-sdk-go-v2/aws"
+ s3manager "github.com/aws/aws-sdk-go-v2/feature/s3/manager"
+ "github.com/aws/aws-sdk-go-v2/service/s3"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/gravitational/trace"
"github.com/sirupsen/logrus"
"github.com/gravitational/teleport"
- "github.com/gravitational/teleport/lib/defaults"
"github.com/gravitational/teleport/lib/events"
"github.com/gravitational/teleport/lib/session"
awsutils "github.com/gravitational/teleport/lib/utils/aws"
@@ -49,34 +51,34 @@ func (h *Handler) CreateUpload(ctx context.Context, sessionID session.ID) (*even
Key: aws.String(h.path(sessionID)),
}
if !h.Config.DisableServerSideEncryption {
- input.ServerSideEncryption = aws.String(s3.ServerSideEncryptionAwsKms)
+ input.ServerSideEncryption = types.ServerSideEncryptionAwsKms
if h.Config.SSEKMSKey != "" {
input.SSEKMSKeyId = aws.String(h.Config.SSEKMSKey)
}
}
if h.Config.ACL != "" {
- input.ACL = aws.String(h.Config.ACL)
+ input.ACL = types.ObjectCannedACL(h.Config.ACL)
}
- resp, err := h.client.CreateMultipartUploadWithContext(ctx, input)
+ resp, err := h.client.CreateMultipartUpload(ctx, input)
if err != nil {
return nil, trace.Wrap(awsutils.ConvertS3Error(err), "CreateMultiPartUpload session(%v)", sessionID)
}
h.WithFields(logrus.Fields{
- "upload": aws.StringValue(resp.UploadId),
+ "upload": aws.ToString(resp.UploadId),
"session": sessionID,
- "key": aws.StringValue(resp.Key),
+ "key": aws.ToString(resp.Key),
}).Infof("Created upload in %v", time.Since(start))
- return &events.StreamUpload{SessionID: sessionID, ID: aws.StringValue(resp.UploadId)}, nil
+ return &events.StreamUpload{SessionID: sessionID, ID: aws.ToString(resp.UploadId)}, nil
}
// UploadPart uploads part
func (h *Handler) UploadPart(ctx context.Context, upload events.StreamUpload, partNumber int64, partBody io.ReadSeeker) (*events.StreamPart, error) {
// This upload exceeded maximum number of supported parts, error now.
- if partNumber > s3manager.MaxUploadParts {
+ if partNumber > int64(s3manager.MaxUploadParts) {
return nil, trace.LimitExceeded(
"exceeded total allowed S3 limit MaxUploadParts (%d). Adjust PartSize to fit in this limit", s3manager.MaxUploadParts)
}
@@ -89,16 +91,30 @@ func (h *Handler) UploadPart(ctx context.Context, upload events.StreamUpload, pa
"key": uploadKey,
})
+ // Calculate the content MD5 hash to be included in the request. This is required for S3 buckets with Object Lock enabled.
+ hash := md5.New()
+ if _, err := io.Copy(hash, partBody); err != nil {
+ return nil, trace.Wrap(err, "failed to calculate content MD5 hash")
+ }
+ md5sum := base64.StdEncoding.EncodeToString(hash.Sum(nil))
+
+ // Reset the partBody reader to the beginning before passing it the params.
+ // This is necessary because after calculating the md5 hash the partBody reader will have been moved to the end of the data.
+ if _, err := partBody.Seek(0, io.SeekStart); err != nil {
+ return nil, trace.Wrap(err, "failed to reset part body reader to beginning")
+ }
+
params := &s3.UploadPartInput{
Bucket: aws.String(h.Bucket),
UploadId: aws.String(upload.ID),
Key: aws.String(uploadKey),
Body: partBody,
- PartNumber: aws.Int64(partNumber),
+ PartNumber: aws.Int32(int32(partNumber)),
+ ContentMD5: aws.String(md5sum),
}
log.Debugf("Uploading part %v", partNumber)
- resp, err := h.client.UploadPartWithContext(ctx, params)
+ resp, err := h.client.UploadPart(ctx, params)
if err != nil {
return nil, trace.Wrap(awsutils.ConvertS3Error(err),
"UploadPart(upload %v) part(%v) session(%v)", upload.ID, partNumber, upload.SessionID)
@@ -111,7 +127,7 @@ func (h *Handler) UploadPart(ctx context.Context, upload events.StreamUpload, pa
// the part we just uploaded, however.
log.Infof("Uploaded part %v in %v", partNumber, time.Since(start))
return &events.StreamPart{
- ETag: aws.StringValue(resp.ETag),
+ ETag: aws.ToString(resp.ETag),
Number: partNumber,
LastModified: time.Now(),
}, nil
@@ -130,7 +146,7 @@ func (h *Handler) abortUpload(ctx context.Context, upload events.StreamUpload) e
UploadId: aws.String(upload.ID),
}
log.Debug("Aborting upload")
- _, err := h.client.AbortMultipartUploadWithContext(ctx, req)
+ _, err := h.client.AbortMultipartUpload(ctx, req)
if err != nil {
return awsutils.ConvertS3Error(err)
}
@@ -166,11 +182,11 @@ func (h *Handler) CompleteUpload(ctx context.Context, upload events.StreamUpload
return parts[i].Number < parts[j].Number
})
- completedParts := make([]*s3.CompletedPart, len(parts))
+ completedParts := make([]types.CompletedPart, len(parts))
for i := range parts {
- completedParts[i] = &s3.CompletedPart{
+ completedParts[i] = types.CompletedPart{
ETag: aws.String(parts[i].ETag),
- PartNumber: aws.Int64(parts[i].Number),
+ PartNumber: aws.Int32(int32(parts[i].Number)),
}
}
@@ -179,9 +195,9 @@ func (h *Handler) CompleteUpload(ctx context.Context, upload events.StreamUpload
Bucket: aws.String(h.Bucket),
Key: aws.String(uploadKey),
UploadId: aws.String(upload.ID),
- MultipartUpload: &s3.CompletedMultipartUpload{Parts: completedParts},
+ MultipartUpload: &types.CompletedMultipartUpload{Parts: completedParts},
}
- _, err := h.client.CompleteMultipartUploadWithContext(ctx, params)
+ _, err := h.client.CompleteMultipartUpload(ctx, params)
if err != nil {
return trace.Wrap(awsutils.ConvertS3Error(err),
"CompleteMultipartUpload(upload %v) session(%v)", upload.ID, upload.SessionID)
@@ -202,29 +218,27 @@ func (h *Handler) ListParts(ctx context.Context, upload events.StreamUpload) ([]
log.Debug("Listing parts for upload")
var parts []events.StreamPart
- var partNumberMarker *int64
- for i := 0; i < defaults.MaxIterationLimit; i++ {
- re, err := h.client.ListPartsWithContext(ctx, &s3.ListPartsInput{
- Bucket: aws.String(h.Bucket),
- Key: aws.String(uploadKey),
- UploadId: aws.String(upload.ID),
- PartNumberMarker: partNumberMarker,
- })
+
+ paginator := s3.NewListPartsPaginator(h.client, &s3.ListPartsInput{
+ Bucket: aws.String(h.Bucket),
+ Key: aws.String(uploadKey),
+ UploadId: aws.String(upload.ID),
+ })
+
+ for paginator.HasMorePages() {
+ page, err := paginator.NextPage(ctx)
if err != nil {
return nil, awsutils.ConvertS3Error(err)
}
- for _, part := range re.Parts {
+ for _, part := range page.Parts {
parts = append(parts, events.StreamPart{
- Number: aws.Int64Value(part.PartNumber),
- ETag: aws.StringValue(part.ETag),
- LastModified: aws.TimeValue(part.LastModified),
+ Number: int64(aws.ToInt32(part.PartNumber)),
+ ETag: aws.ToString(part.ETag),
+ LastModified: aws.ToTime(part.LastModified),
})
}
- if !aws.BoolValue(re.IsTruncated) {
- break
- }
- partNumberMarker = re.NextPartNumberMarker
}
+
// Parts must be sorted in PartNumber order.
sort.Slice(parts, func(i, j int) bool {
return parts[i].Number < parts[j].Number
@@ -240,31 +254,23 @@ func (h *Handler) ListUploads(ctx context.Context) ([]events.StreamUpload, error
prefix = &trimmed
}
var uploads []events.StreamUpload
- var keyMarker *string
- var uploadIDMarker *string
- for i := 0; i < defaults.MaxIterationLimit; i++ {
- input := &s3.ListMultipartUploadsInput{
- Bucket: aws.String(h.Bucket),
- Prefix: prefix,
- KeyMarker: keyMarker,
- UploadIdMarker: uploadIDMarker,
- }
- re, err := h.client.ListMultipartUploadsWithContext(ctx, input)
+ paginator := s3.NewListMultipartUploadsPaginator(h.client, &s3.ListMultipartUploadsInput{
+ Bucket: aws.String(h.Bucket),
+ Prefix: prefix,
+ })
+
+ for paginator.HasMorePages() {
+ page, err := paginator.NextPage(ctx)
if err != nil {
return nil, awsutils.ConvertS3Error(err)
}
- for _, upload := range re.Uploads {
+ for _, upload := range page.Uploads {
uploads = append(uploads, events.StreamUpload{
- ID: aws.StringValue(upload.UploadId),
- SessionID: h.fromPath(aws.StringValue(upload.Key)),
- Initiated: aws.TimeValue(upload.Initiated),
+ ID: aws.ToString(upload.UploadId),
+ SessionID: h.fromPath(aws.ToString(upload.Key)),
+ Initiated: aws.ToTime(upload.Initiated),
})
}
- if !aws.BoolValue(re.IsTruncated) {
- break
- }
- keyMarker = re.NextKeyMarker
- uploadIDMarker = re.NextUploadIdMarker
}
sort.Slice(uploads, func(i, j int) bool {
diff --git a/lib/observability/metrics/aws/aws.go b/lib/observability/metrics/aws/aws.go
new file mode 100644
index 0000000000000..4c4fdf335a95c
--- /dev/null
+++ b/lib/observability/metrics/aws/aws.go
@@ -0,0 +1,104 @@
+/*
+ * Teleport
+ * Copyright (C) 2024 Gravitational, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+package aws
+
+import (
+ "context"
+ "time"
+
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/prometheus/client_golang/prometheus"
+
+ "github.com/gravitational/teleport"
+ "github.com/gravitational/teleport/lib/observability/metrics"
+)
+
+var (
+ apiRequestsTotal = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: teleport.MetricNamespace,
+ Name: "aws_sdk_requests_total",
+ Help: "Total number of requests to the AWS API",
+ },
+ []string{"service", "operation"},
+ )
+ apiRequests = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: teleport.MetricNamespace,
+ Name: "aws_sdk_requests",
+ Help: "Number of requests to the AWS API by result",
+ },
+ []string{"service", "operation", "result"},
+ )
+ apiRequestLatencies = prometheus.NewHistogramVec(
+ prometheus.HistogramOpts{
+ Namespace: teleport.MetricNamespace,
+ Name: "aws_sdk_requests_seconds",
+ Help: "Request latency for the AWS API",
+ // lowest bucket start of upper bound 0.001 sec (1 ms) with factor 2
+ // highest bucket start of 0.001 sec * 2^15 == 32.768 sec
+ Buckets: prometheus.ExponentialBuckets(0.001, 2, 16),
+ },
+ []string{"service", "operation"},
+ )
+)
+
+func init() {
+ _ = metrics.RegisterPrometheusCollectors(apiRequests, apiRequestsTotal, apiRequestLatencies)
+}
+
+// MetricsMiddleware returns middleware that can be used to capture
+// prometheus metrics for interacting with the AWS API.
+func MetricsMiddleware() []func(stack *middleware.Stack) error {
+ type timestampKey struct{}
+
+ return []func(s *middleware.Stack) error{
+ func(stack *middleware.Stack) error {
+ return stack.Initialize.Add(middleware.InitializeMiddlewareFunc(
+ "AWSMetricsBefore",
+ func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (middleware.InitializeOutput, middleware.Metadata, error) {
+ return next.HandleInitialize(context.WithValue(ctx, timestampKey{}, time.Now()), in)
+ }), middleware.Before)
+ },
+ func(stack *middleware.Stack) error {
+ return stack.Initialize.Add(middleware.InitializeMiddlewareFunc(
+ "AWSMetricsAfter",
+ func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (middleware.InitializeOutput, middleware.Metadata, error) {
+ out, md, err := next.HandleInitialize(ctx, in)
+
+ result := "success"
+ if err != nil {
+ result = "error"
+ }
+
+ then := ctx.Value(timestampKey{}).(time.Time)
+ service := awsmiddleware.GetServiceID(ctx)
+ operation := awsmiddleware.GetOperationName(ctx)
+ latency := time.Since(then).Seconds()
+
+ apiRequestsTotal.WithLabelValues(service, operation).Inc()
+ apiRequestLatencies.WithLabelValues(service, operation).Observe(latency)
+ apiRequests.WithLabelValues(service, operation, result).Inc()
+
+ return out, md, err
+ }), middleware.After)
+ },
+ }
+}
diff --git a/lib/observability/metrics/dynamo/api.go b/lib/observability/metrics/dynamo/api.go
deleted file mode 100644
index bd065ee2acf50..0000000000000
--- a/lib/observability/metrics/dynamo/api.go
+++ /dev/null
@@ -1,158 +0,0 @@
-/*
- * Teleport
- * Copyright (C) 2023 Gravitational, Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <https://www.gnu.org/licenses/>.
- */
-
-package dynamo
-
-import (
- "context"
- "time"
-
- "github.com/aws/aws-sdk-go/aws/request"
- "github.com/aws/aws-sdk-go/service/dynamodb"
- "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface"
- "github.com/gravitational/trace"
-
- "github.com/gravitational/teleport/lib/observability/metrics"
-)
-
-// APIMetrics wraps a dynamodbiface.DynamoDBAPI implementation and
-// reports statistics about the dynamo api operations
-type APIMetrics struct {
- dynamodbiface.DynamoDBAPI
- tableType TableType
-}
-
-// NewAPIMetrics returns a new APIMetrics for the provided TableType
-func NewAPIMetrics(tableType TableType, api dynamodbiface.DynamoDBAPI) (*APIMetrics, error) {
- if err := metrics.RegisterPrometheusCollectors(dynamoCollectors...); err != nil {
- return nil, trace.Wrap(err)
- }
-
- return &APIMetrics{
- DynamoDBAPI: api,
- tableType: tableType,
- }, nil
-}
-
-func (m *APIMetrics) DescribeTimeToLiveWithContext(ctx context.Context, input *dynamodb.DescribeTimeToLiveInput, opts ...request.Option) (*dynamodb.DescribeTimeToLiveOutput, error) {
- start := time.Now()
- output, err := m.DynamoDBAPI.DescribeTimeToLiveWithContext(ctx, input, opts...)
-
- recordMetrics(m.tableType, "describe_ttl", err, time.Since(start).Seconds())
-
- return output, err
-}
-
-func (m *APIMetrics) UpdateTimeToLiveWithContext(ctx context.Context, input *dynamodb.UpdateTimeToLiveInput, opts ...request.Option) (*dynamodb.UpdateTimeToLiveOutput, error) {
- start := time.Now()
- output, err := m.DynamoDBAPI.UpdateTimeToLiveWithContext(ctx, input, opts...)
-
- recordMetrics(m.tableType, "update_ttl", err, time.Since(start).Seconds())
-
- return output, err
-}
-
-func (m *APIMetrics) DeleteItemWithContext(ctx context.Context, input *dynamodb.DeleteItemInput, opts ...request.Option) (*dynamodb.DeleteItemOutput, error) {
- start := time.Now()
- output, err := m.DynamoDBAPI.DeleteItemWithContext(ctx, input, opts...)
-
- recordMetrics(m.tableType, "delete_item", err, time.Since(start).Seconds())
-
- return output, err
-}
-
-func (m *APIMetrics) GetItemWithContext(ctx context.Context, input *dynamodb.GetItemInput, opts ...request.Option) (*dynamodb.GetItemOutput, error) {
- start := time.Now()
- output, err := m.DynamoDBAPI.GetItemWithContext(ctx, input, opts...)
-
- recordMetrics(m.tableType, "get_item", err, time.Since(start).Seconds())
-
- return output, err
-}
-
-func (m *APIMetrics) PutItemWithContext(ctx context.Context, input *dynamodb.PutItemInput, opts ...request.Option) (*dynamodb.PutItemOutput, error) {
- start := time.Now()
- output, err := m.DynamoDBAPI.PutItemWithContext(ctx, input, opts...)
-
- recordMetrics(m.tableType, "put_item", err, time.Since(start).Seconds())
-
- return output, err
-}
-
-func (m *APIMetrics) UpdateItemWithContext(ctx context.Context, input *dynamodb.UpdateItemInput, opts ...request.Option) (*dynamodb.UpdateItemOutput, error) {
- start := time.Now()
- output, err := m.DynamoDBAPI.UpdateItemWithContext(ctx, input, opts...)
-
- recordMetrics(m.tableType, "update_item", err, time.Since(start).Seconds())
-
- return output, err
-}
-
-func (m *APIMetrics) DeleteTableWithContext(ctx context.Context, input *dynamodb.DeleteTableInput, opts ...request.Option) (*dynamodb.DeleteTableOutput, error) {
- start := time.Now()
- output, err := m.DynamoDBAPI.DeleteTableWithContext(ctx, input, opts...)
-
- recordMetrics(m.tableType, "delete_table", err, time.Since(start).Seconds())
-
- return output, err
-}
-
-func (m *APIMetrics) BatchWriteItemWithContext(ctx context.Context, input *dynamodb.BatchWriteItemInput, opts ...request.Option) (*dynamodb.BatchWriteItemOutput, error) {
- start := time.Now()
- output, err := m.DynamoDBAPI.BatchWriteItemWithContext(ctx, input, opts...)
-
- recordMetrics(m.tableType, "batch_write_item", err, time.Since(start).Seconds())
-
- return output, err
-}
-
-func (m *APIMetrics) ScanWithContext(ctx context.Context, input *dynamodb.ScanInput, opts ...request.Option) (*dynamodb.ScanOutput, error) {
- start := time.Now()
- output, err := m.DynamoDBAPI.ScanWithContext(ctx, input, opts...)
-
- recordMetrics(m.tableType, "scan", err, time.Since(start).Seconds())
-
- return output, err
-}
-
-func (m *APIMetrics) CreateTableWithContext(ctx context.Context, input *dynamodb.CreateTableInput, opts ...request.Option) (*dynamodb.CreateTableOutput, error) {
- start := time.Now()
- output, err := m.DynamoDBAPI.CreateTableWithContext(ctx, input, opts...)
-
- recordMetrics(m.tableType, "create_table", err, time.Since(start).Seconds())
-
- return output, err
-}
-
-func (m *APIMetrics) DescribeTableWithContext(ctx context.Context, input *dynamodb.DescribeTableInput, opts ...request.Option) (*dynamodb.DescribeTableOutput, error) {
- start := time.Now()
- output, err := m.DynamoDBAPI.DescribeTableWithContext(ctx, input, opts...)
-
- recordMetrics(m.tableType, "describe_table", err, time.Since(start).Seconds())
-
- return output, err
-}
-
-func (m *APIMetrics) QueryWithContext(ctx context.Context, input *dynamodb.QueryInput, opts ...request.Option) (*dynamodb.QueryOutput, error) {
- start := time.Now()
- output, err := m.DynamoDBAPI.QueryWithContext(ctx, input, opts...)
-
- recordMetrics(m.tableType, "query", err, time.Since(start).Seconds())
-
- return output, err
-}
diff --git a/lib/observability/metrics/dynamo/dynamo.go b/lib/observability/metrics/dynamo/dynamo.go
index 5971f066f6697..61594bfdbb892 100644
--- a/lib/observability/metrics/dynamo/dynamo.go
+++ b/lib/observability/metrics/dynamo/dynamo.go
@@ -19,7 +19,14 @@
package dynamo
import (
+ "context"
+ "time"
+
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/smithy-go/middleware"
"github.com/prometheus/client_golang/prometheus"
+
+ "github.com/gravitational/teleport/lib/observability/metrics"
)
var (
@@ -33,7 +40,7 @@ var (
apiRequests = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "dynamo_requests",
- Help: "Number of failed requests to the DynamoDB API by result",
+ Help: "Number of requests to the DynamoDB API by result",
},
[]string{"type", "operation", "result"},
)
@@ -47,14 +54,12 @@ var (
},
[]string{"type", "operation"},
)
-
- dynamoCollectors = []prometheus.Collector{
- apiRequests,
- apiRequestsTotal,
- apiRequestLatencies,
- }
)
+func init() {
+ _ = metrics.RegisterPrometheusCollectors(apiRequests, apiRequestsTotal, apiRequestLatencies)
+}
+
// TableType indicates which type of table metrics are being calculated for
type TableType string
@@ -65,15 +70,40 @@ const (
Events TableType = "events"
)
-// recordMetrics updates the set of dynamo api metrics
-func recordMetrics(tableType TableType, operation string, err error, latency float64) {
- labels := []string{string(tableType), operation}
- apiRequestsTotal.WithLabelValues(labels...).Inc()
- apiRequestLatencies.WithLabelValues(labels...).Observe(latency)
+// MetricsMiddleware returns middleware that can be used to capture
+// prometheus metrics for interacting with DynamoDB.
+func MetricsMiddleware(tableType TableType) []func(stack *middleware.Stack) error {
+ type timestampKey struct{}
- result := "success"
- if err != nil {
- result = "error"
+ return []func(s *middleware.Stack) error{
+ func(stack *middleware.Stack) error {
+ return stack.Initialize.Add(middleware.InitializeMiddlewareFunc(
+ "DynamoMetricsBefore",
+ func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (middleware.InitializeOutput, middleware.Metadata, error) {
+ return next.HandleInitialize(context.WithValue(ctx, timestampKey{}, time.Now()), in)
+ }), middleware.Before)
+ },
+ func(stack *middleware.Stack) error {
+ return stack.Initialize.Add(middleware.InitializeMiddlewareFunc(
+ "DynamoMetricsAfter",
+ func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (middleware.InitializeOutput, middleware.Metadata, error) {
+ out, md, err := next.HandleInitialize(ctx, in)
+
+ result := "success"
+ if err != nil {
+ result = "error"
+ }
+
+ then := ctx.Value(timestampKey{}).(time.Time)
+ operation := awsmiddleware.GetOperationName(ctx)
+ latency := time.Since(then).Seconds()
+
+ apiRequestsTotal.WithLabelValues(string(tableType), operation).Inc()
+ apiRequestLatencies.WithLabelValues(string(tableType), operation).Observe(latency)
+ apiRequests.WithLabelValues(string(tableType), operation, result).Inc()
+
+ return out, md, err
+ }), middleware.After)
+ },
}
- apiRequests.WithLabelValues(append(labels, result)...).Inc()
}
diff --git a/lib/observability/metrics/dynamo/streams.go b/lib/observability/metrics/dynamo/streams.go
deleted file mode 100644
index 62be3775008fe..0000000000000
--- a/lib/observability/metrics/dynamo/streams.go
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Teleport
- * Copyright (C) 2023 Gravitational, Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <https://www.gnu.org/licenses/>.
- */
-
-package dynamo
-
-import (
- "context"
- "time"
-
- "github.com/aws/aws-sdk-go/aws/request"
- "github.com/aws/aws-sdk-go/service/dynamodbstreams"
- "github.com/aws/aws-sdk-go/service/dynamodbstreams/dynamodbstreamsiface"
- "github.com/gravitational/trace"
-
- "github.com/gravitational/teleport/lib/observability/metrics"
-)
-
-// StreamsMetricsAPI wraps a dynamodbstreamsiface.DynamoDBStreamsAPI implementation and
-// reports statistics about the dynamo api operations
-type StreamsMetricsAPI struct {
- dynamodbstreamsiface.DynamoDBStreamsAPI
- tableType TableType
-}
-
-// NewStreamsMetricsAPI returns a new StreamsMetricsAPI for the provided TableType
-func NewStreamsMetricsAPI(tableType TableType, api dynamodbstreamsiface.DynamoDBStreamsAPI) (*StreamsMetricsAPI, error) {
- if err := metrics.RegisterPrometheusCollectors(dynamoCollectors...); err != nil {
- return nil, trace.Wrap(err)
- }
-
- return &StreamsMetricsAPI{
- DynamoDBStreamsAPI: api,
- tableType: tableType,
- }, nil
-}
-
-func (m *StreamsMetricsAPI) DescribeStreamWithContext(ctx context.Context, input *dynamodbstreams.DescribeStreamInput, opts ...request.Option) (*dynamodbstreams.DescribeStreamOutput, error) {
- start := time.Now()
- output, err := m.DynamoDBStreamsAPI.DescribeStreamWithContext(ctx, input, opts...)
-
- recordMetrics(m.tableType, "describe_stream", err, time.Since(start).Seconds())
-
- return output, err
-}
-
-func (m *StreamsMetricsAPI) GetShardIteratorWithContext(ctx context.Context, input *dynamodbstreams.GetShardIteratorInput, opts ...request.Option) (*dynamodbstreams.GetShardIteratorOutput, error) {
- start := time.Now()
- output, err := m.DynamoDBStreamsAPI.GetShardIteratorWithContext(ctx, input, opts...)
-
- recordMetrics(m.tableType, "get_shard_iterator", err, time.Since(start).Seconds())
-
- return output, err
-}
-
-func (m *StreamsMetricsAPI) GetRecordsWithContext(ctx context.Context, input *dynamodbstreams.GetRecordsInput, opts ...request.Option) (*dynamodbstreams.GetRecordsOutput, error) {
- start := time.Now()
- output, err := m.DynamoDBStreamsAPI.GetRecordsWithContext(ctx, input, opts...)
-
- recordMetrics(m.tableType, "get_records", err, time.Since(start).Seconds())
-
- return output, err
-}
diff --git a/lib/observability/metrics/s3/api.go b/lib/observability/metrics/s3/api.go
deleted file mode 100644
index 43231ab7c321a..0000000000000
--- a/lib/observability/metrics/s3/api.go
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
- * Teleport
- * Copyright (C) 2023 Gravitational, Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <https://www.gnu.org/licenses/>.
- */
-
-package s3
-
-import (
- "context"
- "time"
-
- "github.com/aws/aws-sdk-go/aws/request"
- "github.com/aws/aws-sdk-go/service/s3"
- "github.com/aws/aws-sdk-go/service/s3/s3iface"
- "github.com/gravitational/trace"
-
- "github.com/gravitational/teleport/lib/observability/metrics"
-)
-
-type APIMetrics struct {
- s3iface.S3API
-}
-
-func NewAPIMetrics(api s3iface.S3API) (*APIMetrics, error) {
- if err := metrics.RegisterPrometheusCollectors(s3Collectors...); err != nil {
- return nil, trace.Wrap(err)
- }
-
- return &APIMetrics{S3API: api}, nil
-}
-
-func (m *APIMetrics) ListObjectVersionsPagesWithContext(ctx context.Context, input *s3.ListObjectVersionsInput, f func(*s3.ListObjectVersionsOutput, bool) bool, opts ...request.Option) error {
- start := time.Now()
- err := m.S3API.ListObjectVersionsPagesWithContext(ctx, input, f, opts...)
-
- recordMetrics("list_object_versions_pages", err, time.Since(start).Seconds())
- return err
-}
-
-func (m *APIMetrics) ListObjectVersionsWithContext(ctx context.Context, input *s3.ListObjectVersionsInput, opts ...request.Option) (*s3.ListObjectVersionsOutput, error) {
- start := time.Now()
- output, err := m.S3API.ListObjectVersionsWithContext(ctx, input, opts...)
-
- recordMetrics("list_object_versions", err, time.Since(start).Seconds())
- return output, err
-}
-
-func (m *APIMetrics) DeleteObjectWithContext(ctx context.Context, input *s3.DeleteObjectInput, opts ...request.Option) (*s3.DeleteObjectOutput, error) {
- start := time.Now()
- output, err := m.S3API.DeleteObjectWithContext(ctx, input, opts...)
-
- recordMetrics("delete_object", err, time.Since(start).Seconds())
- return output, err
-}
-
-func (m *APIMetrics) DeleteBucketWithContext(ctx context.Context, input *s3.DeleteBucketInput, opts ...request.Option) (*s3.DeleteBucketOutput, error) {
- start := time.Now()
- output, err := m.S3API.DeleteBucketWithContext(ctx, input, opts...)
-
- recordMetrics("delete_bucket", err, time.Since(start).Seconds())
- return output, err
-}
-
-func (m *APIMetrics) HeadBucketWithContext(ctx context.Context, input *s3.HeadBucketInput, opts ...request.Option) (*s3.HeadBucketOutput, error) {
- start := time.Now()
- output, err := m.S3API.HeadBucketWithContext(ctx, input, opts...)
-
- recordMetrics("head_bucket", err, time.Since(start).Seconds())
- return output, err
-}
-
-func (m *APIMetrics) CreateBucketWithContext(ctx context.Context, input *s3.CreateBucketInput, opts ...request.Option) (*s3.CreateBucketOutput, error) {
- start := time.Now()
- output, err := m.S3API.CreateBucketWithContext(ctx, input, opts...)
-
- recordMetrics("create_bucket", err, time.Since(start).Seconds())
- return output, err
-}
-
-func (m *APIMetrics) PutBucketVersioningWithContext(ctx context.Context, input *s3.PutBucketVersioningInput, opts ...request.Option) (*s3.PutBucketVersioningOutput, error) {
- start := time.Now()
- output, err := m.S3API.PutBucketVersioningWithContext(ctx, input, opts...)
-
- recordMetrics("put_bucket_versioning", err, time.Since(start).Seconds())
- return output, err
-}
-
-func (m *APIMetrics) PutBucketEncryptionWithContext(ctx context.Context, input *s3.PutBucketEncryptionInput, opts ...request.Option) (*s3.PutBucketEncryptionOutput, error) {
- start := time.Now()
- output, err := m.S3API.PutBucketEncryptionWithContext(ctx, input, opts...)
-
- recordMetrics("put_bucket_encryption", err, time.Since(start).Seconds())
- return output, err
-}
-
-func (m *APIMetrics) CreateMultipartUploadWithContext(ctx context.Context, input *s3.CreateMultipartUploadInput, opts ...request.Option) (*s3.CreateMultipartUploadOutput, error) {
- start := time.Now()
- output, err := m.S3API.CreateMultipartUploadWithContext(ctx, input, opts...)
-
- recordMetrics("create_multipart_upload", err, time.Since(start).Seconds())
- return output, err
-}
-
-func (m *APIMetrics) UploadPartWithContext(ctx context.Context, input *s3.UploadPartInput, opts ...request.Option) (*s3.UploadPartOutput, error) {
- start := time.Now()
- output, err := m.S3API.UploadPartWithContext(ctx, input, opts...)
-
- recordMetrics("upload_part", err, time.Since(start).Seconds())
- return output, err
-}
-
-func (m *APIMetrics) AbortMultipartUploadWithContext(ctx context.Context, input *s3.AbortMultipartUploadInput, opts ...request.Option) (*s3.AbortMultipartUploadOutput, error) {
- start := time.Now()
- output, err := m.S3API.AbortMultipartUploadWithContext(ctx, input, opts...)
-
- recordMetrics("abort_multipart_upload", err, time.Since(start).Seconds())
- return output, err
-}
-
-func (m *APIMetrics) CompleteMultipartUploadWithContext(ctx context.Context, input *s3.CompleteMultipartUploadInput, opts ...request.Option) (*s3.CompleteMultipartUploadOutput, error) {
- start := time.Now()
- output, err := m.S3API.CompleteMultipartUploadWithContext(ctx, input, opts...)
-
- recordMetrics("complete_multipart_upload", err, time.Since(start).Seconds())
- return output, err
-}
-
-func (m *APIMetrics) ListPartsWithContext(ctx context.Context, input *s3.ListPartsInput, opts ...request.Option) (*s3.ListPartsOutput, error) {
- start := time.Now()
- output, err := m.S3API.ListPartsWithContext(ctx, input, opts...)
-
- recordMetrics("list_parts", err, time.Since(start).Seconds())
- return output, err
-}
-
-func (m *APIMetrics) ListMultipartUploadsWithContext(ctx context.Context, input *s3.ListMultipartUploadsInput, opts ...request.Option) (*s3.ListMultipartUploadsOutput, error) {
- start := time.Now()
- output, err := m.S3API.ListMultipartUploadsWithContext(ctx, input, opts...)
-
- recordMetrics("list_multipart_uploads", err, time.Since(start).Seconds())
- return output, err
-}
diff --git a/lib/observability/metrics/s3/s3.go b/lib/observability/metrics/s3/s3.go
index de8f04a33b3c2..a803fc587078b 100644
--- a/lib/observability/metrics/s3/s3.go
+++ b/lib/observability/metrics/s3/s3.go
@@ -19,6 +19,11 @@
package s3
import (
+ "context"
+ "time"
+
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/smithy-go/middleware"
"github.com/prometheus/client_golang/prometheus"
)
@@ -66,3 +71,42 @@ func recordMetrics(operation string, err error, latency float64) {
}
apiRequests.WithLabelValues(operation, result).Inc()
}
+
+// MetricsMiddleware returns middleware that can be used to capture
+// prometheus metrics for interacting with S3.
+func MetricsMiddleware() []func(stack *middleware.Stack) error {
+ type timestampKey struct{}
+
+ return []func(s *middleware.Stack) error{
+ func(stack *middleware.Stack) error {
+ return stack.Initialize.Add(middleware.InitializeMiddlewareFunc(
+ "S3MetricsBefore",
+ func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (middleware.InitializeOutput, middleware.Metadata, error) {
+ return next.HandleInitialize(context.WithValue(ctx, timestampKey{}, time.Now()), in)
+ }), middleware.Before)
+ },
+ func(stack *middleware.Stack) error {
+ return stack.Initialize.Add(middleware.InitializeMiddlewareFunc(
+ "S3MetricsAfter",
+ func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (middleware.InitializeOutput, middleware.Metadata, error) {
+ out, md, err := next.HandleInitialize(ctx, in)
+
+ result := "success"
+ if err != nil {
+ result = "error"
+ }
+
+ then := ctx.Value(timestampKey{}).(time.Time)
+ service := awsmiddleware.GetServiceID(ctx)
+ operation := awsmiddleware.GetOperationName(ctx)
+ latency := time.Since(then).Seconds()
+
+ apiRequestsTotal.WithLabelValues(service, operation).Inc()
+ apiRequestLatencies.WithLabelValues(service, operation).Observe(latency)
+ apiRequests.WithLabelValues(service, operation, result).Inc()
+
+ return out, md, err
+ }), middleware.After)
+ },
+ }
+}
diff --git a/lib/service/service.go b/lib/service/service.go
index 6de1b7d4ae230..115f4d506ffc1 100644
--- a/lib/service/service.go
+++ b/lib/service/service.go
@@ -46,7 +46,6 @@ import (
"sync/atomic"
"time"
- awscredentials "github.com/aws/aws-sdk-go/aws/credentials"
awssession "github.com/aws/aws-sdk-go/aws/session"
"github.com/google/renameio/v2"
"github.com/google/uuid"
@@ -1594,7 +1593,7 @@ func initAuthUploadHandler(ctx context.Context, auditConfig types.ClusterAuditCo
UseFIPSEndpoint: auditConfig.GetUseFIPSEndpoint(),
}
if externalAuditStorage.IsUsed() {
- config.Credentials = awscredentials.NewCredentials(externalAuditStorage.CredentialsProviderSDKV1())
+ config.CredentialsProvider = externalAuditStorage.CredentialsProvider()
}
if err := config.SetFromURL(uri, auditConfig.Region()); err != nil {
return nil, trace.Wrap(err)
@@ -1687,11 +1686,11 @@ func (process *TeleportProcess) initAuthExternalAuditLog(auditConfig types.Clust
Region: auditConfig.Region(),
EnableContinuousBackups: auditConfig.EnableContinuousBackups(),
EnableAutoScaling: auditConfig.EnableAutoScaling(),
- ReadMinCapacity: auditConfig.ReadMinCapacity(),
- ReadMaxCapacity: auditConfig.ReadMaxCapacity(),
+ ReadMinCapacity: int32(auditConfig.ReadMinCapacity()),
+ ReadMaxCapacity: int32(auditConfig.ReadMaxCapacity()),
ReadTargetValue: auditConfig.ReadTargetValue(),
- WriteMinCapacity: auditConfig.WriteMinCapacity(),
- WriteMaxCapacity: auditConfig.WriteMaxCapacity(),
+ WriteMinCapacity: int32(auditConfig.WriteMinCapacity()),
+ WriteMaxCapacity: int32(auditConfig.WriteMaxCapacity()),
WriteTargetValue: auditConfig.WriteTargetValue(),
RetentionPeriod: auditConfig.RetentionPeriod(),
UseFIPSEndpoint: auditConfig.GetUseFIPSEndpoint(),