diff --git a/CHANGELOG.md b/CHANGELOG.md index ec9b9ba321a..b3cf6d591ff 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,18 @@ +Release v1.48.12 (2023-12-04) +=== + +### Service Client Updates +* `service/billingconductor`: Updates service API and documentation +* `service/braket`: Updates service API and documentation +* `service/cloud9`: Updates service API and documentation + * This release adds the requirement to include the imageId parameter in the CreateEnvironmentEC2 API call. +* `service/cloudformation`: Updates service waiters + * Including UPDATE_* states as a success status for CreateStack waiter. +* `service/finspace`: Updates service API and documentation +* `service/medialive`: Updates service API and documentation + * Adds support for custom color correction on channels using 3D LUT files. +* `service/servicecatalog-appregistry`: Updates service documentation + Release v1.48.11 (2023-12-01) === diff --git a/aws/endpoints/defaults.go b/aws/endpoints/defaults.go index 24cb9c6f9b3..ee1ea9b6a6c 100644 --- a/aws/endpoints/defaults.go +++ b/aws/endpoints/defaults.go @@ -27612,6 +27612,38 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-verification-us-east-1", + }: endpoint{ + Hostname: "verification.signer-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "fips-verification-us-east-2", + }: endpoint{ + Hostname: "verification.signer-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "fips-verification-us-west-1", + }: endpoint{ + Hostname: "verification.signer-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + endpointKey{ + Region: "fips-verification-us-west-2", + }: endpoint{ + Hostname: "verification.signer-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -27654,6 +27686,166 @@ var awsPartition = partition{ }: endpoint{ Hostname: "signer-fips.us-west-2.amazonaws.com", }, + endpointKey{ + Region: "verification-af-south-1", + }: endpoint{ + Hostname: "verification.signer.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + endpointKey{ + Region: "verification-ap-east-1", + }: endpoint{ + Hostname: "verification.signer.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + endpointKey{ + Region: "verification-ap-northeast-1", + }: endpoint{ + Hostname: "verification.signer.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "verification-ap-northeast-2", + }: endpoint{ + Hostname: "verification.signer.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + endpointKey{ + Region: "verification-ap-south-1", + }: endpoint{ + Hostname: "verification.signer.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + endpointKey{ + Region: "verification-ap-southeast-1", + }: endpoint{ + Hostname: "verification.signer.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "verification-ap-southeast-2", + }: endpoint{ + Hostname: "verification.signer.ap-southeast-2.amazonaws.com", + CredentialScope: 
credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "verification-ca-central-1", + }: endpoint{ + Hostname: "verification.signer.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "verification-eu-central-1", + }: endpoint{ + Hostname: "verification.signer.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "verification-eu-north-1", + }: endpoint{ + Hostname: "verification.signer.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + endpointKey{ + Region: "verification-eu-south-1", + }: endpoint{ + Hostname: "verification.signer.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + endpointKey{ + Region: "verification-eu-west-1", + }: endpoint{ + Hostname: "verification.signer.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "verification-eu-west-2", + }: endpoint{ + Hostname: "verification.signer.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "verification-eu-west-3", + }: endpoint{ + Hostname: "verification.signer.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + endpointKey{ + Region: "verification-me-south-1", + }: endpoint{ + Hostname: "verification.signer.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + endpointKey{ + Region: "verification-sa-east-1", + }: endpoint{ + Hostname: "verification.signer.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + endpointKey{ + Region: "verification-us-east-1", + }: endpoint{ + Hostname: "verification.signer.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "verification-us-east-2", + }: endpoint{ + Hostname: "verification.signer.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "verification-us-west-1", + }: endpoint{ + Hostname: "verification.signer.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + endpointKey{ + Region: "verification-us-west-2", + }: endpoint{ + Hostname: "verification.signer.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, }, }, "simspaceweaver": service{ @@ -34761,6 +34953,22 @@ var awscnPartition = partition{ endpointKey{ Region: "cn-northwest-1", }: endpoint{}, + endpointKey{ + Region: "verification-cn-north-1", + }: endpoint{ + Hostname: "verification.signer.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "verification-cn-northwest-1", + }: endpoint{ + Hostname: "verification.signer.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, }, }, "sms": service{ @@ -42464,12 +42672,42 @@ var awsisoPartition = partition{ }, "redshift": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-iso-east-1", + }: endpoint{ + Hostname: "redshift-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-iso-west-1", 
+ }: endpoint{ + Hostname: "redshift-fips.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-iso-east-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-fips.us-iso-east-1.c2s.ic.gov", + }, endpointKey{ Region: "us-iso-west-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-fips.us-iso-west-1.c2s.ic.gov", + }, }, }, "resource-groups": service{ @@ -43238,9 +43476,24 @@ var awsisobPartition = partition{ }, "redshift": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-isob-east-1", + }: endpoint{ + Hostname: "redshift-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-isob-east-1", }: endpoint{}, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-fips.us-isob-east-1.sc2s.sgov.gov", + }, }, }, "resource-groups": service{ diff --git a/aws/version.go b/aws/version.go index a52a0ea2fb2..fdefdf07f5a 100644 --- a/aws/version.go +++ b/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.48.11" +const SDKVersion = "1.48.12" diff --git a/models/apis/billingconductor/2021-07-30/api-2.json b/models/apis/billingconductor/2021-07-30/api-2.json index e0d36eca5d6..88745aa0b3a 100644 --- a/models/apis/billingconductor/2021-07-30/api-2.json +++ b/models/apis/billingconductor/2021-07-30/api-2.json @@ -917,7 +917,8 @@ "BillingGroupArn":{"shape":"BillingGroupArn"}, "BillingPeriodRange":{"shape":"CustomLineItemBillingPeriodRange"}, "Tags":{"shape":"TagMap"}, - "ChargeDetails":{"shape":"CustomLineItemChargeDetails"} + "ChargeDetails":{"shape":"CustomLineItemChargeDetails"}, + "AccountId":{"shape":"AccountId"} } }, "CreateCustomLineItemOutput":{ @@ -1088,7 +1089,8 @@ "BillingGroupArn":{"shape":"BillingGroupArn"}, "CreationTime":{"shape":"Instant"}, "LastModifiedTime":{"shape":"Instant"}, - "AssociationSize":{"shape":"NumberOfAssociations"} + "AssociationSize":{"shape":"NumberOfAssociations"}, + "AccountId":{"shape":"AccountId"} } }, "CustomLineItemName":{ @@ -1156,7 +1158,8 @@ "StartBillingPeriod":{"shape":"BillingPeriod"}, "EndBillingPeriod":{"shape":"BillingPeriod"}, "Arn":{"shape":"CustomLineItemArn"}, - "StartTime":{"shape":"Instant"} + "StartTime":{"shape":"Instant"}, + "AccountId":{"shape":"AccountId"} } }, "DeleteBillingGroupInput":{ @@ -1448,7 +1451,8 @@ "members":{ "Names":{"shape":"CustomLineItemNameList"}, "BillingGroups":{"shape":"BillingGroupArnList"}, - "Arns":{"shape":"CustomLineItemArns"} + "Arns":{"shape":"CustomLineItemArns"}, + "AccountIds":{"shape":"AccountIdList"} } }, "ListCustomLineItemsInput":{ @@ -2159,7 +2163,8 @@ "INVALID_SKU_COMBO", "INVALID_FILTER", "TOO_MANY_AUTO_ASSOCIATE_BILLING_GROUPS", - "CANNOT_DELETE_AUTO_ASSOCIATE_BILLING_GROUP" + "CANNOT_DELETE_AUTO_ASSOCIATE_BILLING_GROUP", + "ILLEGAL_ACCOUNT_ID" ] } } diff --git a/models/apis/billingconductor/2021-07-30/docs-2.json b/models/apis/billingconductor/2021-07-30/docs-2.json index 222e8ed20fb..117688542ef 100644 --- a/models/apis/billingconductor/2021-07-30/docs-2.json +++ b/models/apis/billingconductor/2021-07-30/docs-2.json @@ -78,6 +78,9 @@ "AccountIdList$member": null, "BillingGroupListElement$PrimaryAccountId": "
The account ID that serves as the main account in a billing group.
", "CreateBillingGroupInput$PrimaryAccountId": "The account ID that serves as the main account in a billing group.
", + "CreateCustomLineItemInput$AccountId": "The Amazon Web Services account in which this custom line item will be applied to.
", + "CustomLineItemListElement$AccountId": "The Amazon Web Services account in which this custom line item will be applied to.
", + "CustomLineItemVersionListElement$AccountId": "The Amazon Web Services account in which this custom line item will be applied to.
", "ListAccountAssociationsFilter$AccountId": "The Amazon Web Services account ID to filter on.
", "UpdateBillingGroupOutput$PrimaryAccountId": "The account ID that serves as the main account in a billing group.
" } @@ -93,7 +96,8 @@ "refs": { "AccountGrouping$LinkedAccountIds": "The account IDs that make up the billing group. Account IDs must be a part of the consolidated billing family, and not associated with another billing group.
", "AssociateAccountsInput$AccountIds": "The associating array of account IDs.
", - "DisassociateAccountsInput$AccountIds": "The array of account IDs to disassociate.
" + "DisassociateAccountsInput$AccountIds": "The array of account IDs to disassociate.
", + "ListCustomLineItemsFilter$AccountIds": "The Amazon Web Services accounts in which this custom line item will be applied to.
" } }, "AccountName": { diff --git a/models/apis/billingconductor/2021-07-30/endpoint-rule-set-1.json b/models/apis/billingconductor/2021-07-30/endpoint-rule-set-1.json index 260bb262df9..d54dad5ad91 100644 --- a/models/apis/billingconductor/2021-07-30/endpoint-rule-set-1.json +++ b/models/apis/billingconductor/2021-07-30/endpoint-rule-set-1.json @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -83,7 +82,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -96,7 +96,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -110,7 +109,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -184,7 +182,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -219,7 +216,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -230,14 +226,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -251,14 +249,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -267,11 +263,11 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -282,14 +278,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -303,7 +301,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -323,7 +320,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -334,14 +330,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -352,9 +350,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], diff --git a/models/apis/braket/2019-09-01/api-2.json b/models/apis/braket/2019-09-01/api-2.json index 4d49ca107af..cf1fa7b5473 100644 --- a/models/apis/braket/2019-09-01/api-2.json +++ b/models/apis/braket/2019-09-01/api-2.json @@ -253,6 +253,29 @@ "scriptModeConfig":{"shape":"ScriptModeConfig"} } }, + "Association":{ + "type":"structure", + "required":[ + "arn", + "type" + ], + "members":{ + "arn":{"shape":"BraketResourceArn"}, + "type":{"shape":"AssociationType"} + } + }, + "AssociationType":{ + "type":"string", + "enum":["RESERVATION_TIME_WINDOW_ARN"] + }, + "Associations":{ + "type":"list", + "member":{"shape":"Association"} + }, + "BraketResourceArn":{ + "type":"string", + "pattern":"^arn:aws[a-z\\-]*:braket:[a-z0-9\\-]*:[0-9]{12}:.*$" + }, "CancelJobRequest":{ "type":"structure", "required":["jobArn"], @@ -349,6 +372,7 @@ ], "members":{ "algorithmSpecification":{"shape":"AlgorithmSpecification"}, + "associations":{"shape":"CreateJobRequestAssociationsList"}, "checkpointConfig":{"shape":"JobCheckpointConfig"}, "clientToken":{ "shape":"String64", @@ -365,6 +389,12 @@ "tags":{"shape":"TagsMap"} } }, + "CreateJobRequestAssociationsList":{ + "type":"list", + "member":{"shape":"Association"}, + "max":1, + "min":0 + }, "CreateJobRequestInputDataConfigList":{ "type":"list", "member":{"shape":"InputFileConfig"}, @@ -399,6 +429,7 @@ "shape":"JsonValue", "jsonvalue":true }, + "associations":{"shape":"CreateQuantumTaskRequestAssociationsList"}, 
"clientToken":{ "shape":"String64", "idempotencyToken":true @@ -415,6 +446,12 @@ "tags":{"shape":"TagsMap"} } }, + "CreateQuantumTaskRequestAssociationsList":{ + "type":"list", + "member":{"shape":"Association"}, + "max":1, + "min":0 + }, "CreateQuantumTaskRequestDeviceParametersString":{ "type":"string", "max":48000, @@ -599,6 +636,7 @@ ], "members":{ "algorithmSpecification":{"shape":"AlgorithmSpecification"}, + "associations":{"shape":"Associations"}, "billableDuration":{"shape":"Integer"}, "checkpointConfig":{"shape":"JobCheckpointConfig"}, "createdAt":{"shape":"SyntheticTimestamp_date_time"}, @@ -655,6 +693,7 @@ "status" ], "members":{ + "associations":{"shape":"Associations"}, "createdAt":{"shape":"SyntheticTimestamp_date_time"}, "deviceArn":{"shape":"DeviceArn"}, "deviceParameters":{ @@ -703,7 +742,8 @@ "HyperParametersValueString":{ "type":"string", "max":2500, - "min":1 + "min":1, + "pattern":"^.*$" }, "InputConfigList":{ "type":"list", diff --git a/models/apis/braket/2019-09-01/docs-2.json b/models/apis/braket/2019-09-01/docs-2.json index 4d39d65ee59..de29dc7c077 100644 --- a/models/apis/braket/2019-09-01/docs-2.json +++ b/models/apis/braket/2019-09-01/docs-2.json @@ -29,6 +29,33 @@ "GetJobResponse$algorithmSpecification": "Definition of the Amazon Braket job created. Specifies the container image the job uses, information about the Python scripts used for entry and training, and the user-defined metrics used to evaluation the job.
" } }, + "Association": { + "base": "The Amazon Braket resource and the association type.
", + "refs": { + "Associations$member": null, + "CreateJobRequestAssociationsList$member": null, + "CreateQuantumTaskRequestAssociationsList$member": null + } + }, + "AssociationType": { + "base": null, + "refs": { + "Association$type": "The association type for the specified Amazon Braket resource arn.
" + } + }, + "Associations": { + "base": null, + "refs": { + "GetJobResponse$associations": "The list of Amazon Braket resources associated with the hybrid job.
", + "GetQuantumTaskResponse$associations": "The list of Amazon Braket resources associated with the quantum task.
" + } + }, + "BraketResourceArn": { + "base": null, + "refs": { + "Association$arn": "The Amazon Braket resource arn.
" + } + }, "CancelJobRequest": { "base": null, "refs": { @@ -78,6 +105,12 @@ "refs": { } }, + "CreateJobRequestAssociationsList": { + "base": null, + "refs": { + "CreateJobRequest$associations": "The list of Amazon Braket resources associated with the hybrid job.
" + } + }, "CreateJobRequestInputDataConfigList": { "base": null, "refs": { @@ -100,6 +133,12 @@ "refs": { } }, + "CreateQuantumTaskRequestAssociationsList": { + "base": null, + "refs": { + "CreateQuantumTaskRequest$associations": "The list of Amazon Braket resources associated with the quantum task.
" + } + }, "CreateQuantumTaskRequestDeviceParametersString": { "base": null, "refs": { @@ -454,7 +493,7 @@ "CancelQuantumTaskRequest$quantumTaskArn": "The ARN of the task to cancel.
", "CancelQuantumTaskResponse$quantumTaskArn": "The ARN of the task.
", "CreateQuantumTaskResponse$quantumTaskArn": "The ARN of the task created by the request.
", - "GetQuantumTaskRequest$quantumTaskArn": "the ARN of the task to retrieve.
", + "GetQuantumTaskRequest$quantumTaskArn": "The ARN of the task to retrieve.
", "GetQuantumTaskResponse$quantumTaskArn": "The ARN of the task.
", "QuantumTaskSummary$quantumTaskArn": "The ARN of the task.
" } @@ -747,7 +786,7 @@ "GetJobResponse$startedAt": "The date and time that the Amazon Braket job was started.
", "GetQuantumTaskResponse$createdAt": "The time at which the task was created.
", "GetQuantumTaskResponse$endedAt": "The time at which the task ended.
", - "JobEventDetails$timeOfEvent": "TThe type of event that occurred related to the Amazon Braket job.
", + "JobEventDetails$timeOfEvent": "The type of event that occurred related to the Amazon Braket job.
", "JobSummary$createdAt": "The date and time that the Amazon Braket job was created.
", "JobSummary$endedAt": "The date and time that the Amazon Braket job ended.
", "JobSummary$startedAt": "The date and time that the Amazon Braket job was started.
", diff --git a/models/apis/braket/2019-09-01/endpoint-rule-set-1.json b/models/apis/braket/2019-09-01/endpoint-rule-set-1.json index 7a57ae60375..6d366bfcdd1 100644 --- a/models/apis/braket/2019-09-01/endpoint-rule-set-1.json +++ b/models/apis/braket/2019-09-01/endpoint-rule-set-1.json @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -83,7 +82,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -96,7 +96,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -110,7 +109,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -133,7 +131,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -168,7 +165,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -179,14 +175,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -200,14 +198,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -216,11 +212,11 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -231,14 +227,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -252,7 +250,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -272,7 +269,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -283,14 +279,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -301,9 +299,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], diff --git a/models/apis/cloud9/2017-09-23/api-2.json b/models/apis/cloud9/2017-09-23/api-2.json index a57b337bf02..aa9cbdd5a4b 100644 --- a/models/apis/cloud9/2017-09-23/api-2.json +++ b/models/apis/cloud9/2017-09-23/api-2.json @@ -289,7 +289,8 @@ "type":"structure", "required":[ "name", - "instanceType" + "instanceType", + "imageId" ], "members":{ "name":{"shape":"EnvironmentName"}, @@ -523,7 +524,7 @@ "type":"string", "max":20, "min":5, - "pattern":"^[a-z][1-9][.][a-z0-9]+$" + "pattern":"^[a-z]+[1-9][.][a-z0-9]+$" }, "InternalServerErrorException":{ "type":"structure", diff --git a/models/apis/cloud9/2017-09-23/docs-2.json b/models/apis/cloud9/2017-09-23/docs-2.json index 67cf30ade09..1324c02158a 100644 --- a/models/apis/cloud9/2017-09-23/docs-2.json +++ b/models/apis/cloud9/2017-09-23/docs-2.json @@ -232,7 +232,7 @@ "ImageId": { "base": null, "refs": { - "CreateEnvironmentEC2Request$imageId": "The identifier for the Amazon Machine Image (AMI) that's used to create the EC2 instance. To choose an AMI for the instance, you must specify a valid AMI alias or a valid Amazon EC2 Systems Manager (SSM) path.
From December 04, 2023, you will be required to include the imageId
parameter for the CreateEnvironmentEC2
action. This change will be reflected across all direct methods of communicating with the API, such as Amazon Web Services SDK, Amazon Web Services CLI and Amazon Web Services CloudFormation. This change will only affect direct API consumers, and not Cloud9 console users.
From January 22, 2024, Amazon Linux (AL1) will be removed from the list of available image IDs for Cloud9. This is necessary as AL1 will reach the end of maintenance support in December 2023, and as a result will no longer receive security updates. We recommend using Amazon Linux 2 as the AMI to create your environment as it is fully supported. This change will only affect direct API consumers, and not Cloud9 console users.
Since Ubuntu 18.04 has ended standard support as of May 31, 2023, we recommend you choose Ubuntu 22.04.
AMI aliases
Amazon Linux (default): amazonlinux-1-x86_64
Amazon Linux 2: amazonlinux-2-x86_64
Ubuntu 18.04: ubuntu-18.04-x86_64
Ubuntu 22.04: ubuntu-22.04-x86_64
SSM paths
Amazon Linux (default): resolve:ssm:/aws/service/cloud9/amis/amazonlinux-1-x86_64
Amazon Linux 2: resolve:ssm:/aws/service/cloud9/amis/amazonlinux-2-x86_64
Ubuntu 18.04: resolve:ssm:/aws/service/cloud9/amis/ubuntu-18.04-x86_64
Ubuntu 22.04: resolve:ssm:/aws/service/cloud9/amis/ubuntu-22.04-x86_64
The identifier for the Amazon Machine Image (AMI) that's used to create the EC2 instance. To choose an AMI for the instance, you must specify a valid AMI alias or a valid Amazon EC2 Systems Manager (SSM) path.
From December 04, 2023, you will be required to include the imageId
parameter for the CreateEnvironmentEC2
action. This change will be reflected across all direct methods of communicating with the API, such as Amazon Web Services SDK, Amazon Web Services CLI and Amazon Web Services CloudFormation. This change will only affect direct API consumers, and not Cloud9 console users.
From January 22, 2024, Amazon Linux (AL1) will be removed from the list of available image IDs for Cloud9. This is necessary as AL1 will reach the end of maintenance support in December 2023, and as a result will no longer receive security updates. We recommend using Amazon Linux 2 as the AMI to create your environment as it is fully supported. This change will only affect direct API consumers, and not Cloud9 console users.
Since Ubuntu 18.04 has ended standard support as of May 31, 2023, we recommend you choose Ubuntu 22.04.
AMI aliases
Amazon Linux: amazonlinux-1-x86_64
Amazon Linux 2: amazonlinux-2-x86_64
Ubuntu 18.04: ubuntu-18.04-x86_64
Ubuntu 22.04: ubuntu-22.04-x86_64
SSM paths
Amazon Linux: resolve:ssm:/aws/service/cloud9/amis/amazonlinux-1-x86_64
Amazon Linux 2: resolve:ssm:/aws/service/cloud9/amis/amazonlinux-2-x86_64
Ubuntu 18.04: resolve:ssm:/aws/service/cloud9/amis/ubuntu-18.04-x86_64
Ubuntu 22.04: resolve:ssm:/aws/service/cloud9/amis/ubuntu-22.04-x86_64
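Because imageId is now a required member of CreateEnvironmentEC2Request, a direct Cloud9 call through the Go SDK must pass an AMI alias or SSM path alongside the name and instance type. A minimal sketch using the Amazon Linux 2 SSM path from the list above; the environment name and instance type are arbitrary example values:

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/cloud9"
)

func main() {
    sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
    svc := cloud9.New(sess)

    // ImageId is now required; omitting it fails client-side validation.
    // The SSM path resolves to the latest Amazon Linux 2 AMI published for Cloud9.
    out, err := svc.CreateEnvironmentEC2(&cloud9.CreateEnvironmentEC2Input{
        Name:         aws.String("my-demo-environment"), // example name
        InstanceType: aws.String("t3.small"),
        ImageId:      aws.String("resolve:ssm:/aws/service/cloud9/amis/amazonlinux-2-x86_64"),
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(aws.StringValue(out.EnvironmentId))
}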
A list of change request objects that are run in order. A change request object consists of changeType , s3Path, and a dbPath. A changeType can has the following values:
PUT – Adds or updates files in a database.
DELETE – Deletes files in a database.
All the change requests require a mandatory dbPath attribute that defines the path within the database directory. The s3Path attribute defines the s3 source file path and is required for a PUT change type.
Here is an example of how you can use the change request object:
[ { \"changeType\": \"PUT\", \"s3Path\":\"s3://bucket/db/2020.01.02/\", \"dbPath\":\"/2020.01.02/\"}, { \"changeType\": \"PUT\", \"s3Path\":\"s3://bucket/db/sym\", \"dbPath\":\"/\"}, { \"changeType\": \"DELETE\", \"dbPath\": \"/2020.01.01/\"} ]
In this example, the first request with PUT change type allows you to add files in the given s3Path under the 2020.01.02 partition of the database. The second request with PUT change type allows you to add a single sym file at database root location. The last request with DELETE change type allows you to delete the files under the 2020.01.01 partition of the database.
", + "CreateKxChangesetRequest$changeRequests": "A list of change request objects that are run in order. A change request object consists of changeType
, s3Path
, and dbPath
. A changeType can have the following values:
PUT – Adds or updates files in a database.
DELETE – Deletes files in a database.
All the change requests require a mandatory dbPath
attribute that defines the path within the database directory. All database paths must start with a leading / and end with a trailing /. The s3Path
attribute defines the s3 source file path and is required for a PUT change type. The s3Path
must end with a trailing / if it is a directory and must end without a trailing / if it is a file.
Here are a few examples of how you can use the change request object:
This request adds a single sym file at database root location.
{ \"changeType\": \"PUT\", \"s3Path\":\"s3://bucket/db/sym\", \"dbPath\":\"/\"}
This request adds files in the given s3Path
under the 2020.01.02 partition of the database.
{ \"changeType\": \"PUT\", \"s3Path\":\"s3://bucket/db/2020.01.02/\", \"dbPath\":\"/2020.01.02/\"}
This request adds files in the given s3Path
under the taq table partition of the database.
[ { \"changeType\": \"PUT\", \"s3Path\":\"s3://bucket/db/2020.01.02/taq/\", \"dbPath\":\"/2020.01.02/taq/\"}]
This request deletes the 2020.01.02 partition of the database.
[{ \"changeType\": \"DELETE\", \"dbPath\": \"/2020.01.02/\"} ]
The DELETE request allows you to delete the existing files under the 2020.01.02 partition of the database, and the PUT request adds a new taq table under it.
[ {\"changeType\": \"DELETE\", \"dbPath\":\"/2020.01.02/\"}, {\"changeType\": \"PUT\", \"s3Path\":\"s3://bucket/db/2020.01.02/taq/\", \"dbPath\":\"/2020.01.02/taq/\"}]
A list of change requests.
", "GetKxChangesetResponse$changeRequests": "A list of change request objects that are run in order.
" } @@ -667,7 +667,7 @@ "CreateKxClusterResponse$initializationScript": "Specifies a Q program that will be run at launch of a cluster. It is a relative path within .zip file that contains the custom code, which will be loaded on the cluster. It must include the file name itself. For example, somedir/init.q
.
Specifies a Q program that will be run at launch of a cluster. It is a relative path within .zip file that contains the custom code, which will be loaded on the cluster. It must include the file name itself. For example, somedir/init.q
.
Specifies a Q program that will be run at launch of a cluster. It is a relative path within .zip file that contains the custom code, which will be loaded on the cluster. It must include the file name itself. For example, somedir/init.q
.
Specifies a Q program that will be run at launch of a cluster. It is a relative path within .zip file that contains the custom code, which will be loaded on the cluster. It must include the file name itself. For example, somedir/init.q
.
Specifies a Q program that will be run at launch of a cluster. It is a relative path within .zip file that contains the custom code, which will be loaded on the cluster. It must include the file name itself. For example, somedir/init.q
.
You cannot update this parameter for a NO_RESTART
deployment.
The type of deployment that you want on a cluster.
ROLLING – This option updates the cluster by stopping the existing q process and starting a new q process with the updated configuration.
FORCE – This option updates the cluster by immediately stopping all the running processes before starting up new ones with the updated configuration.
The type of deployment that you want on a cluster.
ROLLING – This option updates the cluster by stopping the existing q process and starting a new q process with the updated configuration.
NO_RESTART – This option updates the cluster without stopping the running q process. It is only available for GP
type clusters. This option is quicker because it reduces the turnaround time to update the configuration on a cluster.
With this deployment mode, you cannot update the initializationScript
and commandLineArguments
parameters.
FORCE – This option updates the cluster by immediately stopping all the running processes before starting up new ones with the updated configuration.
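A NO_RESTART code deployment through the Go SDK would look roughly like the sketch below. The UpdateKxClusterCodeConfigurationInput, CodeConfiguration, and KxClusterCodeDeploymentConfiguration names follow the finspace model but are not shown in this excerpt, so treat them as assumptions; the environment, cluster, and bucket values are placeholders:

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/finspace"
)

func main() {
    sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
    svc := finspace.New(sess)

    // Push new custom code to a GP cluster without restarting the running q process.
    // With NO_RESTART, initializationScript and commandLineArguments must be left
    // unset, per the documentation above.
    out, err := svc.UpdateKxClusterCodeConfiguration(&finspace.UpdateKxClusterCodeConfigurationInput{
        EnvironmentId: aws.String("env-example"),     // placeholder environment ID
        ClusterName:   aws.String("example-cluster"), // placeholder cluster name
        Code: &finspace.CodeConfiguration{
            S3Bucket: aws.String("bucket"), // placeholder bucket
            S3Key:    aws.String("code/custom-code.zip"),
        },
        DeploymentConfiguration: &finspace.KxClusterCodeDeploymentConfiguration{
            DeploymentStrategy: aws.String("NO_RESTART"),
        },
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(out)
}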
Specifies the type of KDB database that is being created. The following types are available:
HDB – A Historical Database. The data is only accessible with read-only permissions from one of the FinSpace managed kdb databases mounted to the cluster.
RDB – A Realtime Database. This type of database captures all the data from a ticker plant and stores it in memory until the end of day, after which it writes all of its data to a disk and reloads the HDB. This cluster type requires local storage for temporary storage of data during the savedown process. If you specify this field in your request, you must provide the savedownStorageConfiguration
parameter.
GATEWAY – A gateway cluster allows you to access data across processes in kdb systems. It allows you to create your own routing logic using the initialization scripts and custom code. This type of cluster does not require a writable local storage.
Specifies the type of KDB database that is being created. The following types are available:
HDB – A Historical Database. The data is only accessible with read-only permissions from one of the FinSpace managed kdb databases mounted to the cluster.
RDB – A Realtime Database. This type of database captures all the data from a ticker plant and stores it in memory until the end of day, after which it writes all of its data to a disk and reloads the HDB. This cluster type requires local storage for temporary storage of data during the savedown process. If you specify this field in your request, you must provide the savedownStorageConfiguration
parameter.
GATEWAY – A gateway cluster allows you to access data across processes in kdb systems. It allows you to create your own routing logic using the initialization scripts and custom code. This type of cluster does not require a writable local storage.
Specifies the type of KDB database that is being created. The following types are available:
HDB – A Historical Database. The data is only accessible with read-only permissions from one of the FinSpace managed kdb databases mounted to the cluster.
RDB – A Realtime Database. This type of database captures all the data from a ticker plant and stores it in memory until the end of day, after which it writes all of its data to a disk and reloads the HDB. This cluster type requires local storage for temporary storage of data during the savedown process. If you specify this field in your request, you must provide the savedownStorageConfiguration
parameter.
GATEWAY – A gateway cluster allows you to access data across processes in kdb systems. It allows you to create your own routing logic using the initialization scripts and custom code. This type of cluster does not require a writable local storage.
Specifies the type of KDB database that is being created. The following types are available:
HDB – A Historical Database. The data is only accessible with read-only permissions from one of the FinSpace managed kdb databases mounted to the cluster.
RDB – A Realtime Database. This type of database captures all the data from a ticker plant and stores it in memory until the end of day, after which it writes all of its data to a disk and reloads the HDB. This cluster type requires local storage for temporary storage of data during the savedown process. If you specify this field in your request, you must provide the savedownStorageConfiguration
parameter.
GATEWAY – A gateway cluster allows you to access data across processes in kdb systems. It allows you to create your own routing logic using the initialization scripts and custom code. This type of cluster does not require a writable local storage.
Specifies the type of KDB database that is being created. The following types are available:
HDB – A Historical Database. The data is only accessible with read-only permissions from one of the FinSpace managed kdb databases mounted to the cluster.
RDB – A Realtime Database. This type of database captures all the data from a ticker plant and stores it in memory until the end of day, after which it writes all of its data to a disk and reloads the HDB. This cluster type requires local storage for temporary storage of data during the savedown process. If you specify this field in your request, you must provide the savedownStorageConfiguration
parameter.
GATEWAY – A gateway cluster allows you to access data across processes in kdb systems. It allows you to create your own routing logic using the initialization scripts and custom code. This type of cluster does not require a writable local storage.
Specifies the type of KDB database that is being created. The following types are available:
HDB – A Historical Database. The data is only accessible with read-only permissions from one of the FinSpace managed kdb databases mounted to the cluster.
RDB – A Realtime Database. This type of database captures all the data from a ticker plant and stores it in memory until the end of day, after which it writes all of its data to a disk and reloads the HDB. This cluster type requires local storage for temporary storage of data during the savedown process. If you specify this field in your request, you must provide the savedownStorageConfiguration
parameter.
GATEWAY – A gateway cluster allows you to access data across processes in kdb systems. It allows you to create your own routing logic using the initialization scripts and custom code. This type of cluster does not require a writable local storage.
GP – A general purpose cluster allows you to quickly iterate on code during development by granting greater access to system commands and enabling a fast reload of custom code. This cluster type can optionally mount databases including cache and savedown storage. For this cluster type, the node count is fixed at 1. It does not support autoscaling and supports only SINGLE
AZ mode.
Specifies the type of KDB database that is being created. The following types are available:
HDB – A Historical Database. The data is only accessible with read-only permissions from one of the FinSpace managed kdb databases mounted to the cluster.
RDB – A Realtime Database. This type of database captures all the data from a ticker plant and stores it in memory until the end of day, after which it writes all of its data to a disk and reloads the HDB. This cluster type requires local storage for temporary storage of data during the savedown process. If you specify this field in your request, you must provide the savedownStorageConfiguration
parameter.
GATEWAY – A gateway cluster allows you to access data across processes in kdb systems. It allows you to create your own routing logic using the initialization scripts and custom code. This type of cluster does not require a writable local storage.
GP – A general purpose cluster allows you to quickly iterate on code during development by granting greater access to system commands and enabling a fast reload of custom code. This cluster type can optionally mount databases including cache and savedown storage. For this cluster type, the node count is fixed at 1. It does not support autoscaling and supports only SINGLE
AZ mode.
Specifies the type of KDB database that is being created. The following types are available:
HDB – A Historical Database. The data is only accessible with read-only permissions from one of the FinSpace managed kdb databases mounted to the cluster.
RDB – A Realtime Database. This type of database captures all the data from a ticker plant and stores it in memory until the end of day, after which it writes all of its data to a disk and reloads the HDB. This cluster type requires local storage for temporary storage of data during the savedown process. If you specify this field in your request, you must provide the savedownStorageConfiguration
parameter.
GATEWAY – A gateway cluster allows you to access data across processes in kdb systems. It allows you to create your own routing logic using the initialization scripts and custom code. This type of cluster does not require a writable local storage.
GP – A general purpose cluster allows you to quickly iterate on code during development by granting greater access to system commands and enabling a fast reload of custom code. This cluster type can optionally mount databases including cache and savedown storage. For this cluster type, the node count is fixed at 1. It does not support autoscaling and supports only SINGLE
AZ mode.
Specifies the type of KDB database that is being created. The following types are available:
HDB – A Historical Database. The data is only accessible with read-only permissions from one of the FinSpace managed kdb databases mounted to the cluster.
RDB – A Realtime Database. This type of database captures all the data from a ticker plant and stores it in memory until the end of day, after which it writes all of its data to a disk and reloads the HDB. This cluster type requires local storage for temporary storage of data during the savedown process. If you specify this field in your request, you must provide the savedownStorageConfiguration
parameter.
GATEWAY – A gateway cluster allows you to access data across processes in kdb systems. It allows you to create your own routing logic using the initialization scripts and custom code. This type of cluster does not require a writable local storage.
GP – A general purpose cluster allows you to quickly iterate on code during development by granting greater access to system commands and enabling a fast reload of custom code. This cluster type can optionally mount databases including cache and savedown storage. For this cluster type, the node count is fixed at 1. It does not support autoscaling and supports only SINGLE
AZ mode.
Specifies the type of KDB database that is being created. The following types are available:
HDB – A Historical Database. The data is only accessible with read-only permissions from one of the FinSpace managed kdb databases mounted to the cluster.
RDB – A Realtime Database. This type of database captures all the data from a ticker plant and stores it in memory until the end of day, after which it writes all of its data to a disk and reloads the HDB. This cluster type requires local storage for temporary storage of data during the savedown process. If you specify this field in your request, you must provide the savedownStorageConfiguration
parameter.
GATEWAY – A gateway cluster allows you to access data across processes in kdb systems. It allows you to create your own routing logic using the initialization scripts and custom code. This type of cluster does not require a writable local storage.
GP – A general purpose cluster allows you to quickly iterate on code during development by granting greater access to system commands and enabling a fast reload of custom code. This cluster type can optionally mount databases including cache and savedown storage. For this cluster type, the node count is fixed at 1. It does not support autoscaling and supports only SINGLE
AZ mode.
Defines the key-value pairs to make them available inside the cluster.
", "CreateKxClusterResponse$commandLineArguments": "Defines the key-value pairs to make them available inside the cluster.
", "GetKxClusterResponse$commandLineArguments": "Defines key-value pairs to make them available inside the cluster.
", - "UpdateKxClusterCodeConfigurationRequest$commandLineArguments": "Specifies the key-value pairs to make them available inside the cluster.
" + "UpdateKxClusterCodeConfigurationRequest$commandLineArguments": "Specifies the key-value pairs to make them available inside the cluster.
You cannot update this parameter for a NO_RESTART
deployment.
Amazon Web Services Service Catalog AppRegistry enables organizations to understand the application context of their Amazon Web Services resources. AppRegistry provides a repository of your applications, their resources, and the application metadata that you use within your enterprise.
", "operations": { "AssociateAttributeGroup": "Associates an attribute group with an application to augment the application's metadata with the group's attributes. This feature enables applications to be described with user-defined details that are machine-readable, such as third-party integrations.
", - "AssociateResource": "Associates a resource with an application. The resource can be specified by its ARN or name. The application can be specified by ARN, ID, or name.
Minimum permissions
You must have the following permissions to associate a resource using the OPTIONS
parameter set to APPLY_APPLICATION_TAG
.
tag:GetResources
tag:TagResources
You must also have these additional permissions if you don't use the AWSServiceCatalogAppRegistryFullAccess
policy. For more information, see AWSServiceCatalogAppRegistryFullAccess in the AppRegistry Administrator Guide.
resource-groups:DisassociateResource
cloudformation:UpdateStack
cloudformation:DescribeStacks
In addition, you must have the tagging permission defined by the Amazon Web Services service that creates the resource. For more information, see TagResources in the Resource Groups Tagging API Reference.
Associates a resource with an application. The resource can be specified by its ARN or name. The application can be specified by ARN, ID, or name.
Minimum permissions
You must have the following permissions to associate a resource using the OPTIONS
parameter set to APPLY_APPLICATION_TAG
.
tag:GetResources
tag:TagResources
You must also have these additional permissions if you don't use the AWSServiceCatalogAppRegistryFullAccess
policy. For more information, see AWSServiceCatalogAppRegistryFullAccess in the AppRegistry Administrator Guide.
resource-groups:AssociateResource
cloudformation:UpdateStack
cloudformation:DescribeStacks
In addition, you must have the tagging permission defined by the Amazon Web Services service that creates the resource. For more information, see TagResources in the Resource Groups Tagging API Reference.
Creates a new application that is the top-level node in a hierarchy of related cloud resource abstractions.
", "CreateAttributeGroup": "Creates a new attribute group as a container for user-defined attributes. This feature enables users to have full control over their cloud application's metadata in a rich machine-readable format to facilitate integration with automated workflows and third-party tools.
", "DeleteApplication": "Deletes an application that is specified either by its application ID, name, or ARN. All associated attribute groups and resources must be disassociated from it before deleting an application.
", diff --git a/models/apis/servicecatalog-appregistry/2020-06-24/endpoint-tests-1.json b/models/apis/servicecatalog-appregistry/2020-06-24/endpoint-tests-1.json index 39bcc0acd0f..b38c5782e7e 100644 --- a/models/apis/servicecatalog-appregistry/2020-06-24/endpoint-tests-1.json +++ b/models/apis/servicecatalog-appregistry/2020-06-24/endpoint-tests-1.json @@ -646,17 +646,6 @@ "expect": { "error": "Invalid Configuration: Missing Region" } - }, - { - "documentation": "Partition doesn't support DualStack", - "expect": { - "error": "DualStack is enabled but this partition does not support DualStack" - }, - "params": { - "Region": "us-isob-east-1", - "UseFIPS": false, - "UseDualStack": true - } } ], "version": "1.0" diff --git a/models/endpoints/endpoints.json b/models/endpoints/endpoints.json index 31291963ca1..a6d021a14f8 100644 --- a/models/endpoints/endpoints.json +++ b/models/endpoints/endpoints.json @@ -15873,6 +15873,30 @@ "deprecated" : true, "hostname" : "signer-fips.us-west-2.amazonaws.com" }, + "fips-verification-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "verification.signer-fips.us-east-1.amazonaws.com" + }, + "fips-verification-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "verification.signer-fips.us-east-2.amazonaws.com" + }, + "fips-verification-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "hostname" : "verification.signer-fips.us-west-1.amazonaws.com" + }, + "fips-verification-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "verification.signer-fips.us-west-2.amazonaws.com" + }, "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { @@ -15898,6 +15922,126 @@ "hostname" : "signer-fips.us-west-2.amazonaws.com", "tags" : [ "fips" ] } ] + }, + "verification-af-south-1" : { + "credentialScope" : { + "region" : "af-south-1" + }, + "hostname" : "verification.signer.af-south-1.amazonaws.com" + }, + "verification-ap-east-1" : { + "credentialScope" : { + "region" : "ap-east-1" + }, + "hostname" : "verification.signer.ap-east-1.amazonaws.com" + }, + "verification-ap-northeast-1" : { + "credentialScope" : { + "region" : "ap-northeast-1" + }, + "hostname" : "verification.signer.ap-northeast-1.amazonaws.com" + }, + "verification-ap-northeast-2" : { + "credentialScope" : { + "region" : "ap-northeast-2" + }, + "hostname" : "verification.signer.ap-northeast-2.amazonaws.com" + }, + "verification-ap-south-1" : { + "credentialScope" : { + "region" : "ap-south-1" + }, + "hostname" : "verification.signer.ap-south-1.amazonaws.com" + }, + "verification-ap-southeast-1" : { + "credentialScope" : { + "region" : "ap-southeast-1" + }, + "hostname" : "verification.signer.ap-southeast-1.amazonaws.com" + }, + "verification-ap-southeast-2" : { + "credentialScope" : { + "region" : "ap-southeast-2" + }, + "hostname" : "verification.signer.ap-southeast-2.amazonaws.com" + }, + "verification-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "hostname" : "verification.signer.ca-central-1.amazonaws.com" + }, + "verification-eu-central-1" : { + "credentialScope" : { + "region" : "eu-central-1" + }, + "hostname" : "verification.signer.eu-central-1.amazonaws.com" + }, + "verification-eu-north-1" : { + "credentialScope" : { + "region" : "eu-north-1" + }, + "hostname" : "verification.signer.eu-north-1.amazonaws.com" + }, + "verification-eu-south-1" : { + "credentialScope" : { + "region" : "eu-south-1" + }, + "hostname" : 
"verification.signer.eu-south-1.amazonaws.com" + }, + "verification-eu-west-1" : { + "credentialScope" : { + "region" : "eu-west-1" + }, + "hostname" : "verification.signer.eu-west-1.amazonaws.com" + }, + "verification-eu-west-2" : { + "credentialScope" : { + "region" : "eu-west-2" + }, + "hostname" : "verification.signer.eu-west-2.amazonaws.com" + }, + "verification-eu-west-3" : { + "credentialScope" : { + "region" : "eu-west-3" + }, + "hostname" : "verification.signer.eu-west-3.amazonaws.com" + }, + "verification-me-south-1" : { + "credentialScope" : { + "region" : "me-south-1" + }, + "hostname" : "verification.signer.me-south-1.amazonaws.com" + }, + "verification-sa-east-1" : { + "credentialScope" : { + "region" : "sa-east-1" + }, + "hostname" : "verification.signer.sa-east-1.amazonaws.com" + }, + "verification-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "verification.signer.us-east-1.amazonaws.com" + }, + "verification-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "verification.signer.us-east-2.amazonaws.com" + }, + "verification-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "hostname" : "verification.signer.us-west-1.amazonaws.com" + }, + "verification-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "verification.signer.us-west-2.amazonaws.com" } } }, @@ -20228,7 +20372,19 @@ "signer" : { "endpoints" : { "cn-north-1" : { }, - "cn-northwest-1" : { } + "cn-northwest-1" : { }, + "verification-cn-north-1" : { + "credentialScope" : { + "region" : "cn-north-1" + }, + "hostname" : "verification.signer.cn-north-1.amazonaws.com.cn" + }, + "verification-cn-northwest-1" : { + "credentialScope" : { + "region" : "cn-northwest-1" + }, + "hostname" : "verification.signer.cn-northwest-1.amazonaws.com.cn" + } } }, "sms" : { @@ -25723,8 +25879,32 @@ }, "redshift" : { "endpoints" : { - "us-iso-east-1" : { }, - "us-iso-west-1" : { } + "fips-us-iso-east-1" : { + "credentialScope" : { + "region" : "us-iso-east-1" + }, + "deprecated" : true, + "hostname" : "redshift-fips.us-iso-east-1.c2s.ic.gov" + }, + "fips-us-iso-west-1" : { + "credentialScope" : { + "region" : "us-iso-west-1" + }, + "deprecated" : true, + "hostname" : "redshift-fips.us-iso-west-1.c2s.ic.gov" + }, + "us-iso-east-1" : { + "variants" : [ { + "hostname" : "redshift-fips.us-iso-east-1.c2s.ic.gov", + "tags" : [ "fips" ] + } ] + }, + "us-iso-west-1" : { + "variants" : [ { + "hostname" : "redshift-fips.us-iso-west-1.c2s.ic.gov", + "tags" : [ "fips" ] + } ] + } } }, "resource-groups" : { @@ -26262,7 +26442,19 @@ }, "redshift" : { "endpoints" : { - "us-isob-east-1" : { } + "fips-us-isob-east-1" : { + "credentialScope" : { + "region" : "us-isob-east-1" + }, + "deprecated" : true, + "hostname" : "redshift-fips.us-isob-east-1.sc2s.sgov.gov" + }, + "us-isob-east-1" : { + "variants" : [ { + "hostname" : "redshift-fips.us-isob-east-1.sc2s.sgov.gov", + "tags" : [ "fips" ] + } ] + } } }, "resource-groups" : { diff --git a/service/appregistry/api.go b/service/appregistry/api.go index 3a538f03941..385c6054b7f 100644 --- a/service/appregistry/api.go +++ b/service/appregistry/api.go @@ -168,7 +168,7 @@ func (c *AppRegistry) AssociateResourceRequest(input *AssociateResourceInput) (r // (https://docs.aws.amazon.com/servicecatalog/latest/arguide/full.html) in // the AppRegistry Administrator Guide. 
// -// - resource-groups:DisassociateResource +// - resource-groups:AssociateResource // // - cloudformation:UpdateStack // diff --git a/service/billingconductor/api.go b/service/billingconductor/api.go index 4b0b6d33d44..6a03b9873cf 100644 --- a/service/billingconductor/api.go +++ b/service/billingconductor/api.go @@ -4688,6 +4688,10 @@ func (s *CreateBillingGroupOutput) SetArn(v string) *CreateBillingGroupOutput { type CreateCustomLineItemInput struct { _ struct{} `type:"structure"` + // The Amazon Web Services account in which this custom line item will be applied + // to. + AccountId *string `type:"string"` + // The Amazon Resource Name (ARN) that references the billing group where the // custom line item applies to. // @@ -4793,6 +4797,12 @@ func (s *CreateCustomLineItemInput) Validate() error { return nil } +// SetAccountId sets the AccountId field's value. +func (s *CreateCustomLineItemInput) SetAccountId(v string) *CreateCustomLineItemInput { + s.AccountId = &v + return s +} + // SetBillingGroupArn sets the BillingGroupArn field's value. func (s *CreateCustomLineItemInput) SetBillingGroupArn(v string) *CreateCustomLineItemInput { s.BillingGroupArn = &v @@ -5533,6 +5543,10 @@ func (s *CustomLineItemFlatChargeDetails) SetChargeValue(v float64) *CustomLineI type CustomLineItemListElement struct { _ struct{} `type:"structure"` + // The Amazon Web Services account in which this custom line item will be applied + // to. + AccountId *string `type:"string"` + // The Amazon Resource Names (ARNs) for custom line items. Arn *string `type:"string"` @@ -5594,6 +5608,12 @@ func (s CustomLineItemListElement) GoString() string { return s.String() } +// SetAccountId sets the AccountId field's value. +func (s *CustomLineItemListElement) SetAccountId(v string) *CustomLineItemListElement { + s.AccountId = &v + return s +} + // SetArn sets the Arn field's value. func (s *CustomLineItemListElement) SetArn(v string) *CustomLineItemListElement { s.Arn = &v @@ -5716,6 +5736,10 @@ func (s *CustomLineItemPercentageChargeDetails) SetPercentageValue(v float64) *C type CustomLineItemVersionListElement struct { _ struct{} `type:"structure"` + // The Amazon Web Services account in which this custom line item will be applied + // to. + AccountId *string `type:"string"` + // A list of custom line item Amazon Resource Names (ARNs) to retrieve information. Arn *string `type:"string"` @@ -5783,6 +5807,12 @@ func (s CustomLineItemVersionListElement) GoString() string { return s.String() } +// SetAccountId sets the AccountId field's value. +func (s *CustomLineItemVersionListElement) SetAccountId(v string) *CustomLineItemVersionListElement { + s.AccountId = &v + return s +} + // SetArn sets the Arn field's value. func (s *CustomLineItemVersionListElement) SetArn(v string) *CustomLineItemVersionListElement { s.Arn = &v @@ -7516,6 +7546,10 @@ func (s *ListCustomLineItemVersionsOutput) SetNextToken(v string) *ListCustomLin type ListCustomLineItemsFilter struct { _ struct{} `type:"structure"` + // The Amazon Web Services accounts in which this custom line item will be applied + // to. + AccountIds []*string `min:"1" type:"list"` + // A list of custom line item ARNs to retrieve information. Arns []*string `min:"1" type:"list"` @@ -7547,6 +7581,9 @@ func (s ListCustomLineItemsFilter) GoString() string { // Validate inspects the fields of the type to determine if they are valid. 
func (s *ListCustomLineItemsFilter) Validate() error { invalidParams := request.ErrInvalidParams{Context: "ListCustomLineItemsFilter"} + if s.AccountIds != nil && len(s.AccountIds) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AccountIds", 1)) + } if s.Arns != nil && len(s.Arns) < 1 { invalidParams.Add(request.NewErrParamMinLen("Arns", 1)) } @@ -7563,6 +7600,12 @@ func (s *ListCustomLineItemsFilter) Validate() error { return nil } +// SetAccountIds sets the AccountIds field's value. +func (s *ListCustomLineItemsFilter) SetAccountIds(v []*string) *ListCustomLineItemsFilter { + s.AccountIds = v + return s +} + // SetArns sets the Arns field's value. func (s *ListCustomLineItemsFilter) SetArns(v []*string) *ListCustomLineItemsFilter { s.Arns = v @@ -10963,6 +11006,9 @@ const ( // ValidationExceptionReasonCannotDeleteAutoAssociateBillingGroup is a ValidationExceptionReason enum value ValidationExceptionReasonCannotDeleteAutoAssociateBillingGroup = "CANNOT_DELETE_AUTO_ASSOCIATE_BILLING_GROUP" + + // ValidationExceptionReasonIllegalAccountId is a ValidationExceptionReason enum value + ValidationExceptionReasonIllegalAccountId = "ILLEGAL_ACCOUNT_ID" ) // ValidationExceptionReason_Values returns all elements of the ValidationExceptionReason enum @@ -11027,5 +11073,6 @@ func ValidationExceptionReason_Values() []string { ValidationExceptionReasonInvalidFilter, ValidationExceptionReasonTooManyAutoAssociateBillingGroups, ValidationExceptionReasonCannotDeleteAutoAssociateBillingGroup, + ValidationExceptionReasonIllegalAccountId, } } diff --git a/service/braket/api.go b/service/braket/api.go index cbbadae77c0..f5bddd5365c 100644 --- a/service/braket/api.go +++ b/service/braket/api.go @@ -1508,6 +1508,67 @@ func (s *AlgorithmSpecification) SetScriptModeConfig(v *ScriptModeConfig) *Algor return s } +// The Amazon Braket resource and the association type. +type Association struct { + _ struct{} `type:"structure"` + + // The Amazon Braket resource arn. + // + // Arn is a required field + Arn *string `locationName:"arn" type:"string" required:"true"` + + // The association type for the specified Amazon Braket resource arn. + // + // Type is a required field + Type *string `locationName:"type" type:"string" required:"true" enum:"AssociationType"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Association) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Association) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Association) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Association"} + if s.Arn == nil { + invalidParams.Add(request.NewErrParamRequired("Arn")) + } + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetArn sets the Arn field's value. +func (s *Association) SetArn(v string) *Association { + s.Arn = &v + return s +} + +// SetType sets the Type field's value. 
+func (s *Association) SetType(v string) *Association { + s.Type = &v + return s +} + type CancelJobInput struct { _ struct{} `type:"structure" nopayload:"true"` @@ -1830,6 +1891,9 @@ type CreateJobInput struct { // AlgorithmSpecification is a required field AlgorithmSpecification *AlgorithmSpecification `locationName:"algorithmSpecification" type:"structure" required:"true"` + // The list of Amazon Braket resources associated with the hybrid job. + Associations []*Association `locationName:"associations" type:"list"` + // Information about the output locations for job checkpoint data. CheckpointConfig *JobCheckpointConfig `locationName:"checkpointConfig" type:"structure"` @@ -1935,6 +1999,16 @@ func (s *CreateJobInput) Validate() error { invalidParams.AddNested("AlgorithmSpecification", err.(request.ErrInvalidParams)) } } + if s.Associations != nil { + for i, v := range s.Associations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Associations", i), err.(request.ErrInvalidParams)) + } + } + } if s.CheckpointConfig != nil { if err := s.CheckpointConfig.Validate(); err != nil { invalidParams.AddNested("CheckpointConfig", err.(request.ErrInvalidParams)) @@ -1983,6 +2057,12 @@ func (s *CreateJobInput) SetAlgorithmSpecification(v *AlgorithmSpecification) *C return s } +// SetAssociations sets the Associations field's value. +func (s *CreateJobInput) SetAssociations(v []*Association) *CreateJobInput { + s.Associations = v + return s +} + // SetCheckpointConfig sets the CheckpointConfig field's value. func (s *CreateJobInput) SetCheckpointConfig(v *JobCheckpointConfig) *CreateJobInput { s.CheckpointConfig = v @@ -2090,6 +2170,9 @@ type CreateQuantumTaskInput struct { // Action is a required field Action aws.JSONValue `locationName:"action" type:"jsonvalue" required:"true"` + // The list of Amazon Braket resources associated with the quantum task. + Associations []*Association `locationName:"associations" type:"list"` + // The client token associated with the request. ClientToken *string `locationName:"clientToken" min:"1" type:"string" idempotencyToken:"true"` @@ -2174,6 +2257,16 @@ func (s *CreateQuantumTaskInput) Validate() error { if s.Shots == nil { invalidParams.Add(request.NewErrParamRequired("Shots")) } + if s.Associations != nil { + for i, v := range s.Associations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Associations", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -2187,6 +2280,12 @@ func (s *CreateQuantumTaskInput) SetAction(v aws.JSONValue) *CreateQuantumTaskIn return s } +// SetAssociations sets the Associations field's value. +func (s *CreateQuantumTaskInput) SetAssociations(v []*Association) *CreateQuantumTaskInput { + s.Associations = v + return s +} + // SetClientToken sets the ClientToken field's value. func (s *CreateQuantumTaskInput) SetClientToken(v string) *CreateQuantumTaskInput { s.ClientToken = &v @@ -2848,6 +2947,9 @@ type GetJobOutput struct { // AlgorithmSpecification is a required field AlgorithmSpecification *AlgorithmSpecification `locationName:"algorithmSpecification" type:"structure" required:"true"` + // The list of Amazon Braket resources associated with the hybrid job. + Associations []*Association `locationName:"associations" type:"list"` + // The billable time the Amazon Braket job used to complete. 
BillableDuration *int64 `locationName:"billableDuration" type:"integer"` @@ -2956,6 +3058,12 @@ func (s *GetJobOutput) SetAlgorithmSpecification(v *AlgorithmSpecification) *Get return s } +// SetAssociations sets the Associations field's value. +func (s *GetJobOutput) SetAssociations(v []*Association) *GetJobOutput { + s.Associations = v + return s +} + // SetBillableDuration sets the BillableDuration field's value. func (s *GetJobOutput) SetBillableDuration(v int64) *GetJobOutput { s.BillableDuration = &v @@ -3076,7 +3184,7 @@ type GetQuantumTaskInput struct { // A list of attributes to return information for. AdditionalAttributeNames []*string `location:"querystring" locationName:"additionalAttributeNames" type:"list" enum:"QuantumTaskAdditionalAttributeName"` - // the ARN of the task to retrieve. + // The ARN of the task to retrieve. // // QuantumTaskArn is a required field QuantumTaskArn *string `location:"uri" locationName:"quantumTaskArn" type:"string" required:"true"` @@ -3131,6 +3239,9 @@ func (s *GetQuantumTaskInput) SetQuantumTaskArn(v string) *GetQuantumTaskInput { type GetQuantumTaskOutput struct { _ struct{} `type:"structure"` + // The list of Amazon Braket resources associated with the quantum task. + Associations []*Association `locationName:"associations" type:"list"` + // The time at which the task was created. // // CreatedAt is a required field @@ -3207,6 +3318,12 @@ func (s GetQuantumTaskOutput) GoString() string { return s.String() } +// SetAssociations sets the Associations field's value. +func (s *GetQuantumTaskOutput) SetAssociations(v []*Association) *GetQuantumTaskOutput { + s.Associations = v + return s +} + // SetCreatedAt sets the CreatedAt field's value. func (s *GetQuantumTaskOutput) SetCreatedAt(v time.Time) *GetQuantumTaskOutput { s.CreatedAt = &v @@ -3640,7 +3757,7 @@ type JobEventDetails struct { // job. Message *string `locationName:"message" type:"string"` - // TThe type of event that occurred related to the Amazon Braket job. + // The type of event that occurred related to the Amazon Braket job. 
TimeOfEvent *time.Time `locationName:"timeOfEvent" type:"timestamp" timestampFormat:"iso8601"` } @@ -5307,6 +5424,18 @@ func (s *ValidationException) RequestID() string { return s.RespMetadata.RequestID } +const ( + // AssociationTypeReservationTimeWindowArn is a AssociationType enum value + AssociationTypeReservationTimeWindowArn = "RESERVATION_TIME_WINDOW_ARN" +) + +// AssociationType_Values returns all elements of the AssociationType enum +func AssociationType_Values() []string { + return []string{ + AssociationTypeReservationTimeWindowArn, + } +} + const ( // CancellationStatusCancelling is a CancellationStatus enum value CancellationStatusCancelling = "CANCELLING" diff --git a/service/cloud9/api.go b/service/cloud9/api.go index 028f6a948ba..15792334399 100644 --- a/service/cloud9/api.go +++ b/service/cloud9/api.go @@ -1619,7 +1619,7 @@ type CreateEnvironmentEC2Input struct { // // AMI aliases // - // * Amazon Linux (default): amazonlinux-1-x86_64 + // * Amazon Linux: amazonlinux-1-x86_64 // // * Amazon Linux 2: amazonlinux-2-x86_64 // @@ -1629,14 +1629,16 @@ type CreateEnvironmentEC2Input struct { // // SSM paths // - // * Amazon Linux (default): resolve:ssm:/aws/service/cloud9/amis/amazonlinux-1-x86_64 + // * Amazon Linux: resolve:ssm:/aws/service/cloud9/amis/amazonlinux-1-x86_64 // // * Amazon Linux 2: resolve:ssm:/aws/service/cloud9/amis/amazonlinux-2-x86_64 // // * Ubuntu 18.04: resolve:ssm:/aws/service/cloud9/amis/ubuntu-18.04-x86_64 // // * Ubuntu 22.04: resolve:ssm:/aws/service/cloud9/amis/ubuntu-22.04-x86_64 - ImageId *string `locationName:"imageId" type:"string"` + // + // ImageId is a required field + ImageId *string `locationName:"imageId" type:"string" required:"true"` // The type of instance to connect to the environment (for example, t2.micro). // @@ -1689,6 +1691,9 @@ func (s CreateEnvironmentEC2Input) GoString() string { // Validate inspects the fields of the type to determine if they are valid. 
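Because imageId in CreateEnvironmentEC2 is now required, callers that previously relied on the old default AMI must pass it explicitly. The following is only a rough sketch, not part of the generated diff: the environment name is a placeholder, and the AMI alias is one of the values listed in the documentation above.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloud9"
)

func main() {
	svc := cloud9.New(session.Must(session.NewSession()))

	// imageId is now mandatory; an AMI alias from the documented list is the
	// simplest value. Omitting it fails client-side validation after this change.
	out, err := svc.CreateEnvironmentEC2(&cloud9.CreateEnvironmentEC2Input{
		Name:         aws.String("my-environment"), // placeholder name
		InstanceType: aws.String("t2.micro"),
		ImageId:      aws.String("amazonlinux-2-x86_64"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.EnvironmentId))
}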
func (s *CreateEnvironmentEC2Input) Validate() error { invalidParams := request.ErrInvalidParams{Context: "CreateEnvironmentEC2Input"} + if s.ImageId == nil { + invalidParams.Add(request.NewErrParamRequired("ImageId")) + } if s.InstanceType == nil { invalidParams.Add(request.NewErrParamRequired("InstanceType")) } diff --git a/service/cloudformation/waiters.go b/service/cloudformation/waiters.go index 28977d0a137..a9c2fdbb599 100644 --- a/service/cloudformation/waiters.go +++ b/service/cloudformation/waiters.go @@ -92,6 +92,46 @@ func (c *CloudFormation) WaitUntilStackCreateCompleteWithContext(ctx aws.Context Matcher: request.PathAllWaiterMatch, Argument: "Stacks[].StackStatus", Expected: "CREATE_COMPLETE", }, + { + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "Stacks[].StackStatus", + Expected: "UPDATE_COMPLETE", + }, + { + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "Stacks[].StackStatus", + Expected: "UPDATE_IN_PROGRESS", + }, + { + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "Stacks[].StackStatus", + Expected: "UPDATE_COMPLETE_CLEANUP_IN_PROGRESS", + }, + { + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "Stacks[].StackStatus", + Expected: "UPDATE_FAILED", + }, + { + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "Stacks[].StackStatus", + Expected: "UPDATE_ROLLBACK_IN_PROGRESS", + }, + { + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "Stacks[].StackStatus", + Expected: "UPDATE_ROLLBACK_FAILED", + }, + { + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "Stacks[].StackStatus", + Expected: "UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS", + }, + { + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "Stacks[].StackStatus", + Expected: "UPDATE_ROLLBACK_COMPLETE", + }, { State: request.FailureWaiterState, Matcher: request.PathAnyWaiterMatch, Argument: "Stacks[].StackStatus", diff --git a/service/finspace/api.go b/service/finspace/api.go index cb186dca6b7..806a0eb2388 100644 --- a/service/finspace/api.go +++ b/service/finspace/api.go @@ -4262,7 +4262,7 @@ type CreateKxChangesetInput struct { _ struct{} `type:"structure"` // A list of change request objects that are run in order. A change request - // object consists of changeType , s3Path, and a dbPath. A changeType can has + // object consists of changeType , s3Path, and dbPath. A changeType can has // the following values: // // * PUT – Adds or updates files in a database. @@ -4270,20 +4270,38 @@ type CreateKxChangesetInput struct { // * DELETE – Deletes files in a database. // // All the change requests require a mandatory dbPath attribute that defines - // the path within the database directory. The s3Path attribute defines the - // s3 source file path and is required for a PUT change type. + // the path within the database directory. All database paths must start with + // a leading / and end with a trailing /. The s3Path attribute defines the s3 + // source file path and is required for a PUT change type. The s3path must end + // with a trailing / if it is a directory and must end without a trailing / + // if it is a file. 
// - // Here is an example of how you can use the change request object: + // Here are few examples of how you can use the change request object: // - // [ { "changeType": "PUT", "s3Path":"s3://bucket/db/2020.01.02/", "dbPath":"/2020.01.02/"}, - // { "changeType": "PUT", "s3Path":"s3://bucket/db/sym", "dbPath":"/"}, { "changeType": - // "DELETE", "dbPath": "/2020.01.01/"} ] + // This request adds a single sym file at database root location. // - // In this example, the first request with PUT change type allows you to add - // files in the given s3Path under the 2020.01.02 partition of the database. - // The second request with PUT change type allows you to add a single sym file - // at database root location. The last request with DELETE change type allows - // you to delete the files under the 2020.01.01 partition of the database. + // { "changeType": "PUT", "s3Path":"s3://bucket/db/sym", "dbPath":"/"} + // + // This request adds files in the given s3Path under the 2020.01.02 partition + // of the database. + // + // { "changeType": "PUT", "s3Path":"s3://bucket/db/2020.01.02/", "dbPath":"/2020.01.02/"} + // + // This request adds files in the given s3Path under the taq table partition + // of the database. + // + // [ { "changeType": "PUT", "s3Path":"s3://bucket/db/2020.01.02/taq/", "dbPath":"/2020.01.02/taq/"}] + // + // This request deletes the 2020.01.02 partition of the database. + // + // [{ "changeType": "DELETE", "dbPath": "/2020.01.02/"} ] + // + // The DELETE request allows you to delete the existing files under the 2020.01.02 + // partition of the database, and the PUT request adds a new taq table under + // it. + // + // [ {"changeType": "DELETE", "dbPath":"/2020.01.02/"}, {"changeType": "PUT", + // "s3Path":"s3://bucket/db/2020.01.02/taq/", "dbPath":"/2020.01.02/taq/"}] // // ChangeRequests is a required field ChangeRequests []*ChangeRequest `locationName:"changeRequests" min:"1" type:"list" required:"true"` @@ -4552,6 +4570,13 @@ type CreateKxClusterInput struct { // initialization scripts and custom code. This type of cluster does not // require a writable local storage. // + // * GP – A general purpose cluster allows you to quickly iterate on code + // during development by granting greater access to system commands and enabling + // a fast reload of custom code. This cluster type can optionally mount databases + // including cache and savedown storage. For this cluster type, the node + // count is fixed at 1. It does not support autoscaling and supports only + // SINGLE AZ mode. + // // ClusterType is a required field ClusterType *string `locationName:"clusterType" type:"string" required:"true" enum:"KxClusterType"` @@ -4890,6 +4915,13 @@ type CreateKxClusterOutput struct { // in kdb systems. It allows you to create your own routing logic using the // initialization scripts and custom code. This type of cluster does not // require a writable local storage. + // + // * GP – A general purpose cluster allows you to quickly iterate on code + // during development by granting greater access to system commands and enabling + // a fast reload of custom code. This cluster type can optionally mount databases + // including cache and savedown storage. For this cluster type, the node + // count is fixed at 1. It does not support autoscaling and supports only + // SINGLE AZ mode. 
ClusterType *string `locationName:"clusterType" type:"string" enum:"KxClusterType"` // The details of the custom code that you want to use inside a cluster when @@ -6819,6 +6851,13 @@ type GetKxClusterOutput struct { // in kdb systems. It allows you to create your own routing logic using the // initialization scripts and custom code. This type of cluster does not // require a writable local storage. + // + // * GP – A general purpose cluster allows you to quickly iterate on code + // during development by granting greater access to system commands and enabling + // a fast reload of custom code. This cluster type can optionally mount databases + // including cache and savedown storage. For this cluster type, the node + // count is fixed at 1. It does not support autoscaling and supports only + // SINGLE AZ mode. ClusterType *string `locationName:"clusterType" type:"string" enum:"KxClusterType"` // The details of the custom code that you want to use inside a cluster when @@ -8069,6 +8108,13 @@ type KxCluster struct { // in kdb systems. It allows you to create your own routing logic using the // initialization scripts and custom code. This type of cluster does not // require a writable local storage. + // + // * GP – A general purpose cluster allows you to quickly iterate on code + // during development by granting greater access to system commands and enabling + // a fast reload of custom code. This cluster type can optionally mount databases + // including cache and savedown storage. For this cluster type, the node + // count is fixed at 1. It does not support autoscaling and supports only + // SINGLE AZ mode. ClusterType *string `locationName:"clusterType" type:"string" enum:"KxClusterType"` // The timestamp at which the cluster was created in FinSpace. The value is @@ -8217,6 +8263,12 @@ type KxClusterCodeDeploymentConfiguration struct { // * ROLLING – This options updates the cluster by stopping the exiting // q process and starting a new q process with updated configuration. // + // * NO_RESTART – This option updates the cluster without stopping the + // running q process. It is only available for GP type cluster. This option + // is quicker as it reduces the turn around time to update configuration + // on a cluster. With this deployment mode, you cannot update the initializationScript + // and commandLineArguments parameters. + // // * FORCE – This option updates the cluster by immediately stopping all // the running processes before starting up new ones with the updated configuration. // @@ -9412,6 +9464,13 @@ type ListKxClustersInput struct { // in kdb systems. It allows you to create your own routing logic using the // initialization scripts and custom code. This type of cluster does not // require a writable local storage. + // + // * GP – A general purpose cluster allows you to quickly iterate on code + // during development by granting greater access to system commands and enabling + // a fast reload of custom code. This cluster type can optionally mount databases + // including cache and savedown storage. For this cluster type, the node + // count is fixed at 1. It does not support autoscaling and supports only + // SINGLE AZ mode. ClusterType *string `location:"querystring" locationName:"clusterType" type:"string" enum:"KxClusterType"` // A unique identifier for the kdb environment. 
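The reworked CreateKxChangeset documentation above walks through several change request shapes. As a hedged illustration only (not part of this diff), the combined DELETE/PUT example might be expressed with the Go SDK as below; the environment and database identifiers are placeholders, and the ChangeRequest field names (ChangeType, S3Path, DbPath) and the ClientToken parameter are assumed from the existing finspace API shape rather than shown in this change.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/finspace"
)

func main() {
	svc := finspace.New(session.Must(session.NewSession()))

	// Mirror the last documented example: drop the 2020.01.02 partition and
	// re-create a taq table under it in a single changeset. Note the leading
	// and trailing slashes on dbPath and the trailing slash on the s3Path
	// directory, as required by the updated documentation.
	out, err := svc.CreateKxChangeset(&finspace.CreateKxChangesetInput{
		EnvironmentId: aws.String("my-kx-environment"), // placeholder identifiers
		DatabaseName:  aws.String("my-database"),
		ClientToken:   aws.String("example-client-token"),
		ChangeRequests: []*finspace.ChangeRequest{
			{
				ChangeType: aws.String("DELETE"),
				DbPath:     aws.String("/2020.01.02/"),
			},
			{
				ChangeType: aws.String("PUT"),
				S3Path:     aws.String("s3://bucket/db/2020.01.02/taq/"),
				DbPath:     aws.String("/2020.01.02/taq/"),
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out)
}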
@@ -10880,6 +10939,8 @@ type UpdateKxClusterCodeConfigurationInput struct { Code *CodeConfiguration `locationName:"code" type:"structure" required:"true"` // Specifies the key-value pairs to make them available inside the cluster. + // + // You cannot update this parameter for a NO_RESTART deployment. CommandLineArguments []*KxCommandLineArgument `locationName:"commandLineArguments" type:"list"` // The configuration that allows you to choose how you want to update the code @@ -10894,6 +10955,8 @@ type UpdateKxClusterCodeConfigurationInput struct { // Specifies a Q program that will be run at launch of a cluster. It is a relative // path within .zip file that contains the custom code, which will be loaded // on the cluster. It must include the file name itself. For example, somedir/init.q. + // + // You cannot update this parameter for a NO_RESTART deployment. InitializationScript *string `locationName:"initializationScript" min:"1" type:"string"` } @@ -12360,6 +12423,9 @@ func KxAzMode_Values() []string { } const ( + // KxClusterCodeDeploymentStrategyNoRestart is a KxClusterCodeDeploymentStrategy enum value + KxClusterCodeDeploymentStrategyNoRestart = "NO_RESTART" + // KxClusterCodeDeploymentStrategyRolling is a KxClusterCodeDeploymentStrategy enum value KxClusterCodeDeploymentStrategyRolling = "ROLLING" @@ -12370,6 +12436,7 @@ const ( // KxClusterCodeDeploymentStrategy_Values returns all elements of the KxClusterCodeDeploymentStrategy enum func KxClusterCodeDeploymentStrategy_Values() []string { return []string{ + KxClusterCodeDeploymentStrategyNoRestart, KxClusterCodeDeploymentStrategyRolling, KxClusterCodeDeploymentStrategyForce, } @@ -12424,6 +12491,9 @@ const ( // KxClusterTypeGateway is a KxClusterType enum value KxClusterTypeGateway = "GATEWAY" + + // KxClusterTypeGp is a KxClusterType enum value + KxClusterTypeGp = "GP" ) // KxClusterType_Values returns all elements of the KxClusterType enum @@ -12432,6 +12502,7 @@ func KxClusterType_Values() []string { KxClusterTypeHdb, KxClusterTypeRdb, KxClusterTypeGateway, + KxClusterTypeGp, } } diff --git a/service/medialive/api.go b/service/medialive/api.go index 24747501472..f338d46abee 100644 --- a/service/medialive/api.go +++ b/service/medialive/api.go @@ -10807,6 +10807,147 @@ func (s ClaimDeviceOutput) GoString() string { return s.String() } +// Property of ColorCorrectionSettings. Used for custom color space conversion. +// The object identifies one 3D LUT file and specifies the input/output color +// space combination that the file will be used for. +type ColorCorrection struct { + _ struct{} `type:"structure"` + + // The color space of the input. + // + // InputColorSpace is a required field + InputColorSpace *string `locationName:"inputColorSpace" type:"string" required:"true" enum:"ColorSpace"` + + // The color space of the output. + // + // OutputColorSpace is a required field + OutputColorSpace *string `locationName:"outputColorSpace" type:"string" required:"true" enum:"ColorSpace"` + + // The URI of the 3D LUT file. The protocol must be 's3:' or 's3ssl:':. + // + // Uri is a required field + Uri *string `locationName:"uri" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ColorCorrection) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ColorCorrection) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ColorCorrection) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ColorCorrection"} + if s.InputColorSpace == nil { + invalidParams.Add(request.NewErrParamRequired("InputColorSpace")) + } + if s.OutputColorSpace == nil { + invalidParams.Add(request.NewErrParamRequired("OutputColorSpace")) + } + if s.Uri == nil { + invalidParams.Add(request.NewErrParamRequired("Uri")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetInputColorSpace sets the InputColorSpace field's value. +func (s *ColorCorrection) SetInputColorSpace(v string) *ColorCorrection { + s.InputColorSpace = &v + return s +} + +// SetOutputColorSpace sets the OutputColorSpace field's value. +func (s *ColorCorrection) SetOutputColorSpace(v string) *ColorCorrection { + s.OutputColorSpace = &v + return s +} + +// SetUri sets the Uri field's value. +func (s *ColorCorrection) SetUri(v string) *ColorCorrection { + s.Uri = &v + return s +} + +// Property of encoderSettings. Controls color conversion when you are using +// 3D LUT files to perform color conversion on video. +type ColorCorrectionSettings struct { + _ struct{} `type:"structure"` + + // An array of colorCorrections that applies when you are using 3D LUT files + // to perform color conversion on video. Each colorCorrection contains one 3D + // LUT file (that defines the color mapping for converting an input color space + // to an output color space), and the input/output combination that this 3D + // LUT file applies to. MediaLive reads the color space in the input metadata, + // determines the color space that you have specified for the output, and finds + // and uses the LUT file that applies to this combination. + // + // GlobalColorCorrections is a required field + GlobalColorCorrections []*ColorCorrection `locationName:"globalColorCorrections" type:"list" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ColorCorrectionSettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ColorCorrectionSettings) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ColorCorrectionSettings) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ColorCorrectionSettings"} + if s.GlobalColorCorrections == nil { + invalidParams.Add(request.NewErrParamRequired("GlobalColorCorrections")) + } + if s.GlobalColorCorrections != nil { + for i, v := range s.GlobalColorCorrections { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GlobalColorCorrections", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGlobalColorCorrections sets the GlobalColorCorrections field's value. +func (s *ColorCorrectionSettings) SetGlobalColorCorrections(v []*ColorCorrection) *ColorCorrectionSettings { + s.GlobalColorCorrections = v + return s +} + // Passthrough applies no color space conversion to the output type ColorSpacePassthroughSettings struct { _ struct{} `type:"structure"` @@ -15630,6 +15771,9 @@ type EncoderSettings struct { // Settings for caption decriptions CaptionDescriptions []*CaptionDescription `locationName:"captionDescriptions" type:"list"` + // Color correction settings + ColorCorrectionSettings *ColorCorrectionSettings `locationName:"colorCorrectionSettings" type:"structure"` + // Feature Activations FeatureActivations *FeatureActivations `locationName:"featureActivations" type:"structure"` @@ -15725,6 +15869,11 @@ func (s *EncoderSettings) Validate() error { } } } + if s.ColorCorrectionSettings != nil { + if err := s.ColorCorrectionSettings.Validate(); err != nil { + invalidParams.AddNested("ColorCorrectionSettings", err.(request.ErrInvalidParams)) + } + } if s.GlobalConfiguration != nil { if err := s.GlobalConfiguration.Validate(); err != nil { invalidParams.AddNested("GlobalConfiguration", err.(request.ErrInvalidParams)) @@ -15802,6 +15951,12 @@ func (s *EncoderSettings) SetCaptionDescriptions(v []*CaptionDescription) *Encod return s } +// SetColorCorrectionSettings sets the ColorCorrectionSettings field's value. +func (s *EncoderSettings) SetColorCorrectionSettings(v *ColorCorrectionSettings) *EncoderSettings { + s.ColorCorrectionSettings = v + return s +} + // SetFeatureActivations sets the FeatureActivations field's value. func (s *EncoderSettings) SetFeatureActivations(v *FeatureActivations) *EncoderSettings { s.FeatureActivations = v @@ -34506,6 +34661,32 @@ func ChannelState_Values() []string { } } +// Property of colorCorrections. When you are using 3D LUT files to perform +// color conversion on video, these are the supported color spaces. +const ( + // ColorSpaceHdr10 is a ColorSpace enum value + ColorSpaceHdr10 = "HDR10" + + // ColorSpaceHlg2020 is a ColorSpace enum value + ColorSpaceHlg2020 = "HLG_2020" + + // ColorSpaceRec601 is a ColorSpace enum value + ColorSpaceRec601 = "REC_601" + + // ColorSpaceRec709 is a ColorSpace enum value + ColorSpaceRec709 = "REC_709" +) + +// ColorSpace_Values returns all elements of the ColorSpace enum +func ColorSpace_Values() []string { + return []string{ + ColorSpaceHdr10, + ColorSpaceHlg2020, + ColorSpaceRec601, + ColorSpaceRec709, + } +} + const ( // ContentTypeImageJpeg is a ContentType enum value ContentTypeImageJpeg = "image/jpeg"
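To show how the new MediaLive color correction surface fits together, here is a minimal sketch (not part of the generated diff) that wires a single 3D LUT entry into EncoderSettings using the types and ColorSpace constants introduced above. The S3 URI is a placeholder, and the many other EncoderSettings fields a real channel requires are omitted.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/medialive"
)

func main() {
	// One 3D LUT file covering a single input/output color space combination.
	// MediaLive picks the entry whose combination matches the input metadata
	// and the configured output color space.
	lut := &medialive.ColorCorrection{
		InputColorSpace:  aws.String(medialive.ColorSpaceRec709),
		OutputColorSpace: aws.String(medialive.ColorSpaceHdr10),
		Uri:              aws.String("s3://my-bucket/luts/rec709-to-hdr10.cube"), // placeholder object
	}

	ccs := &medialive.ColorCorrectionSettings{
		GlobalColorCorrections: []*medialive.ColorCorrection{lut},
	}
	// The generated Validate method checks the nested entries as shown above.
	if err := ccs.Validate(); err != nil {
		fmt.Println("invalid color correction settings:", err)
		return
	}

	// Attach to the channel's encoder settings; the remaining fields required
	// by CreateChannel are unchanged by this release and left out here.
	enc := &medialive.EncoderSettings{
		ColorCorrectionSettings: ccs,
	}
	fmt.Println(enc)
}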