diff --git a/CHANGELOG.md b/CHANGELOG.md index 9b1c9ffb565..5cf4d1f157e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,15 @@ +Release v1.51.8 (2024-03-26) +=== + +### Service Client Updates +* `service/bedrock-agent-runtime`: Updates service API and documentation +* `service/ce`: Updates service API, documentation, and paginators +* `service/ec2`: Updates service API and documentation + * Documentation updates for Elastic Compute Cloud (EC2). +* `service/ecs`: Updates service documentation + * This is a documentation update for Amazon ECS. +* `service/finspace`: Updates service API and documentation + Release v1.51.7 (2024-03-25) === diff --git a/aws/endpoints/defaults.go b/aws/endpoints/defaults.go index b6d122c154a..0ff089babe4 100644 --- a/aws/endpoints/defaults.go +++ b/aws/endpoints/defaults.go @@ -19980,12 +19980,30 @@ var awsPartition = partition{ }, "media-pipelines-chime": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, diff --git a/aws/version.go b/aws/version.go index 46674253615..f31edbbc909 100644 --- a/aws/version.go +++ b/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.51.7" +const SDKVersion = "1.51.8" diff --git a/models/apis/bedrock-agent-runtime/2023-07-26/api-2.json b/models/apis/bedrock-agent-runtime/2023-07-26/api-2.json index 83b6ee2cc86..14f6990d199 100644 --- a/models/apis/bedrock-agent-runtime/2023-07-26/api-2.json +++ 
b/models/apis/bedrock-agent-runtime/2023-07-26/api-2.json @@ -232,6 +232,12 @@ "textResponsePart":{"shape":"TextResponsePart"} } }, + "GenerationConfiguration":{ + "type":"structure", + "members":{ + "promptTemplate":{"shape":"PromptTemplate"} + } + }, "InferenceConfiguration":{ "type":"structure", "members":{ @@ -397,6 +403,7 @@ "modelArn" ], "members":{ + "generationConfiguration":{"shape":"GenerationConfiguration"}, "knowledgeBaseId":{"shape":"KnowledgeBaseId"}, "modelArn":{"shape":"BedrockModelArn"}, "retrievalConfiguration":{"shape":"KnowledgeBaseRetrievalConfiguration"} @@ -415,7 +422,7 @@ "KnowledgeBaseVectorSearchConfigurationNumberOfResultsInteger":{ "type":"integer", "box":true, - "max":25, + "max":100, "min":1 }, "LambdaArn":{"type":"string"}, @@ -557,6 +564,12 @@ "key":{"shape":"String"}, "value":{"shape":"String"} }, + "PromptTemplate":{ + "type":"structure", + "members":{ + "textPromptTemplate":{"shape":"TextPromptTemplate"} + } + }, "PromptText":{ "type":"string", "sensitive":true @@ -824,6 +837,12 @@ "max":1, "min":0 }, + "TextPromptTemplate":{ + "type":"string", + "max":4000, + "min":1, + "sensitive":true + }, "TextResponsePart":{ "type":"structure", "members":{ diff --git a/models/apis/bedrock-agent-runtime/2023-07-26/docs-2.json b/models/apis/bedrock-agent-runtime/2023-07-26/docs-2.json index 5a89eb186ca..47db6d4dacd 100644 --- a/models/apis/bedrock-agent-runtime/2023-07-26/docs-2.json +++ b/models/apis/bedrock-agent-runtime/2023-07-26/docs-2.json @@ -4,7 +4,7 @@ "operations": { "InvokeAgent": "
Sends a prompt for the agent to process and respond to.
The CLI doesn't support InvokeAgent
.
To continue the same conversation with an agent, use the same sessionId
value in the request.
To activate trace enablement, turn enableTrace
to true
. Trace enablement helps you follow the agent's reasoning process that led it to the information it processed, the actions it took, and the final result it yielded. For more information, see Trace enablement.
End a conversation by setting endSession
to true
.
Include attributes for the session or prompt in the sessionState
object.
The response is returned in the bytes
field of the chunk
object.
The attribution
object contains citations for parts of the response.
If you set enableTrace
to true
in the request, you can trace the agent's steps and reasoning process that led it to the response.
Errors are also surfaced in the response.
Queries a knowledge base and retrieves information from it.
", - "RetrieveAndGenerate": "Queries a knowledge base and generates responses based on the retrieved results. The response cites up to five sources but only selects the ones that are relevant to the query.
The numberOfResults
field is currently unsupported for RetrieveAndGenerate
. Don't include it in the vectorSearchConfiguration object.
Queries a knowledge base and generates responses based on the retrieved results. The response cites up to five sources but only selects the ones that are relevant to the query.
" }, "shapes": { "AccessDeniedException": { @@ -84,7 +84,7 @@ } }, "Citation": { - "base": "An object containing a segment of the generated response that is based on a source in the knowledge base, alongside information about the source.
", + "base": "An object containing a segment of the generated response that is based on a source in the knowledge base, alongside information about the source.
This data type is used in the following API operations:
Retrieve response – in the citations
field
RetrieveAndGenerate response – in the citations
field
Contains metadata about a part of the generated response that is accompanied by a citation.
", + "base": "Contains metadata about a part of the generated response that is accompanied by a citation.
This data type is used in the following API operations:
Retrieve response – in the generatedResponsePart
field
RetrieveAndGenerate response – in the generatedResponsePart
field
Contains the generated response and metadata
" } }, + "GenerationConfiguration": { + "base": "Contains configurations for response generation based on the knowledge base query results.
This data type is used in the following API operations:
", + "refs": { + "KnowledgeBaseRetrieveAndGenerateConfiguration$generationConfiguration": "Contains configurations for response generation based on the knowwledge base query results.
" + } + }, "InferenceConfiguration": { "base": "Specifications about the inference parameters that were provided alongside the prompt. These are specified in the PromptOverrideConfiguration object that was set when the agent was created or updated. For more information, see Inference parameters for foundation models.
", "refs": { @@ -229,9 +235,9 @@ } }, "KnowledgeBaseQuery": { - "base": "Contains the query made to the knowledge base.
", + "base": "Contains the query made to the knowledge base.
This data type is used in the following API operations:
Retrieve request – in the retrievalQuery
field
The query to send the knowledge base.
" + "RetrieveRequest$retrievalQuery": "Contains the query to send the knowledge base.
" } }, "KnowledgeBaseQueryTextString": { @@ -241,14 +247,14 @@ } }, "KnowledgeBaseRetrievalConfiguration": { - "base": "Contains details about how the results should be returned.
This data type is used in the following API operations:
", + "base": "Contains configurations for the knowledge base query and retrieval process. For more information, see Query configurations.
This data type is used in the following API operations:
Retrieve request – in the retrievalConfiguration
field
RetrieveAndGenerate request – in the retrievalConfiguration
field
Contains configurations for how to retrieve and return the knowledge base query.
", - "RetrieveRequest$retrievalConfiguration": "Contains details about how the results should be returned.
" + "RetrieveRequest$retrievalConfiguration": "Contains configurations for the knowledge base query and retrieval process. For more information, see Query configurations.
" } }, "KnowledgeBaseRetrievalResult": { - "base": "Details about a result from querying the knowledge base.
", + "base": "Details about a result from querying the knowledge base.
This data type is used in the following API operations:
Retrieve response – in the retrievalResults
field
Contains details about the resource being queried.
", + "base": "Contains details about the resource being queried.
This data type is used in the following API operations:
Retrieve request – in the knowledgeBaseConfiguration
field
RetrieveAndGenerate request – in the knowledgeBaseConfiguration
field
Contains details about the resource being queried.
" } }, "KnowledgeBaseVectorSearchConfiguration": { - "base": "Configurations for how to carry out the search.
", + "base": "Configurations for how to perform the search query and return results. For more information, see Query configurations.
This data type is used in the following API operations:
Retrieve request – in the vectorSearchConfiguration
field
RetrieveAndGenerate request – in the vectorSearchConfiguration
field
Contains details about how the results from the vector search should be returned.
" + "KnowledgeBaseRetrievalConfiguration$vectorSearchConfiguration": "Contains details about how the results from the vector search should be returned. For more information, see Query configurations.
" } }, "KnowledgeBaseVectorSearchConfigurationNumberOfResultsInteger": { "base": null, "refs": { - "KnowledgeBaseVectorSearchConfiguration$numberOfResults": "The number of results to return.
The numberOfResults
field is currently unsupported for RetrieveAndGenerate
. Don't include it in this field if you are sending a RetrieveAndGenerate
request.
The number of source chunks to retrieve.
" } }, "LambdaArn": { @@ -411,6 +417,12 @@ "SessionState$promptSessionAttributes": "Contains attributes that persist across a prompt and the values of those attributes. These attributes replace the $prompt_session_attributes$ placeholder variable in the orchestration prompt template. For more information, see Prompt template placeholder variables.
" } }, + "PromptTemplate": { + "base": "Contains the template for the prompt that's sent to the model for response generation. For more information, see Knowledge base prompt templates.
This data type is used in the following API operations:
", + "refs": { + "GenerationConfiguration$promptTemplate": "Contains the template for the prompt that's sent to the model for response generation.
" + } + }, "PromptText": { "base": null, "refs": { @@ -461,14 +473,14 @@ } }, "RetrievalResultContent": { - "base": "Contains the cited text from the data source.
", + "base": "Contains the cited text from the data source.
This data type is used in the following API operations:
Retrieve response – in the content
field
RetrieveAndGenerate response – in the content
field
Retrieve response – in the content
field
Contains a chunk of text from a data source in the knowledge base.
", "RetrievedReference$content": "Contains the cited text from the data source.
" } }, "RetrievalResultLocation": { - "base": "Contains information about the location of the data source.
", + "base": "Contains information about the location of the data source.
This data type is used in the following API operations:
Retrieve response – in the location
field
RetrieveAndGenerate response – in the location
field
Retrieve response – in the location
field
Contains information about the location of the data source.
", "RetrievedReference$location": "Contains information about the location of the data source.
" @@ -481,21 +493,21 @@ } }, "RetrievalResultS3Location": { - "base": "Contains the S3 location of the data source.
", + "base": "Contains the S3 location of the data source.
This data type is used in the following API operations:
Retrieve response – in the s3Location
field
RetrieveAndGenerate response – in the s3Location
field
Retrieve response – in the s3Location
field
Contains the S3 location of the data source.
" } }, "RetrieveAndGenerateConfiguration": { - "base": "Contains details about the resource being queried.
", + "base": "Contains details about the resource being queried.
This data type is used in the following API operations:
RetrieveAndGenerate request – in the retrieveAndGenerateConfiguration
field
Contains details about the resource being queried and the foundation model used for generation.
" + "RetrieveAndGenerateRequest$retrieveAndGenerateConfiguration": "Contains configurations for the knowledge base query and retrieval process. For more information, see Query configurations.
" } }, "RetrieveAndGenerateInput": { - "base": "Contains the query made to the knowledge base.
", + "base": "Contains the query made to the knowledge base.
This data type is used in the following API operations:
RetrieveAndGenerate request – in the input
field
Contains the query made to the knowledge base.
" + "RetrieveAndGenerateRequest$input": "Contains the query to be made to the knowledge base.
" } }, "RetrieveAndGenerateInputTextString": { @@ -505,7 +517,7 @@ } }, "RetrieveAndGenerateOutput": { - "base": "Contains the response generated from querying the knowledge base.
", + "base": "Contains the response generated from querying the knowledge base.
This data type is used in the following API operations:
RetrieveAndGenerate response – in the output
field
Contains the response generated from querying the knowledge base.
" } @@ -521,7 +533,7 @@ } }, "RetrieveAndGenerateSessionConfiguration": { - "base": "Contains configuration about the session with the knowledge base.
", + "base": "Contains configuration about the session with the knowledge base.
This data type is used in the following API operations:
RetrieveAndGenerate request – in the sessionConfiguration
field
Contains details about the session with the knowledge base.
" } @@ -543,7 +555,7 @@ } }, "RetrievedReference": { - "base": "Contains metadata about a sources cited for the generated response.
", + "base": "Contains metadata about a source cited for the generated response.
This data type is used in the following API operations:
RetrieveAndGenerate response – in the retrievedReferences
field
Retrieve response – in the retrievedReferences
field
Contains parameters that specify various attributes that persist across a session or prompt. You can define session state attributes as key-value pairs when writing a Lambda function for an action group or pass them when making an InvokeAgent request. Use session state attributes to control and provide conversational context for your agent and to help customize your agent's behavior. For more information, see Session context.
", + "base": "Contains parameters that specify various attributes that persist across a session or prompt. You can define session state attributes as key-value pairs when writing a Lambda function for an action group or pass them when making an InvokeAgent request. Use session state attributes to control and provide conversational context for your agent and to help customize your agent's behavior. For more information, see Control session context.
", "refs": { - "InvokeAgentRequest$sessionState": "Contains parameters that specify various attributes of the session.
" + "InvokeAgentRequest$sessionState": "Contains parameters that specify various attributes of the session. For more information, see Control session context.
" } }, "Source": { @@ -596,7 +608,7 @@ } }, "Span": { - "base": "Contains information about where the text with a citation begins and ends in the generated output.
", + "base": "Contains information about where the text with a citation begins and ends in the generated output.
This data type is used in the following API operations:
RetrieveAndGenerate response – in the span
field
Retrieve response – in the span
field
Contains information about where the text with a citation begins and ends in the generated output.
" } @@ -644,8 +656,14 @@ "InferenceConfiguration$temperature": "The likelihood of the model selecting higher-probability options while generating a response. A lower value makes the model more likely to choose higher-probability options, while a higher value makes the model more likely to choose lower-probability options.
" } }, + "TextPromptTemplate": { + "base": null, + "refs": { + "PromptTemplate$textPromptTemplate": "The template for the prompt that's sent to the model for response generation. You can include prompt placeholders, which become replaced before the prompt is sent to the model to provide instructions and context to the model. In addition, you can include XML tags to delineate meaningful sections of the prompt template.
For more information, see the following resources:
" + } + }, "TextResponsePart": { - "base": "Contains the part of the generated text that contains a citation, alongside where it begins and ends.
", + "base": "Contains the part of the generated text that contains a citation, alongside where it begins and ends.
This data type is used in the following API operations:
RetrieveAndGenerate response – in the textResponsePart
field
Retrieve response – in the textResponsePart
field
Contains metadata about a textual part of the generated response that is accompanied by a citation.
" } diff --git a/models/apis/ce/2017-10-25/api-2.json b/models/apis/ce/2017-10-25/api-2.json index 357104d8abe..efdaf3944e6 100644 --- a/models/apis/ce/2017-10-25/api-2.json +++ b/models/apis/ce/2017-10-25/api-2.json @@ -387,6 +387,19 @@ {"shape":"UnresolvableUsageUnitException"} ] }, + "ListCostAllocationTagBackfillHistory":{ + "name":"ListCostAllocationTagBackfillHistory", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListCostAllocationTagBackfillHistoryRequest"}, + "output":{"shape":"ListCostAllocationTagBackfillHistoryResponse"}, + "errors":[ + {"shape":"LimitExceededException"}, + {"shape":"InvalidNextTokenException"} + ] + }, "ListCostAllocationTags":{ "name":"ListCostAllocationTags", "http":{ @@ -451,6 +464,19 @@ {"shape":"LimitExceededException"} ] }, + "StartCostAllocationTagBackfill":{ + "name":"StartCostAllocationTagBackfill", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartCostAllocationTagBackfillRequest"}, + "output":{"shape":"StartCostAllocationTagBackfillResponse"}, + "errors":[ + {"shape":"LimitExceededException"}, + {"shape":"BackfillLimitExceededException"} + ] + }, "StartSavingsPlansPurchaseRecommendationGeneration":{ "name":"StartSavingsPlansPurchaseRecommendationGeneration", "http":{ @@ -690,6 +716,13 @@ "key":{"shape":"AttributeType"}, "value":{"shape":"AttributeValue"} }, + "BackfillLimitExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "exception":true + }, "BillExpirationException":{ "type":"structure", "members":{ @@ -720,6 +753,30 @@ "LastUsedDate":{"shape":"ZonedDateTime"} } }, + "CostAllocationTagBackfillRequest":{ + "type":"structure", + "members":{ + "BackfillFrom":{"shape":"ZonedDateTime"}, + "RequestedAt":{"shape":"ZonedDateTime"}, + "CompletedAt":{"shape":"ZonedDateTime"}, + "BackfillStatus":{"shape":"CostAllocationTagBackfillStatus"}, + "LastUpdatedAt":{"shape":"ZonedDateTime"} + } + }, + 
"CostAllocationTagBackfillRequestList":{ + "type":"list", + "member":{"shape":"CostAllocationTagBackfillRequest"}, + "max":1000, + "min":0 + }, + "CostAllocationTagBackfillStatus":{ + "type":"string", + "enum":[ + "SUCCEEDED", + "PROCESSING", + "FAILED" + ] + }, "CostAllocationTagKeyList":{ "type":"list", "member":{"shape":"TagKey"}, @@ -1927,6 +1984,23 @@ }, "exception":true }, + "ListCostAllocationTagBackfillHistoryRequest":{ + "type":"structure", + "members":{ + "NextToken":{"shape":"NextPageToken"}, + "MaxResults":{ + "shape":"CostAllocationTagsMaxResults", + "box":true + } + } + }, + "ListCostAllocationTagBackfillHistoryResponse":{ + "type":"structure", + "members":{ + "BackfillRequests":{"shape":"CostAllocationTagBackfillRequestList"}, + "NextToken":{"shape":"NextPageToken"} + } + }, "ListCostAllocationTagsRequest":{ "type":"structure", "members":{ @@ -2750,6 +2824,19 @@ "DESCENDING" ] }, + "StartCostAllocationTagBackfillRequest":{ + "type":"structure", + "required":["BackfillFrom"], + "members":{ + "BackfillFrom":{"shape":"ZonedDateTime"} + } + }, + "StartCostAllocationTagBackfillResponse":{ + "type":"structure", + "members":{ + "BackfillRequest":{"shape":"CostAllocationTagBackfillRequest"} + } + }, "StartSavingsPlansPurchaseRecommendationGenerationRequest":{ "type":"structure", "members":{ diff --git a/models/apis/ce/2017-10-25/docs-2.json b/models/apis/ce/2017-10-25/docs-2.json index 78855d4e627..74970522c56 100644 --- a/models/apis/ce/2017-10-25/docs-2.json +++ b/models/apis/ce/2017-10-25/docs-2.json @@ -29,11 +29,13 @@ "GetSavingsPlansUtilizationDetails": "Retrieves attribute data along with aggregate utilization and savings data for a given time period. This doesn't support granular or grouped data (daily/monthly) in response. You can't retrieve data by dates in a single response similar to GetSavingsPlanUtilization
, but you have the option to make multiple calls to GetSavingsPlanUtilizationDetails
by providing individual dates. You can use GetDimensionValues
in SAVINGS_PLANS
to determine the possible dimension values.
GetSavingsPlanUtilizationDetails
internally groups data by SavingsPlansArn
.
Queries for available tag keys and tag values for a specified period. You can search the tag values for an arbitrary string.
", "GetUsageForecast": "Retrieves a forecast for how much Amazon Web Services predicts that you will use over the forecast time period that you select, based on your past usage.
", + "ListCostAllocationTagBackfillHistory": "Retrieves a list of your historical cost allocation tag backfill requests.
", "ListCostAllocationTags": "Get a list of cost allocation tags. All inputs in the API are optional and serve as filters. By default, all cost allocation tags are returned.
", "ListCostCategoryDefinitions": "Returns the name, Amazon Resource Name (ARN), NumberOfRules
and effective dates of all Cost Categories defined in the account. You have the option to use EffectiveOn
to return a list of Cost Categories that were active on a specific date. If there is no EffectiveOn
specified, you’ll see Cost Categories that are effective on the current date. If Cost Category is still effective, EffectiveEnd
is omitted in the response. ListCostCategoryDefinitions
supports pagination. The request can have a MaxResults
range up to 100.
Retrieves a list of your historical recommendation generations within the past 30 days.
", "ListTagsForResource": "Returns a list of resource tags associated with the resource specified by the Amazon Resource Name (ARN).
", "ProvideAnomalyFeedback": "Modifies the feedback property of a given cost anomaly.
", + "StartCostAllocationTagBackfill": " Request a cost allocation tag backfill. This will backfill the activation status (either active
or inactive
) for all tag keys from para:BackfillFrom
up to the time this request is made.
You can request a backfill once every 24 hours.
", "StartSavingsPlansPurchaseRecommendationGeneration": "Requests a Savings Plans recommendation generation. This enables you to calculate a fresh set of Savings Plans recommendations that takes your latest usage data and current Savings Plans inventory into account. You can refresh Savings Plans recommendations up to three times daily for a consolidated billing family.
StartSavingsPlansPurchaseRecommendationGeneration
has no request syntax because no input parameters are needed to support this operation.
An API operation for adding one or more tags (key-value pairs) to a resource.
You can use the TagResource
operation with a resource that already has tags. If you specify a new tag key for the resource, this tag is appended to the list of tags associated with the resource. If you specify a tag key that is already associated with the resource, the new tag value you specify replaces the previous value for that tag.
Although the maximum number of array members is 200, user-tag maximum is 50. The remaining are reserved for Amazon Web Services use.
", "UntagResource": "Removes one or more tags from a resource. Specify only tag keys in your request. Don't specify the value.
", @@ -183,6 +185,11 @@ "SavingsPlansUtilizationDetail$Attributes": "The attribute that applies to a specific Dimension
.
A request to backfill is already in progress. Once the previous request is complete, you can create another request.
", + "refs": { + } + }, "BillExpirationException": { "base": "The requested report expired. Update the date interval and try again.
", "refs": { @@ -200,6 +207,25 @@ "CostAllocationTagList$member": null } }, + "CostAllocationTagBackfillRequest": { + "base": "The cost allocation tag backfill request structure that contains metadata and details of a certain backfill.
", + "refs": { + "CostAllocationTagBackfillRequestList$member": null, + "StartCostAllocationTagBackfillResponse$BackfillRequest": "An object containing detailed metadata of your new backfill request.
" + } + }, + "CostAllocationTagBackfillRequestList": { + "base": null, + "refs": { + "ListCostAllocationTagBackfillHistoryResponse$BackfillRequests": "The list of historical cost allocation tag backfill requests.
" + } + }, + "CostAllocationTagBackfillStatus": { + "base": null, + "refs": { + "CostAllocationTagBackfillRequest$BackfillStatus": "The status of the cost allocation tag backfill request.
" + } + }, "CostAllocationTagKeyList": { "base": null, "refs": { @@ -242,6 +268,7 @@ "CostAllocationTagsMaxResults": { "base": null, "refs": { + "ListCostAllocationTagBackfillHistoryRequest$MaxResults": "The maximum number of objects that are returned for this request.
", "ListCostAllocationTagsRequest$MaxResults": "The maximum number of objects that are returned for this request. By default, the request returns 100 results.
" } }, @@ -665,6 +692,7 @@ "ErrorMessage": { "base": null, "refs": { + "BackfillLimitExceededException$Message": null, "BillExpirationException$Message": null, "DataUnavailableException$Message": null, "GenerationExistsException$Message": null, @@ -1297,6 +1325,16 @@ "refs": { } }, + "ListCostAllocationTagBackfillHistoryRequest": { + "base": null, + "refs": { + } + }, + "ListCostAllocationTagBackfillHistoryResponse": { + "base": null, + "refs": { + } + }, "ListCostAllocationTagsRequest": { "base": null, "refs": { @@ -1506,6 +1544,8 @@ "GetSavingsPlansUtilizationDetailsResponse$NextToken": "The token to retrieve the next set of results. Amazon Web Services provides the token when the response from a previous call has more results than the maximum page size.
", "GetTagsRequest$NextPageToken": "The token to retrieve the next set of results. Amazon Web Services provides the token when the response from a previous call has more results than the maximum page size.
", "GetTagsResponse$NextPageToken": "The token for the next set of retrievable results. Amazon Web Services provides the token when the response from a previous call has more results than the maximum page size.
", + "ListCostAllocationTagBackfillHistoryRequest$NextToken": "The token to retrieve the next set of results. Amazon Web Services provides the token when the response from a previous call has more results than the maximum page size.
", + "ListCostAllocationTagBackfillHistoryResponse$NextToken": "The token to retrieve the next set of results. Amazon Web Services provides the token when the response from a previous call has more results than the maximum page size.
", "ListCostAllocationTagsRequest$NextToken": "The token to retrieve the next set of results. Amazon Web Services provides the token when the response from a previous call has more results than the maximum page size.
", "ListCostAllocationTagsResponse$NextToken": "The token to retrieve the next set of results. Amazon Web Services provides the token when the response from a previous call has more results than the maximum page size.
", "ListCostCategoryDefinitionsRequest$NextToken": "The token to retrieve the next set of results. Amazon Web Services provides the token when the response from a previous call has more results than the maximum page size.
", @@ -2098,6 +2138,16 @@ "SortDefinition$SortOrder": "The order that's used to sort the data.
" } }, + "StartCostAllocationTagBackfillRequest": { + "base": null, + "refs": { + } + }, + "StartCostAllocationTagBackfillResponse": { + "base": null, + "refs": { + } + }, "StartSavingsPlansPurchaseRecommendationGenerationRequest": { "base": null, "refs": { @@ -2423,6 +2473,10 @@ "refs": { "CostAllocationTag$LastUpdatedDate": "The last date that the tag was either activated or deactivated.
", "CostAllocationTag$LastUsedDate": "The last month that the tag was used on an Amazon Web Services resource.
", + "CostAllocationTagBackfillRequest$BackfillFrom": "The date the backfill starts from.
", + "CostAllocationTagBackfillRequest$RequestedAt": "The time when the backfill was requested.
", + "CostAllocationTagBackfillRequest$CompletedAt": "The backfill completion time.
", + "CostAllocationTagBackfillRequest$LastUpdatedAt": "The time when the backfill status was last updated.
", "CostCategory$EffectiveStart": "The effective start date of your Cost Category.
", "CostCategory$EffectiveEnd": "The effective end date of your Cost Category.
", "CostCategoryReference$EffectiveStart": "The Cost Category's effective start date.
", @@ -2438,6 +2492,7 @@ "RecommendationDetailData$GenerationTimestamp": null, "RecommendationDetailData$LatestUsageTimestamp": null, "RecommendationDetailHourlyMetrics$StartTime": null, + "StartCostAllocationTagBackfillRequest$BackfillFrom": "The date you want the backfill to start from. The date can only be a first day of the month (a billing start date). Dates can't precede the previous twelve months, or in the future.
", "StartSavingsPlansPurchaseRecommendationGenerationResponse$GenerationStartedTime": "The start time of the recommendation generation.
", "StartSavingsPlansPurchaseRecommendationGenerationResponse$EstimatedCompletionTime": "The estimated time for when the recommendation generation will complete.
", "UpdateCostCategoryDefinitionRequest$EffectiveStart": "The Cost Category's effective start date. It can only be a billing start date (first day of the month). If the date isn't provided, it's the first day of the current month. Dates can't be before the previous twelve months, or in the future.
", diff --git a/models/apis/ce/2017-10-25/paginators-1.json b/models/apis/ce/2017-10-25/paginators-1.json index 1b58ad8f0e2..b3d1c4d1ea2 100644 --- a/models/apis/ce/2017-10-25/paginators-1.json +++ b/models/apis/ce/2017-10-25/paginators-1.json @@ -10,6 +10,11 @@ "output_token": "NextToken", "limit_key": "MaxResults" }, + "ListCostAllocationTagBackfillHistory": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, "ListCostAllocationTags": { "input_token": "NextToken", "output_token": "NextToken", diff --git a/models/apis/ec2/2016-11-15/api-2.json b/models/apis/ec2/2016-11-15/api-2.json index c112443c4a3..ee73b8b16e9 100755 --- a/models/apis/ec2/2016-11-15/api-2.json +++ b/models/apis/ec2/2016-11-15/api-2.json @@ -35401,10 +35401,7 @@ "NewDhcpConfiguration":{ "type":"structure", "members":{ - "Key":{ - "shape":"String", - "locationName":"key" - }, + "Key":{"shape":"String"}, "Values":{ "shape":"ValueStringList", "locationName":"Value" diff --git a/models/apis/ec2/2016-11-15/docs-2.json b/models/apis/ec2/2016-11-15/docs-2.json index 3f188e0db51..f357aba64d2 100755 --- a/models/apis/ec2/2016-11-15/docs-2.json +++ b/models/apis/ec2/2016-11-15/docs-2.json @@ -15973,7 +15973,7 @@ } }, "NewDhcpConfiguration": { - "base": null, + "base": "Describes a DHCP configuration option.
", "refs": { "NewDhcpConfigurationList$member": null } @@ -20701,7 +20701,7 @@ "NetworkInterfacePrivateIpAddress$PrivateDnsName": "The private DNS name.
", "NetworkInterfacePrivateIpAddress$PrivateIpAddress": "The private IPv4 address.
", "NetworkNodesList$member": null, - "NewDhcpConfiguration$Key": null, + "NewDhcpConfiguration$Key": "The name of a DHCP option.
", "OidcOptions$Issuer": "The OIDC issuer.
", "OidcOptions$AuthorizationEndpoint": "The OIDC authorization endpoint.
", "OidcOptions$TokenEndpoint": "The OIDC token endpoint.
", @@ -22533,7 +22533,7 @@ "GetTransitGatewayRouteTableAssociationsRequest$MaxResults": "The maximum number of results to return with a single call. To retrieve the remaining results, make another call with the returned nextToken
value.
The maximum number of results to return with a single call. To retrieve the remaining results, make another call with the returned nextToken
value.
The maximum number of results to return with a single call. To retrieve the remaining results, make another call with the returned nextToken
value.
The maximum number of routes to return. If a value is not provided, the default is 1000.
" + "SearchTransitGatewayRoutesRequest$MaxResults": "The maximum number of routes to return.
" } }, "TransitGatewayMulitcastDomainAssociationState": { @@ -23364,7 +23364,7 @@ "ModifyVpcEndpointServicePermissionsRequest$RemoveAllowedPrincipals": "The Amazon Resource Names (ARN) of the principals. Permissions are revoked for principals in this list.
", "NetworkInsightsAnalysis$AdditionalAccounts": "The member accounts that contain resources that the path can traverse.
", "NetworkInsightsAnalysis$SuggestedAccounts": "Potential intermediate accounts.
", - "NewDhcpConfiguration$Values": null, + "NewDhcpConfiguration$Values": "The values for the DHCP option.
", "PacketHeaderStatement$SourceAddresses": "The source addresses.
", "PacketHeaderStatement$DestinationAddresses": "The destination addresses.
", "PacketHeaderStatement$SourcePorts": "The source ports.
", diff --git a/models/apis/ecs/2014-11-13/docs-2.json b/models/apis/ecs/2014-11-13/docs-2.json index 5c152775d06..ccde249fab1 100644 --- a/models/apis/ecs/2014-11-13/docs-2.json +++ b/models/apis/ecs/2014-11-13/docs-2.json @@ -4,8 +4,8 @@ "operations": { "CreateCapacityProvider": "Creates a new capacity provider. Capacity providers are associated with an Amazon ECS cluster and are used in capacity provider strategies to facilitate cluster auto scaling.
Only capacity providers that use an Auto Scaling group can be created. Amazon ECS tasks on Fargate use the FARGATE
and FARGATE_SPOT
capacity providers. These providers are available to all accounts in the Amazon Web Services Regions that Fargate supports.
Creates a new Amazon ECS cluster. By default, your account receives a default
cluster when you launch your first container instance. However, you can create your own cluster with a unique name with the CreateCluster
action.
When you call the CreateCluster API operation, Amazon ECS attempts to create the Amazon ECS service-linked role for your account. This is so that it can manage required resources in other Amazon Web Services services on your behalf. However, if the user that makes the call doesn't have permissions to create the service-linked role, it isn't created. For more information, see Using service-linked roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide.
Runs and maintains your desired number of tasks from a specified task definition. If the number of tasks running in a service drops below the desiredCount
, Amazon ECS runs another copy of the task in the specified cluster. To update an existing service, see the UpdateService action.
The following change began on March 21, 2024. When the task definition revision is not specified, Amazon ECS resolves the task definition revision before it authorizes the task definition.
In addition to maintaining the desired count of tasks in your service, you can optionally run your service behind one or more load balancers. The load balancers distribute traffic across the tasks that are associated with the service. For more information, see Service load balancing in the Amazon Elastic Container Service Developer Guide.
You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or updating a service. volumeConfigurations
is only supported for REPLICA service and not DAEMON service. For more information, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.
Tasks for services that don't use a load balancer are considered healthy if they're in the RUNNING
state. Tasks for services that use a load balancer are considered healthy if they're in the RUNNING
state and are reported as healthy by the load balancer.
There are two service scheduler strategies available:
REPLICA
- The replica scheduling strategy places and maintains your desired number of tasks across your cluster. By default, the service scheduler spreads tasks across Availability Zones. You can use task placement strategies and constraints to customize task placement decisions. For more information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide.
DAEMON
- The daemon scheduling strategy deploys exactly one task on each active container instance that meets all of the task placement constraints that you specify in your cluster. The service scheduler also evaluates the task placement constraints for running tasks. It also stops tasks that don't meet the placement constraints. When using this strategy, you don't need to specify a desired number of tasks, a task placement strategy, or use Service Auto Scaling policies. For more information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide.
You can optionally specify a deployment configuration for your service. The deployment is initiated by changing properties. For example, the deployment might be initiated by the task definition or by your desired count of a service. This is done with an UpdateService operation. The default value for a replica service for minimumHealthyPercent
is 100%. The default value for a daemon service for minimumHealthyPercent
is 0%.
If a service uses the ECS
deployment controller, the minimum healthy percent represents a lower limit on the number of tasks in a service that must remain in the RUNNING
state during a deployment. Specifically, it represents it as a percentage of your desired number of tasks (rounded up to the nearest integer). This happens when any of your container instances are in the DRAINING
state if the service contains tasks using the EC2 launch type. Using this parameter, you can deploy without using additional cluster capacity. For example, if you set your service to have desired number of four tasks and a minimum healthy percent of 50%, the scheduler might stop two existing tasks to free up cluster capacity before starting two new tasks. If they're in the RUNNING
state, tasks for services that don't use a load balancer are considered healthy . If they're in the RUNNING
state and reported as healthy by the load balancer, tasks for services that do use a load balancer are considered healthy . The default value for minimum healthy percent is 100%.
If a service uses the ECS
deployment controller, the maximum percent parameter represents an upper limit on the number of tasks in a service that are allowed in the RUNNING
or PENDING
state during a deployment. Specifically, it represents it as a percentage of the desired number of tasks (rounded down to the nearest integer). This happens when any of your container instances are in the DRAINING
state if the service contains tasks using the EC2 launch type. Using this parameter, you can define the deployment batch size. For example, if your service has a desired number of four tasks and a maximum percent value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default value for maximum percent is 200%.
If a service uses either the CODE_DEPLOY
or EXTERNAL
deployment controller types and tasks that use the EC2 launch type, the minimum healthy percent and maximum percent values are used only to define the lower and upper limit on the number of the tasks in the service that remain in the RUNNING
state. This is while the container instances are in the DRAINING
state. If the tasks in the service use the Fargate launch type, the minimum healthy percent and maximum percent values aren't used. This is the case even if they're currently visible when describing your service.
When creating a service that uses the EXTERNAL
deployment controller, you can specify only parameters that aren't controlled at the task set level. The only required parameter is the service name. You control your services using the CreateTaskSet operation. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide.
When the service scheduler launches new tasks, it determines task placement. For information about task placement and task placement strategies, see Amazon ECS task placement in the Amazon Elastic Container Service Developer Guide
Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.
", - "CreateTaskSet": "Create a task set in the specified cluster and service. This is used when a service uses the EXTERNAL
deployment controller type. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide.
The following change began on March 21, 2024. When the task definition revision is not specified, Amazon ECS resolves the task definition revision before it authorizes the task definition.
For information about the maximum number of task sets and other quotas, see Amazon ECS service quotas in the Amazon Elastic Container Service Developer Guide.
", + "CreateService": "Runs and maintains your desired number of tasks from a specified task definition. If the number of tasks running in a service drops below the desiredCount
, Amazon ECS runs another copy of the task in the specified cluster. To update an existing service, see the UpdateService action.
On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition.
In addition to maintaining the desired count of tasks in your service, you can optionally run your service behind one or more load balancers. The load balancers distribute traffic across the tasks that are associated with the service. For more information, see Service load balancing in the Amazon Elastic Container Service Developer Guide.
You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or updating a service. volumeConfigurations
is only supported for REPLICA service and not DAEMON service. For more information, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.
Tasks for services that don't use a load balancer are considered healthy if they're in the RUNNING
state. Tasks for services that use a load balancer are considered healthy if they're in the RUNNING
state and are reported as healthy by the load balancer.
There are two service scheduler strategies available:
REPLICA
- The replica scheduling strategy places and maintains your desired number of tasks across your cluster. By default, the service scheduler spreads tasks across Availability Zones. You can use task placement strategies and constraints to customize task placement decisions. For more information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide.
DAEMON
- The daemon scheduling strategy deploys exactly one task on each active container instance that meets all of the task placement constraints that you specify in your cluster. The service scheduler also evaluates the task placement constraints for running tasks. It also stops tasks that don't meet the placement constraints. When using this strategy, you don't need to specify a desired number of tasks, a task placement strategy, or use Service Auto Scaling policies. For more information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide.
You can optionally specify a deployment configuration for your service. The deployment is initiated by changing properties. For example, the deployment might be initiated by the task definition or by your desired count of a service. This is done with an UpdateService operation. The default value for a replica service for minimumHealthyPercent
is 100%. The default value for a daemon service for minimumHealthyPercent
is 0%.
If a service uses the ECS
deployment controller, the minimum healthy percent represents a lower limit on the number of tasks in a service that must remain in the RUNNING
state during a deployment. Specifically, it represents it as a percentage of your desired number of tasks (rounded up to the nearest integer). This happens when any of your container instances are in the DRAINING
state if the service contains tasks using the EC2 launch type. Using this parameter, you can deploy without using additional cluster capacity. For example, if you set your service to have desired number of four tasks and a minimum healthy percent of 50%, the scheduler might stop two existing tasks to free up cluster capacity before starting two new tasks. If they're in the RUNNING
state, tasks for services that don't use a load balancer are considered healthy . If they're in the RUNNING
state and reported as healthy by the load balancer, tasks for services that do use a load balancer are considered healthy . The default value for minimum healthy percent is 100%.
If a service uses the ECS
deployment controller, the maximum percent parameter represents an upper limit on the number of tasks in a service that are allowed in the RUNNING
or PENDING
state during a deployment. Specifically, it represents it as a percentage of the desired number of tasks (rounded down to the nearest integer). This happens when any of your container instances are in the DRAINING
state if the service contains tasks using the EC2 launch type. Using this parameter, you can define the deployment batch size. For example, if your service has a desired number of four tasks and a maximum percent value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default value for maximum percent is 200%.
If a service uses either the CODE_DEPLOY
or EXTERNAL
deployment controller types and tasks that use the EC2 launch type, the minimum healthy percent and maximum percent values are used only to define the lower and upper limit on the number of the tasks in the service that remain in the RUNNING
state. This is while the container instances are in the DRAINING
state. If the tasks in the service use the Fargate launch type, the minimum healthy percent and maximum percent values aren't used. This is the case even if they're currently visible when describing your service.
When creating a service that uses the EXTERNAL
deployment controller, you can specify only parameters that aren't controlled at the task set level. The only required parameter is the service name. You control your services using the CreateTaskSet operation. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide.
When the service scheduler launches new tasks, it determines task placement. For information about task placement and task placement strategies, see Amazon ECS task placement in the Amazon Elastic Container Service Developer Guide
Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.
", + "CreateTaskSet": "Create a task set in the specified cluster and service. This is used when a service uses the EXTERNAL
deployment controller type. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide.
On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition.
For information about the maximum number of task sets and other quotas, see Amazon ECS service quotas in the Amazon Elastic Container Service Developer Guide.
", "DeleteAccountSetting": "Disables an account setting for a specified user, role, or the root user for an account.
", "DeleteAttributes": "Deletes one or more custom attributes from an Amazon ECS resource.
", "DeleteCapacityProvider": "Deletes the specified capacity provider.
The FARGATE
and FARGATE_SPOT
capacity providers are reserved and can't be deleted. You can disassociate them from a cluster using either the PutClusterCapacityProviders API or by deleting the cluster.
Prior to a capacity provider being deleted, the capacity provider must be removed from the capacity provider strategy from all services. The UpdateService API can be used to remove a capacity provider from a service's capacity provider strategy. When updating a service, the forceNewDeployment
option can be used to ensure that any tasks using the Amazon EC2 instance capacity provided by the capacity provider are transitioned to use the capacity from the remaining capacity providers. Only capacity providers that aren't associated with a cluster can be deleted. To remove a capacity provider from a cluster, you can either use PutClusterCapacityProviders or delete the cluster.
Modifies the available capacity providers and the default capacity provider strategy for a cluster.
You must specify both the available capacity providers and a default capacity provider strategy for the cluster. If the specified cluster has existing capacity providers associated with it, you must specify all existing capacity providers in addition to any new ones you want to add. Any existing capacity providers that are associated with a cluster that are omitted from a PutClusterCapacityProviders API call will be disassociated with the cluster. You can only disassociate an existing capacity provider from a cluster if it's not being used by any existing tasks.
When creating a service or running a task on a cluster, if no capacity provider or launch type is specified, then the cluster's default capacity provider strategy is used. We recommend that you define a default capacity provider strategy for your cluster. However, you must specify an empty array ([]
) to bypass defining a default strategy.
This action is only used by the Amazon ECS agent, and it is not intended for use outside of the agent.
Registers an EC2 instance into the specified cluster. This instance becomes available to place containers on.
", "RegisterTaskDefinition": "Registers a new task definition from the supplied family
and containerDefinitions
. Optionally, you can add data volumes to your containers with the volumes
parameter. For more information about task definition parameters and defaults, see Amazon ECS Task Definitions in the Amazon Elastic Container Service Developer Guide.
You can specify a role for your task with the taskRoleArn
parameter. When you specify a role for a task, its containers can then use the latest versions of the CLI or SDKs to make API requests to the Amazon Web Services services that are specified in the policy that's associated with the role. For more information, see IAM Roles for Tasks in the Amazon Elastic Container Service Developer Guide.
You can specify a Docker networking mode for the containers in your task definition with the networkMode
parameter. The available network modes correspond to those described in Network settings in the Docker run reference. If you specify the awsvpc
network mode, the task is allocated an elastic network interface, and you must specify a NetworkConfiguration when you create a service or run a task with the task definition. For more information, see Task Networking in the Amazon Elastic Container Service Developer Guide.
Starts a new task using the specified task definition.
The following change began on March 21, 2024. When the task definition revision is not specified, Amazon ECS resolves the task definition revision before it authorizes the task definition.
You can allow Amazon ECS to place tasks for you, or you can customize how Amazon ECS places tasks using placement constraints and placement strategies. For more information, see Scheduling Tasks in the Amazon Elastic Container Service Developer Guide.
Alternatively, you can use StartTask to use your own scheduler or place tasks manually on specific container instances.
Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.
You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or updating a service. For more information, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.
The Amazon ECS API follows an eventual consistency model. This is because of the distributed nature of the system supporting the API. This means that the result of an API command you run that affects your Amazon ECS resources might not be immediately visible to all subsequent commands you run. Keep this in mind when you carry out an API command that immediately follows a previous API command.
To manage eventual consistency, you can do the following:
Confirm the state of the resource before you run a command to modify it. Run the DescribeTasks command using an exponential backoff algorithm to ensure that you allow enough time for the previous command to propagate through the system. To do this, run the DescribeTasks command repeatedly, starting with a couple of seconds of wait time and increasing gradually up to five minutes of wait time.
Add wait time between subsequent commands, even if the DescribeTasks command returns an accurate response. Apply an exponential backoff algorithm starting with a couple of seconds of wait time, and increase gradually up to about five minutes of wait time.
Starts a new task from the specified task definition on the specified container instance or instances.
The following change began on March 21, 2024. When the task definition revision is not specified, Amazon ECS resolves the task definition revision before it authorizes the task definition.
Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.
Alternatively, you can use RunTask to place tasks for you. For more information, see Scheduling Tasks in the Amazon Elastic Container Service Developer Guide.
You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or updating a service. For more information, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.
", + "RunTask": "Starts a new task using the specified task definition.
On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition.
You can allow Amazon ECS to place tasks for you, or you can customize how Amazon ECS places tasks using placement constraints and placement strategies. For more information, see Scheduling Tasks in the Amazon Elastic Container Service Developer Guide.
Alternatively, you can use StartTask to use your own scheduler or place tasks manually on specific container instances.
Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.
You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or updating a service. For more information, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.
The Amazon ECS API follows an eventual consistency model. This is because of the distributed nature of the system supporting the API. This means that the result of an API command you run that affects your Amazon ECS resources might not be immediately visible to all subsequent commands you run. Keep this in mind when you carry out an API command that immediately follows a previous API command.
To manage eventual consistency, you can do the following:
Confirm the state of the resource before you run a command to modify it. Run the DescribeTasks command using an exponential backoff algorithm to ensure that you allow enough time for the previous command to propagate through the system. To do this, run the DescribeTasks command repeatedly, starting with a couple of seconds of wait time and increasing gradually up to five minutes of wait time.
Add wait time between subsequent commands, even if the DescribeTasks command returns an accurate response. Apply an exponential backoff algorithm starting with a couple of seconds of wait time, and increase gradually up to about five minutes of wait time.
Starts a new task from the specified task definition on the specified container instance or instances.
On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition.
Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.
Alternatively, you can use RunTask to place tasks for you. For more information, see Scheduling Tasks in the Amazon Elastic Container Service Developer Guide.
You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or updating a service. For more information, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.
", "StopTask": "Stops a running task. Any tags associated with the task will be deleted.
When StopTask is called on a task, the equivalent of docker stop
is issued to the containers running in the task. This results in a SIGTERM
value and a default 30-second timeout, after which the SIGKILL
value is sent and the containers are forcibly stopped. If the container handles the SIGTERM
value gracefully and exits within 30 seconds from receiving it, no SIGKILL
value is sent.
The default 30-second timeout can be configured on the Amazon ECS container agent with the ECS_CONTAINER_STOP_TIMEOUT
variable. For more information, see Amazon ECS Container Agent Configuration in the Amazon Elastic Container Service Developer Guide.
This action is only used by the Amazon ECS agent, and it is not intended for use outside of the agent.
Sent to acknowledge that an attachment changed states.
", "SubmitContainerStateChange": "This action is only used by the Amazon ECS agent, and it is not intended for use outside of the agent.
Sent to acknowledge that a container changed states.
", @@ -54,7 +54,7 @@ "UpdateClusterSettings": "Modifies the settings to use for a cluster.
", "UpdateContainerAgent": "Updates the Amazon ECS container agent on a specified container instance. Updating the Amazon ECS container agent doesn't interrupt running tasks or services on the container instance. The process for updating the agent differs depending on whether your container instance was launched with the Amazon ECS-optimized AMI or another operating system.
The UpdateContainerAgent
API isn't supported for container instances using the Amazon ECS-optimized Amazon Linux 2 (arm64) AMI. To update the container agent, you can update the ecs-init
package. This updates the agent. For more information, see Updating the Amazon ECS container agent in the Amazon Elastic Container Service Developer Guide.
Agent updates with the UpdateContainerAgent
API operation do not apply to Windows container instances. We recommend that you launch new container instances to update the agent version in your Windows clusters.
The UpdateContainerAgent
API requires an Amazon ECS-optimized AMI or Amazon Linux AMI with the ecs-init
service installed and running. For help updating the Amazon ECS container agent on other operating systems, see Manually updating the Amazon ECS container agent in the Amazon Elastic Container Service Developer Guide.
Modifies the status of an Amazon ECS container instance.
Once a container instance has reached an ACTIVE
state, you can change the status of a container instance to DRAINING
to manually remove an instance from a cluster, for example to perform system updates, update the Docker daemon, or scale down the cluster size.
A container instance can't be changed to DRAINING
until it has reached an ACTIVE
status. If the instance is in any other status, an error will be received.
When you set a container instance to DRAINING
, Amazon ECS prevents new tasks from being scheduled for placement on the container instance and replacement service tasks are started on other container instances in the cluster if the resources are available. Service tasks on the container instance that are in the PENDING
state are stopped immediately.
Service tasks on the container instance that are in the RUNNING
state are stopped and replaced according to the service's deployment configuration parameters, minimumHealthyPercent
and maximumPercent
. You can change the deployment configuration of your service using UpdateService.
If minimumHealthyPercent
is below 100%, the scheduler can ignore desiredCount
temporarily during task replacement. For example, desiredCount
is four tasks, a minimum of 50% allows the scheduler to stop two existing tasks before starting two new tasks. If the minimum is 100%, the service scheduler can't remove existing tasks until the replacement tasks are considered healthy. Tasks for services that do not use a load balancer are considered healthy if they're in the RUNNING
state. Tasks for services that use a load balancer are considered healthy if they're in the RUNNING
state and are reported as healthy by the load balancer.
The maximumPercent
parameter represents an upper limit on the number of running tasks during task replacement. You can use this to define the replacement batch size. For example, if desiredCount
is four tasks, a maximum of 200% starts four new tasks before stopping the four tasks to be drained, provided that the cluster resources required to do this are available. If the maximum is 100%, then replacement tasks can't start until the draining tasks have stopped.
Any PENDING
or RUNNING
tasks that do not belong to a service aren't affected. You must wait for them to finish or stop them manually.
A container instance has completed draining when it has no more RUNNING
tasks. You can verify this using ListTasks.
When a container instance has been drained, you can set a container instance to ACTIVE
status and once it has reached that status the Amazon ECS scheduler can begin scheduling tasks on the instance again.
Modifies the parameters of a service.
The following change began on March 21, 2024. When the task definition revision is not specified, Amazon ECS resolves the task definition revision before it authorizes the task definition.
For services using the rolling update (ECS
) you can update the desired count, deployment configuration, network configuration, load balancers, service registries, enable ECS managed tags option, propagate tags option, task placement constraints and strategies, and task definition. When you update any of these parameters, Amazon ECS starts new tasks with the new configuration.
You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when starting or running a task, or when creating or updating a service. For more information, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide. You can update your volume configurations and trigger a new deployment. volumeConfigurations
is only supported for REPLICA service and not DAEMON service. If you leave volumeConfigurations
null
, it doesn't trigger a new deployment. For more information on volumes, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.
For services using the blue/green (CODE_DEPLOY
) deployment controller, only the desired count, deployment configuration, health check grace period, task placement constraints and strategies, enable ECS managed tags option, and propagate tags can be updated using this API. If the network configuration, platform version, task definition, or load balancer need to be updated, create a new CodeDeploy deployment. For more information, see CreateDeployment in the CodeDeploy API Reference.
For services using an external deployment controller, you can update only the desired count, task placement constraints and strategies, health check grace period, enable ECS managed tags option, and propagate tags option, using this API. If the launch type, load balancer, network configuration, platform version, or task definition need to be updated, create a new task set. For more information, see CreateTaskSet.
You can add to or subtract from the number of instantiations of a task definition in a service by specifying the cluster that the service is running in and a new desiredCount
parameter.
You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when starting or running a task, or when creating or updating a service. For more information, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.
If you have updated the container image of your application, you can create a new task definition with that image and deploy it to your service. The service scheduler uses the minimum healthy percent and maximum percent parameters (in the service's deployment configuration) to determine the deployment strategy.
If your updated Docker image uses the same tag as what is in the existing task definition for your service (for example, my_image:latest
), you don't need to create a new revision of your task definition. You can update the service using the forceNewDeployment
option. The new tasks launched by the deployment pull the current image/tag combination from your repository when they start.
You can also update the deployment configuration of a service. When a deployment is triggered by updating the task definition of a service, the service scheduler uses the deployment configuration parameters, minimumHealthyPercent
and maximumPercent
, to determine the deployment strategy.
If minimumHealthyPercent
is below 100%, the scheduler can ignore desiredCount
temporarily during a deployment. For example, if desiredCount
is four tasks, a minimum of 50% allows the scheduler to stop two existing tasks before starting two new tasks. Tasks for services that don't use a load balancer are considered healthy if they're in the RUNNING
state. Tasks for services that use a load balancer are considered healthy if they're in the RUNNING
state and are reported as healthy by the load balancer.
The maximumPercent
parameter represents an upper limit on the number of running tasks during a deployment. You can use it to define the deployment batch size. For example, if desiredCount
is four tasks, a maximum of 200% starts four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available).
When UpdateService stops a task during a deployment, the equivalent of docker stop
is issued to the containers running in the task. This results in a SIGTERM
and a 30-second timeout. After this, SIGKILL
is sent and the containers are forcibly stopped. If the container handles the SIGTERM
gracefully and exits within 30 seconds from receiving it, no SIGKILL
is sent.
When the service scheduler launches new tasks, it determines task placement in your cluster with the following logic.
Determine which of the container instances in your cluster can support your service's task definition. For example, they have the required CPU, memory, ports, and container instance attributes.
By default, the service scheduler attempts to balance tasks across Availability Zones in this manner even though you can choose a different placement strategy.
Sort the valid container instances by the fewest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have zero, valid container instances in either zone B or C are considered optimal for placement.
Place the new service task on a valid container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the fewest number of running tasks for this service.
When the service scheduler stops running tasks, it attempts to maintain balance across the Availability Zones in your cluster using the following logic:
Sort the container instances by the largest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have two, container instances in either zone B or C are considered optimal for termination.
Stop the task on a container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the largest number of running tasks for this service.
You must have a service-linked role when you update any of the following service properties:
loadBalancers
,
serviceRegistries
For more information about the role see the CreateService
request parameter role
.
Modifies the parameters of a service.
On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition.
For services using the rolling update (ECS
) you can update the desired count, deployment configuration, network configuration, load balancers, service registries, enable ECS managed tags option, propagate tags option, task placement constraints and strategies, and task definition. When you update any of these parameters, Amazon ECS starts new tasks with the new configuration.
You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when starting or running a task, or when creating or updating a service. For more information, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide. You can update your volume configurations and trigger a new deployment. volumeConfigurations
is only supported for REPLICA service and not DAEMON service. If you leave volumeConfigurations
null
, it doesn't trigger a new deployment. For more information on volumes, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.
For services using the blue/green (CODE_DEPLOY
) deployment controller, only the desired count, deployment configuration, health check grace period, task placement constraints and strategies, enable ECS managed tags option, and propagate tags can be updated using this API. If the network configuration, platform version, task definition, or load balancer need to be updated, create a new CodeDeploy deployment. For more information, see CreateDeployment in the CodeDeploy API Reference.
For services using an external deployment controller, you can update only the desired count, task placement constraints and strategies, health check grace period, enable ECS managed tags option, and propagate tags option, using this API. If the launch type, load balancer, network configuration, platform version, or task definition need to be updated, create a new task set. For more information, see CreateTaskSet.
You can add to or subtract from the number of instantiations of a task definition in a service by specifying the cluster that the service is running in and a new desiredCount
parameter.
You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when starting or running a task, or when creating or updating a service. For more information, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.
If you have updated the container image of your application, you can create a new task definition with that image and deploy it to your service. The service scheduler uses the minimum healthy percent and maximum percent parameters (in the service's deployment configuration) to determine the deployment strategy.
If your updated Docker image uses the same tag as what is in the existing task definition for your service (for example, my_image:latest
), you don't need to create a new revision of your task definition. You can update the service using the forceNewDeployment
option. The new tasks launched by the deployment pull the current image/tag combination from your repository when they start.
You can also update the deployment configuration of a service. When a deployment is triggered by updating the task definition of a service, the service scheduler uses the deployment configuration parameters, minimumHealthyPercent
and maximumPercent
, to determine the deployment strategy.
If minimumHealthyPercent
is below 100%, the scheduler can ignore desiredCount
temporarily during a deployment. For example, if desiredCount
is four tasks, a minimum of 50% allows the scheduler to stop two existing tasks before starting two new tasks. Tasks for services that don't use a load balancer are considered healthy if they're in the RUNNING
state. Tasks for services that use a load balancer are considered healthy if they're in the RUNNING
state and are reported as healthy by the load balancer.
The maximumPercent
parameter represents an upper limit on the number of running tasks during a deployment. You can use it to define the deployment batch size. For example, if desiredCount
is four tasks, a maximum of 200% starts four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available).
When UpdateService stops a task during a deployment, the equivalent of docker stop
is issued to the containers running in the task. This results in a SIGTERM
and a 30-second timeout. After this, SIGKILL
is sent and the containers are forcibly stopped. If the container handles the SIGTERM
gracefully and exits within 30 seconds from receiving it, no SIGKILL
is sent.
When the service scheduler launches new tasks, it determines task placement in your cluster with the following logic.
Determine which of the container instances in your cluster can support your service's task definition. For example, they have the required CPU, memory, ports, and container instance attributes.
By default, the service scheduler attempts to balance tasks across Availability Zones in this manner even though you can choose a different placement strategy.
Sort the valid container instances by the fewest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have zero, valid container instances in either zone B or C are considered optimal for placement.
Place the new service task on a valid container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the fewest number of running tasks for this service.
When the service scheduler stops running tasks, it attempts to maintain balance across the Availability Zones in your cluster using the following logic:
Sort the container instances by the largest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have two, container instances in either zone B or C are considered optimal for termination.
Stop the task on a container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the largest number of running tasks for this service.
You must have a service-linked role when you update any of the following service properties:
loadBalancers
,
serviceRegistries
For more information about the role see the CreateService
request parameter role
.
Modifies which task set in a service is the primary task set. Any parameters that are updated on the primary task set in a service will transition to the service. This is used when a service uses the EXTERNAL
deployment controller type. For more information, see Amazon ECS Deployment Types in the Amazon Elastic Container Service Developer Guide.
Updates the protection status of a task. You can set protectionEnabled
to true
to protect your task from termination during scale-in events from Service Autoscaling or deployments.
Task-protection, by default, expires after 2 hours at which point Amazon ECS clears the protectionEnabled
property making the task eligible for termination by a subsequent scale-in event.
You can specify a custom expiration period for task protection from 1 minute to up to 2,880 minutes (48 hours). To specify the custom expiration period, set the expiresInMinutes
property. The expiresInMinutes
property is always reset when you invoke this operation for a task that already has protectionEnabled
set to true
. You can keep extending the protection expiration period of a task by invoking this operation repeatedly.
To learn more about Amazon ECS task protection, see Task scale-in protection in the Amazon Elastic Container Service Developer Guide .
This operation is only supported for tasks belonging to an Amazon ECS service. Invoking this operation for a standalone task will result in a TASK_NOT_VALID
failure. For more information, see API failure reasons.
If you prefer to set task protection from within the container, we recommend using the Task scale-in protection endpoint.
Modifies a task set. This is used when a service uses the EXTERNAL
deployment controller type. For more information, see Amazon ECS Deployment Types in the Amazon Elastic Container Service Developer Guide.
Creates a new volume with a specific amount of throughput and storage capacity.
", "DeleteEnvironment": "Delete a FinSpace environment.
", "DeleteKxCluster": "Deletes a kdb cluster.
", + "DeleteKxClusterNode": "Deletes the specified nodes from a cluster.
", "DeleteKxDatabase": "Deletes the specified database and all of its associated data. This action is irreversible. You must copy any data out of the database before deleting it if the data is to be retained.
", "DeleteKxDataview": "Deletes the specified dataview. Before deleting a dataview, make sure that it is not in use by any cluster.
", "DeleteKxEnvironment": "Deletes the kdb environment. This action is irreversible. Deleting a kdb environment will remove all the associated data and any services running in it.
", @@ -147,7 +148,7 @@ "ChangeRequests": { "base": null, "refs": { - "CreateKxChangesetRequest$changeRequests": "A list of change request objects that are run in order. A change request object consists of changeType
, s3Path
, and dbPath
. A changeType can have the following values:
PUT – Adds or updates files in a database.
DELETE – Deletes files in a database.
All the change requests require a mandatory dbPath
attribute that defines the path within the database directory. All database paths must start with a leading / and end with a trailing /. The s3Path
attribute defines the s3 source file path and is required for a PUT change type. The s3path
must end with a trailing / if it is a directory and must end without a trailing / if it is a file.
Here are a few examples of how you can use the change request object:
This request adds a single sym file at database root location.
{ \"changeType\": \"PUT\", \"s3Path\":\"s3://bucket/db/sym\", \"dbPath\":\"/\"}
This request adds files in the given s3Path
under the 2020.01.02 partition of the database.
{ \"changeType\": \"PUT\", \"s3Path\":\"s3://bucket/db/2020.01.02/\", \"dbPath\":\"/2020.01.02/\"}
This request adds files in the given s3Path
under the taq table partition of the database.
[ { \"changeType\": \"PUT\", \"s3Path\":\"s3://bucket/db/2020.01.02/taq/\", \"dbPath\":\"/2020.01.02/taq/\"}]
This request deletes the 2020.01.02 partition of the database.
[{ \"changeType\": \"DELETE\", \"dbPath\": \"/2020.01.02/\"} ]
The DELETE request allows you to delete the existing files under the 2020.01.02 partition of the database, and the PUT request adds a new taq table under it.
[ {\"changeType\": \"DELETE\", \"dbPath\":\"/2020.01.02/\"}, {\"changeType\": \"PUT\", \"s3Path\":\"s3://bucket/db/2020.01.02/taq/\", \"dbPath\":\"/2020.01.02/taq/\"}]
A list of change request objects that are run in order. A change request object consists of changeType
, s3Path
, and dbPath
. A changeType can have the following values:
PUT – Adds or updates files in a database.
DELETE – Deletes files in a database.
All the change requests require a mandatory dbPath
attribute that defines the path within the database directory. All database paths must start with a leading / and end with a trailing /. The s3Path
attribute defines the s3 source file path and is required for a PUT change type. The s3path
must end with a trailing / if it is a directory and must end without a trailing / if it is a file.
Here are a few examples of how you can use the change request object:
This request adds a single sym file at database root location.
{ \"changeType\": \"PUT\", \"s3Path\":\"s3://bucket/db/sym\", \"dbPath\":\"/\"}
This request adds files in the given s3Path
under the 2020.01.02 partition of the database.
{ \"changeType\": \"PUT\", \"s3Path\":\"s3://bucket/db/2020.01.02/\", \"dbPath\":\"/2020.01.02/\"}
This request adds files in the given s3Path
under the taq table partition of the database.
[ { \"changeType\": \"PUT\", \"s3Path\":\"s3://bucket/db/2020.01.02/taq/\", \"dbPath\":\"/2020.01.02/taq/\"}]
This request deletes the 2020.01.02 partition of the database.
[{ \"changeType\": \"DELETE\", \"dbPath\": \"/2020.01.02/\"} ]
The DELETE request allows you to delete the existing files under the 2020.01.02 partition of the database, and the PUT request adds a new taq table under it.
[ {\"changeType\": \"DELETE\", \"dbPath\":\"/2020.01.02/\"}, {\"changeType\": \"PUT\", \"s3Path\":\"s3://bucket/db/2020.01.02/taq/\", \"dbPath\":\"/2020.01.02/taq/\"}]
A list of change requests.
", "GetKxChangesetResponse$changeRequests": "A list of change request objects that are run in order.
" } @@ -428,6 +429,16 @@ "refs": { } }, + "DeleteKxClusterNodeRequest": { + "base": null, + "refs": { + } + }, + "DeleteKxClusterNodeResponse": { + "base": null, + "refs": { + } + }, "DeleteKxClusterRequest": { "base": null, "refs": { @@ -987,6 +998,7 @@ "AttachedClusterList$member": null, "CreateKxClusterRequest$clusterName": "A unique name for the cluster that you want to create.
", "CreateKxClusterResponse$clusterName": "A unique name for the cluster.
", + "DeleteKxClusterNodeRequest$clusterName": "The name of the cluster, for which you want to delete the nodes.
", "DeleteKxClusterRequest$clusterName": "The name of the cluster that you want to delete.
", "GetKxClusterRequest$clusterName": "The name of the cluster that you want to retrieve.
", "GetKxClusterResponse$clusterName": "A unique name for the cluster.
", @@ -1009,6 +1021,7 @@ "KxClusterNodeIdString": { "base": null, "refs": { + "DeleteKxClusterNodeRequest$nodeId": "A unique identifier for the node that you want to delete.
", "KxNode$nodeId": "A unique identifier for the node.
" } }, @@ -1224,6 +1237,7 @@ "CreateKxScalingGroupResponse$environmentId": "A unique identifier for the kdb environment, where you create the scaling group.
", "CreateKxVolumeRequest$environmentId": "A unique identifier for the kdb environment, whose clusters can attach to the volume.
", "CreateKxVolumeResponse$environmentId": "A unique identifier for the kdb environment, whose clusters can attach to the volume.
", + "DeleteKxClusterNodeRequest$environmentId": "A unique identifier for the kdb environment.
", "DeleteKxClusterRequest$environmentId": "A unique identifier for the kdb environment.
", "DeleteKxScalingGroupRequest$environmentId": "A unique identifier for the kdb environment, from where you want to delete the dataview.
", "DeleteKxVolumeRequest$environmentId": "A unique identifier for the kdb environment, whose clusters can attach to the volume.
", @@ -1281,7 +1295,7 @@ "KxNAS1Size": { "base": null, "refs": { - "KxNAS1Configuration$size": "The size of the network attached storage.
" + "KxNAS1Configuration$size": " The size of the network attached storage. For storage type SSD_1000
and SSD_250
you can select the minimum size as 1200 GB or increments of 2400 GB. For storage type HDD_12
you can select the minimum size as 6000 GB or increments of 6000 GB.
Specifies the status of the cluster nodes.
RUNNING
– The node is actively serving.
PROVISIONING
– The node is being prepared.