Skip to content

Commit

Permalink
Updated service API models for release.
Browse files Browse the repository at this point in the history
  • Loading branch information
AWS SDK for Ruby authored and Nobody committed Sep 22, 2020
1 parent 83f8c08 commit a8276e8
Show file tree
Hide file tree
Showing 10 changed files with 530 additions and 43 deletions.
8 changes: 8 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -1,6 +1,14 @@
Unreleased Changes
------------------

* Feature - Aws::Comprehend - Updated the API, and documentation for Amazon Comprehend.

* Feature - Aws::LexModelBuildingService - Updated the API, and documentation for Amazon Lex Model Building Service.

* Feature - Aws::DynamoDBStreams - Updated the API, and documentation for Amazon DynamoDB Streams.

* Feature - Aws::WorkMail - Updated the API, and documentation for Amazon WorkMail.

2.11.590 (2020-09-21)
------------------

Expand Down
59 changes: 51 additions & 8 deletions aws-sdk-core/apis/comprehend/2017-11-27/api-2.json
Original file line number Diff line number Diff line change
Expand Up @@ -879,6 +879,27 @@
},
"shapes":{
"AnyLengthString":{"type":"string"},
"AttributeNamesList":{
"type":"list",
"member":{"shape":"AttributeNamesListItem"}
},
"AttributeNamesListItem":{
"type":"string",
"max":63,
"min":1,
"pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9])*"
},
"AugmentedManifestsListItem":{
"type":"structure",
"required":[
"S3Uri",
"AttributeNames"
],
"members":{
"S3Uri":{"shape":"S3Uri"},
"AttributeNames":{"shape":"AttributeNamesList"}
}
},
"BatchDetectDominantLanguageItemResult":{
"type":"structure",
"members":{
Expand Down Expand Up @@ -1527,6 +1548,17 @@
"max":256,
"pattern":"arn:aws(-[^:]+)?:comprehend:[a-zA-Z0-9-]*:[0-9]{12}:document-classifier/[a-zA-Z0-9](-*[a-zA-Z0-9])*"
},
"DocumentClassifierAugmentedManifestsList":{
"type":"list",
"member":{"shape":"AugmentedManifestsListItem"}
},
"DocumentClassifierDataFormat":{
"type":"string",
"enum":[
"COMPREHEND_CSV",
"AUGMENTED_MANIFEST"
]
},
"DocumentClassifierEndpointArn":{
"type":"string",
"max":256,
Expand All @@ -1542,10 +1574,11 @@
},
"DocumentClassifierInputDataConfig":{
"type":"structure",
"required":["S3Uri"],
"members":{
"DataFormat":{"shape":"DocumentClassifierDataFormat"},
"S3Uri":{"shape":"S3Uri"},
"LabelDelimiter":{"shape":"LabelDelimiter"}
"LabelDelimiter":{"shape":"LabelDelimiter"},
"AugmentedManifests":{"shape":"DocumentClassifierAugmentedManifestsList"}
}
},
"DocumentClassifierMode":{
Expand Down Expand Up @@ -1719,6 +1752,17 @@
"max":256,
"pattern":"arn:aws(-[^:]+)?:comprehend:[a-zA-Z0-9-]*:[0-9]{12}:entity-recognizer/[a-zA-Z0-9](-*[a-zA-Z0-9])*"
},
"EntityRecognizerAugmentedManifestsList":{
"type":"list",
"member":{"shape":"AugmentedManifestsListItem"}
},
"EntityRecognizerDataFormat":{
"type":"string",
"enum":[
"COMPREHEND_CSV",
"AUGMENTED_MANIFEST"
]
},
"EntityRecognizerDocuments":{
"type":"structure",
"required":["S3Uri"],
Expand Down Expand Up @@ -1756,15 +1800,14 @@
},
"EntityRecognizerInputDataConfig":{
"type":"structure",
"required":[
"EntityTypes",
"Documents"
],
"required":["EntityTypes"],
"members":{
"DataFormat":{"shape":"EntityRecognizerDataFormat"},
"EntityTypes":{"shape":"EntityTypesList"},
"Documents":{"shape":"EntityRecognizerDocuments"},
"Annotations":{"shape":"EntityRecognizerAnnotations"},
"EntityList":{"shape":"EntityRecognizerEntityList"}
"EntityList":{"shape":"EntityRecognizerEntityList"},
"AugmentedManifests":{"shape":"EntityRecognizerAugmentedManifestsList"}
}
},
"EntityRecognizerMetadata":{
Expand Down Expand Up @@ -1828,7 +1871,7 @@
"EntityTypeName":{
"type":"string",
"max":64,
"pattern":"[_A-Z0-9]+"
"pattern":"^(?:(?!\\\\n+|\\\\t+|\\\\r+|[\\r\\t\\n\\s,]).)+$"
},
"EntityTypesEvaluationMetrics":{
"type":"structure",
Expand Down
60 changes: 52 additions & 8 deletions aws-sdk-core/apis/comprehend/2017-11-27/docs-2.json
Original file line number Diff line number Diff line change
Expand Up @@ -76,6 +76,25 @@
"TopicsDetectionJobProperties$Message": "<p>A description for the status of a job.</p>"
}
},
"AttributeNamesList": {
"base": null,
"refs": {
"AugmentedManifestsListItem$AttributeNames": "<p>The JSON attribute that contains the annotations for your training documents. The number of attribute names that you specify depends on whether your augmented manifest file is the output of a single labeling job or a chained labeling job.</p> <p>If your file is the output of a single labeling job, specify the LabelAttributeName key that was used when the job was created in Ground Truth.</p> <p>If your file is the output of a chained labeling job, specify the LabelAttributeName key for one or more jobs in the chain. Each LabelAttributeName key provides the annotations from an individual job.</p>"
}
},
"AttributeNamesListItem": {
"base": null,
"refs": {
"AttributeNamesList$member": null
}
},
"AugmentedManifestsListItem": {
"base": "<p>An augmented manifest file that provides training data for your custom model. An augmented manifest file is a labeled dataset that is produced by Amazon SageMaker Ground Truth.</p>",
"refs": {
"DocumentClassifierAugmentedManifestsList$member": null,
"EntityRecognizerAugmentedManifestsList$member": null
}
},
"BatchDetectDominantLanguageItemResult": {
"base": "<p>The result of calling the operation. The operation returns one object for each document that is successfully processed by the operation.</p>",
"refs": {
Expand Down Expand Up @@ -538,6 +557,18 @@
"StopTrainingDocumentClassifierRequest$DocumentClassifierArn": "<p>The Amazon Resource Name (ARN) that identifies the document classifier currently being trained.</p>"
}
},
"DocumentClassifierAugmentedManifestsList": {
"base": null,
"refs": {
"DocumentClassifierInputDataConfig$AugmentedManifests": "<p>A list of augmented manifest files that provide training data for your custom model. An augmented manifest file is a labeled dataset that is produced by Amazon SageMaker Ground Truth.</p> <p>This parameter is required if you set <code>DataFormat</code> to <code>AUGMENTED_MANIFEST</code>.</p>"
}
},
"DocumentClassifierDataFormat": {
"base": null,
"refs": {
"DocumentClassifierInputDataConfig$DataFormat": "<p>The format of your training data:</p> <ul> <li> <p> <code>COMPREHEND_CSV</code>: A two-column CSV file, where labels are provided in the first column, and documents are provided in the second. If you use this value, you must provide the <code>S3Uri</code> parameter in your request.</p> </li> <li> <p> <code>AUGMENTED_MANIFEST</code>: A labeled dataset that is produced by Amazon SageMaker Ground Truth. This file is in JSON lines format. Each line is a complete JSON object that contains a training document and its associated labels. </p> <p>If you use this value, you must provide the <code>AugmentedManifests</code> parameter in your request.</p> </li> </ul> <p>If you don't specify a value, Amazon Comprehend uses <code>COMPREHEND_CSV</code> as the default.</p>"
}
},
"DocumentClassifierEndpointArn": {
"base": null,
"refs": {
Expand Down Expand Up @@ -688,7 +719,7 @@
"EntityRecognizerAnnotations": {
"base": "<p>Describes the annotations associated with a entity recognizer.</p>",
"refs": {
"EntityRecognizerInputDataConfig$Annotations": "<p>S3 location of the annotations file for an entity recognizer.</p>"
"EntityRecognizerInputDataConfig$Annotations": "<p>The S3 location of the CSV file that annotates your training documents.</p>"
}
},
"EntityRecognizerArn": {
Expand All @@ -703,10 +734,22 @@
"StopTrainingEntityRecognizerRequest$EntityRecognizerArn": "<p>The Amazon Resource Name (ARN) that identifies the entity recognizer currently being trained.</p>"
}
},
"EntityRecognizerAugmentedManifestsList": {
"base": null,
"refs": {
"EntityRecognizerInputDataConfig$AugmentedManifests": "<p>A list of augmented manifest files that provide training data for your custom model. An augmented manifest file is a labeled dataset that is produced by Amazon SageMaker Ground Truth.</p> <p>This parameter is required if you set <code>DataFormat</code> to <code>AUGMENTED_MANIFEST</code>.</p>"
}
},
"EntityRecognizerDataFormat": {
"base": null,
"refs": {
"EntityRecognizerInputDataConfig$DataFormat": "<p>The format of your training data:</p> <ul> <li> <p> <code>COMPREHEND_CSV</code>: A CSV file that supplements your training documents. The CSV file contains information about the custom entities that your trained model will detect. The required format of the file depends on whether you are providing annotations or an entity list.</p> <p>If you use this value, you must provide your CSV file by using either the <code>Annotations</code> or <code>EntityList</code> parameters. You must provide your training documents by using the <code>Documents</code> parameter.</p> </li> <li> <p> <code>AUGMENTED_MANIFEST</code>: A labeled dataset that is produced by Amazon SageMaker Ground Truth. This file is in JSON lines format. Each line is a complete JSON object that contains a training document and its labels. Each label annotates a named entity in the training document. </p> <p>If you use this value, you must provide the <code>AugmentedManifests</code> parameter in your request.</p> </li> </ul> <p>If you don't specify a value, Amazon Comprehend uses <code>COMPREHEND_CSV</code> as the default.</p>"
}
},
"EntityRecognizerDocuments": {
"base": "<p>Describes the training documents submitted with an entity recognizer.</p>",
"refs": {
"EntityRecognizerInputDataConfig$Documents": "<p>S3 location of the documents folder for an entity recognizer</p>"
"EntityRecognizerInputDataConfig$Documents": "<p>The S3 location of the folder that contains the training documents for your custom entity recognizer.</p> <p>This parameter is required if you set <code>DataFormat</code> to <code>COMPREHEND_CSV</code>.</p>"
}
},
"EntityRecognizerEndpointArn": {
Expand All @@ -718,7 +761,7 @@
"EntityRecognizerEntityList": {
"base": "<p>Describes the entity list submitted with an entity recognizer.</p>",
"refs": {
"EntityRecognizerInputDataConfig$EntityList": "<p>S3 location of the entity list for an entity recognizer.</p>"
"EntityRecognizerInputDataConfig$EntityList": "<p>The S3 location of the CSV file that has the entity list for your custom entity recognizer.</p>"
}
},
"EntityRecognizerEvaluationMetrics": {
Expand Down Expand Up @@ -780,7 +823,7 @@
"EntityTypeName": {
"base": null,
"refs": {
"EntityTypesListItem$Type": "<p>Entity type of an item on an entity type list.</p>"
"EntityTypesListItem$Type": "<p>An entity type within a labeled training dataset that Amazon Comprehend uses to train a custom entity recognizer.</p> <p>Entity types must not contain the following invalid characters: \\n (line break), \\\\n (escaped line break), \\r (carriage return), \\\\r (escaped carriage return), \\t (tab), \\\\t (escaped tab), space, and , (comma).</p>"
}
},
"EntityTypesEvaluationMetrics": {
Expand All @@ -792,11 +835,11 @@
"EntityTypesList": {
"base": null,
"refs": {
"EntityRecognizerInputDataConfig$EntityTypes": "<p>The entity types in the input data for an entity recognizer. A maximum of 25 entity types can be used at one time to train an entity recognizer.</p>"
"EntityRecognizerInputDataConfig$EntityTypes": "<p>The entity types in the labeled training data that Amazon Comprehend uses to train the custom entity recognizer. Any entity types that you don't specify are ignored.</p> <p>A maximum of 25 entity types can be used at one time to train an entity recognizer. Entity types must not contain the following invalid characters: \\n (line break), \\\\n (escaped line break), \\r (carriage return), \\\\r (escaped carriage return), \\t (tab), \\\\t (escaped tab), space, and , (comma). </p>"
}
},
"EntityTypesListItem": {
"base": "<p>Information about an individual item on a list of entity types.</p>",
"base": "<p>An entity type within a labeled training dataset that Amazon Comprehend uses to train a custom entity recognizer.</p>",
"refs": {
"EntityTypesList$member": null
}
Expand Down Expand Up @@ -1314,7 +1357,7 @@
"ModelStatus": {
"base": null,
"refs": {
"DocumentClassifierFilter$Status": "<p>Filters the list of classifiers based on status. </p>",
"DocumentClassifierFilter$Status": "<p>Filters the list of classifiers based on status.</p>",
"DocumentClassifierProperties$Status": "<p>The status of the document classifier. If the status is <code>TRAINED</code> the classifier is ready to use. If the status is <code>FAILED</code> you can see additional information about why the classifier wasn't trained in the <code>Message</code> field.</p>",
"EntityRecognizerFilter$Status": "<p>The status of an entity recognizer.</p>",
"EntityRecognizerProperties$Status": "<p>Provides the status of the entity recognizer.</p>"
Expand Down Expand Up @@ -1437,7 +1480,8 @@
"S3Uri": {
"base": null,
"refs": {
"DocumentClassifierInputDataConfig$S3Uri": "<p>The Amazon S3 URI for the input data. The S3 bucket must be in the same region as the API endpoint that you are calling. The URI can point to a single input file or it can provide the prefix for a collection of input files.</p> <p>For example, if you use the URI <code>S3://bucketName/prefix</code>, if the prefix is a single file, Amazon Comprehend uses that file as input. If more than one file begins with the prefix, Amazon Comprehend uses all of them as input.</p>",
"AugmentedManifestsListItem$S3Uri": "<p>The Amazon S3 location of the augmented manifest file.</p>",
"DocumentClassifierInputDataConfig$S3Uri": "<p>The Amazon S3 URI for the input data. The S3 bucket must be in the same region as the API endpoint that you are calling. The URI can point to a single input file or it can provide the prefix for a collection of input files.</p> <p>For example, if you use the URI <code>S3://bucketName/prefix</code>, if the prefix is a single file, Amazon Comprehend uses that file as input. If more than one file begins with the prefix, Amazon Comprehend uses all of them as input.</p> <p>This parameter is required if you set <code>DataFormat</code> to <code>COMPREHEND_CSV</code>.</p>",
"DocumentClassifierOutputDataConfig$S3Uri": "<p>When you use the <code>OutputDataConfig</code> object while creating a custom classifier, you specify the Amazon S3 location where you want to write the confusion matrix. The URI must be in the same region as the API endpoint that you are calling. The location is used as the prefix for the actual location of this output file.</p> <p>When the custom classifier job is finished, the service creates the output file in a directory specific to the job. The <code>S3Uri</code> field contains the location of the output file, called <code>output.tar.gz</code>. It is a compressed archive that contains the confusion matrix.</p>",
"EntityRecognizerAnnotations$S3Uri": "<p> Specifies the Amazon S3 location where the annotations for an entity recognizer are located. The URI must be in the same region as the API endpoint that you are calling.</p>",
"EntityRecognizerDocuments$S3Uri": "<p> Specifies the Amazon S3 location where the training documents for an entity recognizer are located. The URI must be in the same region as the API endpoint that you are calling.</p>",
Expand Down
7 changes: 4 additions & 3 deletions aws-sdk-core/apis/lex-models/2017-04-19/api-2.json
Original file line number Diff line number Diff line change
Expand Up @@ -1928,10 +1928,11 @@
"Locale":{
"type":"string",
"enum":[
"en-US",
"en-GB",
"de-DE",
"en-AU"
"en-AU",
"en-GB",
"en-US",
"es-US"
]
},
"LocaleList":{
Expand Down
Loading

0 comments on commit a8276e8

Please sign in to comment.