diff --git a/sdk/textanalytics/azure-ai-textanalytics/CHANGELOG.md b/sdk/textanalytics/azure-ai-textanalytics/CHANGELOG.md index f8c48186f667..2a59a9623687 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/CHANGELOG.md +++ b/sdk/textanalytics/azure-ai-textanalytics/CHANGELOG.md @@ -3,6 +3,7 @@ ## 1.0.0b5 (Unreleased) **New features** +- We now have a `warnings` property on each document-level response object returned from the endpoints. It is a list of `TextAnalyticsWarning`s. - Added `text` property to `SentenceSentiment` **Breaking changes** diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/__init__.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/__init__.py index 09bcd693a45c..748335a88191 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/__init__.py +++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/__init__.py @@ -17,6 +17,7 @@ RecognizeEntitiesResult, DetectLanguageResult, TextAnalyticsError, + TextAnalyticsWarning, ExtractKeyPhrasesResult, RecognizeLinkedEntitiesResult, TextDocumentStatistics, @@ -35,6 +36,7 @@ 'DetectLanguageResult', 'CategorizedEntity', 'TextAnalyticsError', + 'TextAnalyticsWarning', 'ExtractKeyPhrasesResult', 'RecognizeLinkedEntitiesResult', 'AnalyzeSentimentResult', diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_models.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_models.py index 2560d01d3bf1..36bc56ba0d5a 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_models.py +++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_models.py @@ -101,6 +101,9 @@ class RecognizeEntitiesResult(DictMixin): :param entities: Recognized entities in the document. :type entities: list[~azure.ai.textanalytics.CategorizedEntity] + :param warnings: Warnings encountered while processing document. Results will still be returned + if there are warnings, but they may not be fully accurate. + :type warnings: list[~azure.ai.textanalytics.TextAnalyticsWarning] :param statistics: If show_stats=true was specified in the request this field will contain information about the document payload. :type statistics: @@ -112,12 +115,13 @@ class RecognizeEntitiesResult(DictMixin): def __init__(self, **kwargs): self.id = kwargs.get("id", None) self.entities = kwargs.get("entities", None) + self.warnings = kwargs.get("warnings", []) self.statistics = kwargs.get("statistics", None) self.is_error = False def __repr__(self): - return "RecognizeEntitiesResult(id={}, entities={}, statistics={}, is_error={})" \ - .format(self.id, repr(self.entities), repr(self.statistics), self.is_error)[:1024] + return "RecognizeEntitiesResult(id={}, entities={}, warnings={}, statistics={}, is_error={})" \ + .format(self.id, repr(self.entities), repr(self.warnings), repr(self.statistics), self.is_error)[:1024] class DetectLanguageResult(DictMixin): @@ -130,6 +134,9 @@ class DetectLanguageResult(DictMixin): :type id: str :param primary_language: The primary language detected in the document. :type primary_language: ~azure.ai.textanalytics.DetectedLanguage + :param warnings: Warnings encountered while processing document. Results will still be returned + if there are warnings, but they may not be fully accurate. + :type warnings: list[~azure.ai.textanalytics.TextAnalyticsWarning] :param statistics: If show_stats=true was specified in the request this field will contain information about the document payload. 
:type statistics: @@ -141,12 +148,14 @@ class DetectLanguageResult(DictMixin): def __init__(self, **kwargs): self.id = kwargs.get("id", None) self.primary_language = kwargs.get("primary_language", None) + self.warnings = kwargs.get("warnings", []) self.statistics = kwargs.get("statistics", None) self.is_error = False def __repr__(self): - return "DetectLanguageResult(id={}, primary_language={}, statistics={}, is_error={})" \ - .format(self.id, repr(self.primary_language), repr(self.statistics), self.is_error)[:1024] + return "DetectLanguageResult(id={}, primary_language={}, warnings={}, statistics={}, "\ + "is_error={})".format(self.id, repr(self.primary_language), repr(self.warnings), + repr(self.statistics), self.is_error)[:1024] class CategorizedEntity(DictMixin): @@ -235,6 +244,32 @@ def __repr__(self): return "TextAnalyticsError(code={}, message={}, target={})" \ .format(self.code, self.message, self.target)[:1024] +class TextAnalyticsWarning(DictMixin): + """TextAnalyticsWarning contains the warning code and message that explains why + the response has a warning. + + :param code: Warning code. Possible values include: 'LongWordsInDocument', + 'DocumentTruncated'. + :type code: str + :param message: Warning message. + :type message: str + """ + + def __init__(self, **kwargs): + self.code = kwargs.get('code', None) + self.message = kwargs.get('message', None) + + @classmethod + def _from_generated(cls, warning): + return cls( + code=warning.code, + message=warning.message, + ) + + def __repr__(self): + return "TextAnalyticsWarning(code={}, message={})" \ + .format(self.code, self.message)[:1024] + class ExtractKeyPhrasesResult(DictMixin): """ExtractKeyPhrasesResult is a result object which contains @@ -248,6 +283,9 @@ class ExtractKeyPhrasesResult(DictMixin): The number of key phrases returned is proportional to the number of words in the input document. :type key_phrases: list[str] + :param warnings: Warnings encountered while processing document. Results will still be returned + if there are warnings, but they may not be fully accurate. + :type warnings: list[~azure.ai.textanalytics.TextAnalyticsWarning] :param statistics: If show_stats=true was specified in the request this field will contain information about the document payload. :type statistics: @@ -259,12 +297,13 @@ class ExtractKeyPhrasesResult(DictMixin): def __init__(self, **kwargs): self.id = kwargs.get("id", None) self.key_phrases = kwargs.get("key_phrases", None) + self.warnings = kwargs.get("warnings", []) self.statistics = kwargs.get("statistics", None) self.is_error = False def __repr__(self): - return "ExtractKeyPhrasesResult(id={}, key_phrases={}, statistics={}, is_error={})" \ - .format(self.id, self.key_phrases, repr(self.statistics), self.is_error)[:1024] + return "ExtractKeyPhrasesResult(id={}, key_phrases={}, warnings={}, statistics={}, is_error={})" \ + .format(self.id, self.key_phrases, repr(self.warnings), repr(self.statistics), self.is_error)[:1024] class RecognizeLinkedEntitiesResult(DictMixin): @@ -278,6 +317,9 @@ class RecognizeLinkedEntitiesResult(DictMixin): :param entities: Recognized well-known entities in the document. :type entities: list[~azure.ai.textanalytics.LinkedEntity] + :param warnings: Warnings encountered while processing document. Results will still be returned + if there are warnings, but they may not be fully accurate. 
+ :type warnings: list[~azure.ai.textanalytics.TextAnalyticsWarning] :param statistics: If show_stats=true was specified in the request this field will contain information about the document payload. :type statistics: @@ -289,12 +331,13 @@ class RecognizeLinkedEntitiesResult(DictMixin): def __init__(self, **kwargs): self.id = kwargs.get("id", None) self.entities = kwargs.get("entities", None) + self.warnings = kwargs.get("warnings", []) self.statistics = kwargs.get("statistics", None) self.is_error = False def __repr__(self): - return "RecognizeLinkedEntitiesResult(id={}, entities={}, statistics={}, is_error={})" \ - .format(self.id, repr(self.entities), repr(self.statistics), self.is_error)[:1024] + return "RecognizeLinkedEntitiesResult(id={}, entities={}, warnings={}, statistics={}, is_error={})" \ + .format(self.id, repr(self.entities), repr(self.warnings), repr(self.statistics), self.is_error)[:1024] class AnalyzeSentimentResult(DictMixin): @@ -310,6 +353,9 @@ class AnalyzeSentimentResult(DictMixin): Neutral, Positive, or Mixed). Possible values include: 'positive', 'neutral', 'negative', 'mixed' :type sentiment: str + :param warnings: Warnings encountered while processing document. Results will still be returned + if there are warnings, but they may not be fully accurate. + :type warnings: list[~azure.ai.textanalytics.TextAnalyticsWarning] :param statistics: If show_stats=true was specified in the request this field will contain information about the document payload. :type statistics: @@ -328,15 +374,17 @@ class AnalyzeSentimentResult(DictMixin): def __init__(self, **kwargs): self.id = kwargs.get("id", None) self.sentiment = kwargs.get("sentiment", None) + self.warnings = kwargs.get("warnings", []) self.statistics = kwargs.get("statistics", None) self.confidence_scores = kwargs.get("confidence_scores", None) self.sentences = kwargs.get("sentences", None) self.is_error = False def __repr__(self): - return "AnalyzeSentimentResult(id={}, sentiment={}, statistics={}, confidence_scores={}, sentences={}, " \ - "is_error={})".format(self.id, self.sentiment, repr(self.statistics), repr(self.confidence_scores), - repr(self.sentences), self.is_error)[:1024] + return "AnalyzeSentimentResult(id={}, sentiment={}, warnings={}, statistics={}, confidence_scores={}, "\ + "sentences={}, is_error={})".format( + self.id, self.sentiment, repr(self.warnings), repr(self.statistics), + repr(self.confidence_scores), repr(self.sentences), self.is_error)[:1024] class TextDocumentStatistics(DictMixin): diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_response_handlers.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_response_handlers.py index bf4226e73e9b..f14e24ad6b58 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_response_handlers.py +++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_response_handlers.py @@ -23,7 +23,8 @@ DetectedLanguage, DocumentError, SentimentConfidenceScores, - TextAnalyticsError + TextAnalyticsError, + TextAnalyticsWarning ) class CSODataV4Format(ODataV4Format): @@ -80,6 +81,7 @@ def language_result(language): return DetectLanguageResult( id=language.id, primary_language=DetectedLanguage._from_generated(language.detected_language), # pylint: disable=protected-access + warnings=[TextAnalyticsWarning._from_generated(w) for w in language.warnings], # pylint: disable=protected-access statistics=TextDocumentStatistics._from_generated(language.statistics), # pylint: disable=protected-access ) 
@@ -89,6 +91,7 @@ def entities_result(entity): return RecognizeEntitiesResult( id=entity.id, entities=[CategorizedEntity._from_generated(e) for e in entity.entities], # pylint: disable=protected-access + warnings=[TextAnalyticsWarning._from_generated(w) for w in entity.warnings], # pylint: disable=protected-access statistics=TextDocumentStatistics._from_generated(entity.statistics), # pylint: disable=protected-access ) @@ -98,6 +101,7 @@ def linked_entities_result(entity): return RecognizeLinkedEntitiesResult( id=entity.id, entities=[LinkedEntity._from_generated(e) for e in entity.entities], # pylint: disable=protected-access + warnings=[TextAnalyticsWarning._from_generated(w) for w in entity.warnings], # pylint: disable=protected-access statistics=TextDocumentStatistics._from_generated(entity.statistics), # pylint: disable=protected-access ) @@ -107,6 +111,7 @@ def key_phrases_result(phrases): return ExtractKeyPhrasesResult( id=phrases.id, key_phrases=phrases.key_phrases, + warnings=[TextAnalyticsWarning._from_generated(w) for w in phrases.warnings], # pylint: disable=protected-access statistics=TextDocumentStatistics._from_generated(phrases.statistics), # pylint: disable=protected-access ) @@ -116,6 +121,7 @@ def sentiment_result(sentiment): return AnalyzeSentimentResult( id=sentiment.id, sentiment=sentiment.sentiment, + warnings=[TextAnalyticsWarning._from_generated(w) for w in sentiment.warnings], # pylint: disable=protected-access statistics=TextDocumentStatistics._from_generated(sentiment.statistics), # pylint: disable=protected-access confidence_scores=SentimentConfidenceScores._from_generated(sentiment.confidence_scores), # pylint: disable=protected-access sentences=[SentenceSentiment._from_generated(s) for s in sentiment.sentences], # pylint: disable=protected-access diff --git a/sdk/textanalytics/azure-ai-textanalytics/samples/async_samples/sample_get_detailed_diagnostics_information_async.py b/sdk/textanalytics/azure-ai-textanalytics/samples/async_samples/sample_get_detailed_diagnostics_information_async.py index 6e5ff2ca6528..c8aa8371cd3f 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/samples/async_samples/sample_get_detailed_diagnostics_information_async.py +++ b/sdk/textanalytics/azure-ai-textanalytics/samples/async_samples/sample_get_detailed_diagnostics_information_async.py @@ -53,12 +53,14 @@ def callback(resp): _LOGGER.info("raw_response: {}".format(resp.raw_response)) async with text_analytics_client: - result = await text_analytics_client.analyze_sentiment( + result = await text_analytics_client.extract_key_phrases( documents, show_stats=True, model_version="latest", raw_response_hook=callback ) + for doc in result: + _LOGGER.warning("Doc with id {} has these warnings: {}".format(doc.id, doc.warnings)) async def main(): diff --git a/sdk/textanalytics/azure-ai-textanalytics/samples/sample_get_detailed_diagnostics_information.py b/sdk/textanalytics/azure-ai-textanalytics/samples/sample_get_detailed_diagnostics_information.py index ea561c7066d5..fd457a115d2d 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/samples/sample_get_detailed_diagnostics_information.py +++ b/sdk/textanalytics/azure-ai-textanalytics/samples/sample_get_detailed_diagnostics_information.py @@ -50,12 +50,14 @@ def callback(resp): _LOGGER.info("model_version: {}".format(resp.model_version)) _LOGGER.info("raw_response: {}".format(resp.raw_response)) - result = text_analytics_client.analyze_sentiment( + result = text_analytics_client.extract_key_phrases( documents, show_stats=True, 
model_version="latest", raw_response_hook=callback ) + for doc in result: + _LOGGER.warning("Doc with id {} has these warnings: {}".format(doc.id, doc.warnings)) if __name__ == '__main__': diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_document_warnings.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_document_warnings.yaml new file mode 100644 index 000000000000..06d424ae48cb --- /dev/null +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_document_warnings.yaml @@ -0,0 +1,45 @@ +interactions: +- request: + body: '{"documents": [{"id": "1", "text": "This won''t actually create a warning + :''(", "language": "en"}]}' + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '98' + Content-Type: + - application/json + User-Agent: + - azsdk-python-ai-textanalytics/1.0.0b5 Python/3.7.7 (Darwin-17.7.0-x86_64-i386-64bit) + Python/3.7.7 (Darwin-17.7.0-x86_64-i386-64bit) + method: POST + uri: https://westus2.ppe.cognitiveservices.azure.com/text/analytics/v3.0/sentiment?showStats=false + response: + body: + string: '{"documents":[{"id":"1","sentiment":"negative","confidenceScores":{"positive":0.0,"neutral":0.02,"negative":0.98},"sentences":[{"sentiment":"negative","confidenceScores":{"positive":0.0,"neutral":0.02,"negative":0.98},"offset":0,"length":40,"text":"This + won''t actually create a warning :''("}],"warnings":[]}],"errors":[],"modelVersion":"2019-10-01"}' + headers: + apim-request-id: + - c1f122eb-deb0-4369-b857-2d91e3d0e348 + content-type: + - application/json; charset=utf-8 + csp-billing-usage: + - CognitiveServices.TextAnalytics.BatchScoring=1 + date: + - Fri, 24 Apr 2020 18:51:45 GMT + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + x-content-type-options: + - nosniff + x-envoy-upstream-service-time: + - '276' + status: + code: 200 + message: OK +version: 1 diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_document_warnings.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_document_warnings.yaml new file mode 100644 index 000000000000..ddbdd5f063b1 --- /dev/null +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_document_warnings.yaml @@ -0,0 +1,34 @@ +interactions: +- request: + body: '{"documents": [{"id": "1", "text": "This won''t actually create a warning + :''(", "language": "en"}]}' + headers: + Accept: + - application/json + Content-Length: + - '98' + Content-Type: + - application/json + User-Agent: + - azsdk-python-ai-textanalytics/1.0.0b5 Python/3.7.7 (Darwin-17.7.0-x86_64-i386-64bit) + Python/3.7.7 (Darwin-17.7.0-x86_64-i386-64bit) + method: POST + uri: https://westus2.ppe.cognitiveservices.azure.com/text/analytics/v3.0/sentiment?showStats=false + response: + body: + string: '{"documents":[{"id":"1","sentiment":"negative","confidenceScores":{"positive":0.0,"neutral":0.02,"negative":0.98},"sentences":[{"sentiment":"negative","confidenceScores":{"positive":0.0,"neutral":0.02,"negative":0.98},"offset":0,"length":40,"text":"This + won''t actually create a warning :''("}],"warnings":[]}],"errors":[],"modelVersion":"2019-10-01"}' + headers: + apim-request-id: cc030197-a61d-453a-a32c-e719084eb5e7 + content-type: application/json; charset=utf-8 + csp-billing-usage: 
CognitiveServices.TextAnalytics.BatchScoring=1 + date: Mon, 27 Apr 2020 22:13:41 GMT + strict-transport-security: max-age=31536000; includeSubDomains; preload + transfer-encoding: chunked + x-content-type-options: nosniff + x-envoy-upstream-service-time: '108' + status: + code: 200 + message: OK + url: https://westus2.ppe.cognitiveservices.azure.com/text/analytics/v3.0/sentiment?showStats=false +version: 1 diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_detect_language.test_document_warnings.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_detect_language.test_document_warnings.yaml new file mode 100644 index 000000000000..8b19143f43b0 --- /dev/null +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_detect_language.test_document_warnings.yaml @@ -0,0 +1,44 @@ +interactions: +- request: + body: '{"documents": [{"id": "1", "text": "This won''t actually create a warning + :''(", "countryHint": "US"}]}' + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '101' + Content-Type: + - application/json + User-Agent: + - azsdk-python-ai-textanalytics/1.0.0b5 Python/3.7.7 (Darwin-17.7.0-x86_64-i386-64bit) + Python/3.7.7 (Darwin-17.7.0-x86_64-i386-64bit) + method: POST + uri: https://westus2.ppe.cognitiveservices.azure.com/text/analytics/v3.0/languages?showStats=false + response: + body: + string: '{"documents":[{"id":"1","detectedLanguage":{"name":"English","iso6391Name":"en","confidenceScore":1.0},"warnings":[]}],"errors":[],"modelVersion":"2019-10-01"}' + headers: + apim-request-id: + - 75955ed2-88c8-4d5a-84a3-1123b894ba70 + content-type: + - application/json; charset=utf-8 + csp-billing-usage: + - CognitiveServices.TextAnalytics.BatchScoring=1 + date: + - Mon, 27 Apr 2020 22:14:53 GMT + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + x-content-type-options: + - nosniff + x-envoy-upstream-service-time: + - '2' + status: + code: 200 + message: OK +version: 1 diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_detect_language_async.test_document_warnings.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_detect_language_async.test_document_warnings.yaml new file mode 100644 index 000000000000..c0ccafa7d880 --- /dev/null +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_detect_language_async.test_document_warnings.yaml @@ -0,0 +1,33 @@ +interactions: +- request: + body: '{"documents": [{"id": "1", "text": "This won''t actually create a warning + :''(", "countryHint": "US"}]}' + headers: + Accept: + - application/json + Content-Length: + - '101' + Content-Type: + - application/json + User-Agent: + - azsdk-python-ai-textanalytics/1.0.0b5 Python/3.7.7 (Darwin-17.7.0-x86_64-i386-64bit) + Python/3.7.7 (Darwin-17.7.0-x86_64-i386-64bit) + method: POST + uri: https://westus2.ppe.cognitiveservices.azure.com/text/analytics/v3.0/languages?showStats=false + response: + body: + string: '{"documents":[{"id":"1","detectedLanguage":{"name":"English","iso6391Name":"en","confidenceScore":1.0},"warnings":[]}],"errors":[],"modelVersion":"2019-10-01"}' + headers: + apim-request-id: 464aee61-67d7-4209-a670-f3c986dafc7d + content-type: application/json; charset=utf-8 + csp-billing-usage: CognitiveServices.TextAnalytics.BatchScoring=1 + date: Mon, 27 Apr 2020 22:15:23 GMT + strict-transport-security: max-age=31536000; includeSubDomains; preload + 
transfer-encoding: chunked + x-content-type-options: nosniff + x-envoy-upstream-service-time: '25' + status: + code: 200 + message: OK + url: https://westus2.ppe.cognitiveservices.azure.com/text/analytics/v3.0/languages?showStats=false +version: 1 diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_extract_key_phrases.test_document_warnings.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_extract_key_phrases.test_document_warnings.yaml new file mode 100644 index 000000000000..b62ed5f65cec --- /dev/null +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_extract_key_phrases.test_document_warnings.yaml @@ -0,0 +1,46 @@ +interactions: +- request: + body: '{"documents": [{"id": "1", "text": "Thisisaveryveryverylongtextwhichgoesonforalongtimeandwhichalmostdoesn''tseemtostopatanygivenpointintime.ThereasonforthistestistotryandseewhathappenswhenwesubmitaveryveryverylongtexttoLanguage.Thisshouldworkjustfinebutjustincaseitisalwaysgoodtohaveatestcase.ThisallowsustotestwhathappensifitisnotOK.Ofcourseitisgoingtobeokbutthenagainitisalsobettertobesure!", + "language": "en"}]}' + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '413' + Content-Type: + - application/json + User-Agent: + - azsdk-python-ai-textanalytics/1.0.0b5 Python/3.7.7 (Darwin-17.7.0-x86_64-i386-64bit) + Python/3.7.7 (Darwin-17.7.0-x86_64-i386-64bit) + method: POST + uri: https://westus2.ppe.cognitiveservices.azure.com/text/analytics/v3.0/keyPhrases?showStats=false + response: + body: + string: '{"documents":[{"id":"1","keyPhrases":["Thisisaveryveryverylongtextwhichgoesonforalongtimeandwhichalmost"],"warnings":[{"code":"LongWordsInDocument","message":"The + document contains very long words (longer than 64 characters). 
These words + will be truncated and may result in unreliable model predictions."}]}],"errors":[],"modelVersion":"2019-10-01"}' + headers: + apim-request-id: + - bbaeaafd-7ae8-46d9-88bb-a0eae6fdab85 + content-type: + - application/json; charset=utf-8 + csp-billing-usage: + - CognitiveServices.TextAnalytics.BatchScoring=1 + date: + - Fri, 24 Apr 2020 18:40:40 GMT + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + x-content-type-options: + - nosniff + x-envoy-upstream-service-time: + - '4' + status: + code: 200 + message: OK +version: 1 diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_extract_key_phrases_async.test_document_warnings.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_extract_key_phrases_async.test_document_warnings.yaml new file mode 100644 index 000000000000..8b12da7a002f --- /dev/null +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_extract_key_phrases_async.test_document_warnings.yaml @@ -0,0 +1,35 @@ +interactions: +- request: + body: '{"documents": [{"id": "1", "text": "Thisisaveryveryverylongtextwhichgoesonforalongtimeandwhichalmostdoesn''tseemtostopatanygivenpointintime.ThereasonforthistestistotryandseewhathappenswhenwesubmitaveryveryverylongtexttoLanguage.Thisshouldworkjustfinebutjustincaseitisalwaysgoodtohaveatestcase.ThisallowsustotestwhathappensifitisnotOK.Ofcourseitisgoingtobeokbutthenagainitisalsobettertobesure!", + "language": "en"}]}' + headers: + Accept: + - application/json + Content-Length: + - '413' + Content-Type: + - application/json + User-Agent: + - azsdk-python-ai-textanalytics/1.0.0b5 Python/3.7.7 (Darwin-17.7.0-x86_64-i386-64bit) + Python/3.7.7 (Darwin-17.7.0-x86_64-i386-64bit) + method: POST + uri: https://westus2.ppe.cognitiveservices.azure.com/text/analytics/v3.0/keyPhrases?showStats=false + response: + body: + string: '{"documents":[{"id":"1","keyPhrases":["Thisisaveryveryverylongtextwhichgoesonforalongtimeandwhichalmost"],"warnings":[{"code":"LongWordsInDocument","message":"The + document contains very long words (longer than 64 characters). 
These words + will be truncated and may result in unreliable model predictions."}]}],"errors":[],"modelVersion":"2019-10-01"}' + headers: + apim-request-id: c761fc46-cd6f-4885-a8bb-8f5f2606a6d5 + content-type: application/json; charset=utf-8 + csp-billing-usage: CognitiveServices.TextAnalytics.BatchScoring=1 + date: Fri, 24 Apr 2020 18:41:46 GMT + strict-transport-security: max-age=31536000; includeSubDomains; preload + transfer-encoding: chunked + x-content-type-options: nosniff + x-envoy-upstream-service-time: '5' + status: + code: 200 + message: OK + url: https://westus2.ppe.cognitiveservices.azure.com/text/analytics/v3.0/keyPhrases?showStats=false +version: 1 diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_recognize_entities.test_document_warnings.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_recognize_entities.test_document_warnings.yaml new file mode 100644 index 000000000000..be4c48c42611 --- /dev/null +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_recognize_entities.test_document_warnings.yaml @@ -0,0 +1,44 @@ +interactions: +- request: + body: '{"documents": [{"id": "1", "text": "This won''t actually create a warning + :''(", "language": "en"}]}' + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '98' + Content-Type: + - application/json + User-Agent: + - azsdk-python-ai-textanalytics/1.0.0b5 Python/3.7.7 (Darwin-17.7.0-x86_64-i386-64bit) + Python/3.7.7 (Darwin-17.7.0-x86_64-i386-64bit) + method: POST + uri: https://westus2.ppe.cognitiveservices.azure.com/text/analytics/v3.0/entities/recognition/general?showStats=false + response: + body: + string: '{"documents":[{"id":"1","entities":[],"warnings":[]}],"errors":[],"modelVersion":"2020-04-01"}' + headers: + apim-request-id: + - 978a15c1-e20e-46da-adc3-3eae45d8da71 + content-type: + - application/json; charset=utf-8 + csp-billing-usage: + - CognitiveServices.TextAnalytics.BatchScoring=1 + date: + - Fri, 24 Apr 2020 18:57:25 GMT + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + x-content-type-options: + - nosniff + x-envoy-upstream-service-time: + - '93' + status: + code: 200 + message: OK +version: 1 diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_recognize_entities_async.test_document_warnings.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_recognize_entities_async.test_document_warnings.yaml new file mode 100644 index 000000000000..ec0258316407 --- /dev/null +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_recognize_entities_async.test_document_warnings.yaml @@ -0,0 +1,33 @@ +interactions: +- request: + body: '{"documents": [{"id": "1", "text": "This won''t actually create a warning + :''(", "language": "en"}]}' + headers: + Accept: + - application/json + Content-Length: + - '98' + Content-Type: + - application/json + User-Agent: + - azsdk-python-ai-textanalytics/1.0.0b5 Python/3.7.7 (Darwin-17.7.0-x86_64-i386-64bit) + Python/3.7.7 (Darwin-17.7.0-x86_64-i386-64bit) + method: POST + uri: https://westus2.ppe.cognitiveservices.azure.com/text/analytics/v3.0/entities/recognition/general?showStats=false + response: + body: + string: '{"documents":[{"id":"1","entities":[],"warnings":[]}],"errors":[],"modelVersion":"2020-04-01"}' + headers: + apim-request-id: 2068e07e-9698-4369-a9de-8239197220ad + content-type: application/json; charset=utf-8 + 
csp-billing-usage: CognitiveServices.TextAnalytics.BatchScoring=1 + date: Fri, 24 Apr 2020 18:58:18 GMT + strict-transport-security: max-age=31536000; includeSubDomains; preload + transfer-encoding: chunked + x-content-type-options: nosniff + x-envoy-upstream-service-time: '82' + status: + code: 200 + message: OK + url: https://westus2.ppe.cognitiveservices.azure.com/text/analytics/v3.0/entities/recognition/general?showStats=false +version: 1 diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_recognize_linked_entities.test_document_warnings.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_recognize_linked_entities.test_document_warnings.yaml new file mode 100644 index 000000000000..55d426658bc7 --- /dev/null +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_recognize_linked_entities.test_document_warnings.yaml @@ -0,0 +1,44 @@ +interactions: +- request: + body: '{"documents": [{"id": "1", "text": "This won''t actually create a warning + :''(", "language": "en"}]}' + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '98' + Content-Type: + - application/json + User-Agent: + - azsdk-python-ai-textanalytics/1.0.0b5 Python/3.7.7 (Darwin-17.7.0-x86_64-i386-64bit) + Python/3.7.7 (Darwin-17.7.0-x86_64-i386-64bit) + method: POST + uri: https://westus2.ppe.cognitiveservices.azure.com/text/analytics/v3.0/entities/linking?showStats=false + response: + body: + string: '{"documents":[{"id":"1","entities":[],"warnings":[]}],"errors":[],"modelVersion":"2020-02-01"}' + headers: + apim-request-id: + - 8e080839-b000-4b69-b4b3-864530fa4fd3 + content-type: + - application/json; charset=utf-8 + csp-billing-usage: + - CognitiveServices.TextAnalytics.BatchScoring=1 + date: + - Fri, 24 Apr 2020 18:59:22 GMT + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + x-content-type-options: + - nosniff + x-envoy-upstream-service-time: + - '27' + status: + code: 200 + message: OK +version: 1 diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_recognize_linked_entities_async.test_document_warnings.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_recognize_linked_entities_async.test_document_warnings.yaml new file mode 100644 index 000000000000..f69c25558b81 --- /dev/null +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_recognize_linked_entities_async.test_document_warnings.yaml @@ -0,0 +1,33 @@ +interactions: +- request: + body: '{"documents": [{"id": "1", "text": "This won''t actually create a warning + :''(", "language": "en"}]}' + headers: + Accept: + - application/json + Content-Length: + - '98' + Content-Type: + - application/json + User-Agent: + - azsdk-python-ai-textanalytics/1.0.0b5 Python/3.7.7 (Darwin-17.7.0-x86_64-i386-64bit) + Python/3.7.7 (Darwin-17.7.0-x86_64-i386-64bit) + method: POST + uri: https://westus2.ppe.cognitiveservices.azure.com/text/analytics/v3.0/entities/linking?showStats=false + response: + body: + string: '{"documents":[{"id":"1","entities":[],"warnings":[]}],"errors":[],"modelVersion":"2020-02-01"}' + headers: + apim-request-id: 36ebcb42-e72f-451b-862e-bee0baa8059e + content-type: application/json; charset=utf-8 + csp-billing-usage: CognitiveServices.TextAnalytics.BatchScoring=1 + date: Fri, 24 Apr 2020 19:00:11 GMT + strict-transport-security: max-age=31536000; includeSubDomains; preload + transfer-encoding: chunked + 
x-content-type-options: nosniff + x-envoy-upstream-service-time: '21' + status: + code: 200 + message: OK + url: https://westus2.ppe.cognitiveservices.azure.com/text/analytics/v3.0/entities/linking?showStats=false +version: 1 diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/test_analyze_sentiment.py b/sdk/textanalytics/azure-ai-textanalytics/tests/test_analyze_sentiment.py index b19955fdb96a..1813fc591e68 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/test_analyze_sentiment.py +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/test_analyze_sentiment.py @@ -123,6 +123,19 @@ def test_input_with_all_errors(self, client): self.assertTrue(response[1].is_error) self.assertTrue(response[2].is_error) + @GlobalTextAnalyticsAccountPreparer() + @TextAnalyticsClientPreparer() + def test_document_warnings(self, client): + # No warnings actually returned for analyze_sentiment. Will update when they add + docs = [ + {"id": "1", "text": "This won't actually create a warning :'("}, + ] + + result = client.analyze_sentiment(docs) + for doc in result: + doc_warnings = doc.warnings + self.assertEqual(len(doc_warnings), 0) + @GlobalTextAnalyticsAccountPreparer() @TextAnalyticsClientPreparer(client_kwargs={"text_analytics_account_key": ""}) def test_empty_credential_class(self, client): diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/test_analyze_sentiment_async.py b/sdk/textanalytics/azure-ai-textanalytics/tests/test_analyze_sentiment_async.py index 9ffc17ad41eb..97447f1f9601 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/test_analyze_sentiment_async.py +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/test_analyze_sentiment_async.py @@ -468,6 +468,19 @@ async def test_document_errors(self, client): self.assertEqual(doc_errors[2].error.code, "InvalidDocument") self.assertIsNotNone(doc_errors[2].error.message) + @GlobalTextAnalyticsAccountPreparer() + @TextAnalyticsClientPreparer() + async def test_document_warnings(self, client): + # No warnings actually returned for analyze_sentiment. Will update when they add + docs = [ + {"id": "1", "text": "This won't actually create a warning :'("}, + ] + + result = await client.analyze_sentiment(docs) + for doc in result: + doc_warnings = doc.warnings + self.assertEqual(len(doc_warnings), 0) + @GlobalTextAnalyticsAccountPreparer() @TextAnalyticsClientPreparer() async def test_missing_input_records_error(self, client): diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/test_detect_language.py b/sdk/textanalytics/azure-ai-textanalytics/tests/test_detect_language.py index cb8dd8320714..e9ee39ebcc5a 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/test_detect_language.py +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/test_detect_language.py @@ -454,6 +454,19 @@ def test_document_errors(self, client): self.assertEqual(doc_errors[1].error.code, "InvalidDocument") self.assertIsNotNone(doc_errors[1].error.message) + @GlobalTextAnalyticsAccountPreparer() + @TextAnalyticsClientPreparer() + def test_document_warnings(self, client): + # No warnings actually returned for detect_language. 
Will update when they add + docs = [ + {"id": "1", "text": "This won't actually create a warning :'("}, + ] + + result = client.detect_language(docs) + for doc in result: + doc_warnings = doc.warnings + self.assertEqual(len(doc_warnings), 0) + @GlobalTextAnalyticsAccountPreparer() @TextAnalyticsClientPreparer() def test_missing_input_records_error(self, client): diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/test_detect_language_async.py b/sdk/textanalytics/azure-ai-textanalytics/tests/test_detect_language_async.py index fce974e63502..b022c3805395 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/test_detect_language_async.py +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/test_detect_language_async.py @@ -467,6 +467,19 @@ async def test_document_errors(self, client): self.assertEqual(doc_errors[1].error.code, "InvalidDocument") self.assertIsNotNone(doc_errors[1].error.message) + @GlobalTextAnalyticsAccountPreparer() + @TextAnalyticsClientPreparer() + async def test_document_warnings(self, client): + # No warnings actually returned for detect_language. Will update when they add + docs = [ + {"id": "1", "text": "This won't actually create a warning :'("}, + ] + + result = await client.detect_language(docs) + for doc in result: + doc_warnings = doc.warnings + self.assertEqual(len(doc_warnings), 0) + @GlobalTextAnalyticsAccountPreparer() @TextAnalyticsClientPreparer() async def test_missing_input_records_error(self, client): diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/test_extract_key_phrases.py b/sdk/textanalytics/azure-ai-textanalytics/tests/test_extract_key_phrases.py index 7f0d55e973cd..e7921b6b745b 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/test_extract_key_phrases.py +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/test_extract_key_phrases.py @@ -406,6 +406,19 @@ def test_document_errors(self, client): self.assertEqual(doc_errors[2].error.code, "InvalidDocument") self.assertIsNotNone(doc_errors[2].error.message) + @GlobalTextAnalyticsAccountPreparer() + @TextAnalyticsClientPreparer() + def test_document_warnings(self, client): + docs = [ + {"id": "1", "text": "Thisisaveryveryverylongtextwhichgoesonforalongtimeandwhichalmostdoesn'tseemtostopatanygivenpointintime.ThereasonforthistestistotryandseewhathappenswhenwesubmitaveryveryverylongtexttoLanguage.Thisshouldworkjustfinebutjustincaseitisalwaysgoodtohaveatestcase.ThisallowsustotestwhathappensifitisnotOK.Ofcourseitisgoingtobeokbutthenagainitisalsobettertobesure!"}, + ] + + result = client.extract_key_phrases(docs) + for doc in result: + doc_warnings = doc.warnings + self.assertEqual(doc_warnings[0].code, "LongWordsInDocument") + self.assertIsNotNone(doc_warnings[0].message) + @GlobalTextAnalyticsAccountPreparer() @TextAnalyticsClientPreparer() def test_missing_input_records_error(self, client): diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/test_extract_key_phrases_async.py b/sdk/textanalytics/azure-ai-textanalytics/tests/test_extract_key_phrases_async.py index 1b9c2a9b0d42..6043ccaf7bad 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/test_extract_key_phrases_async.py +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/test_extract_key_phrases_async.py @@ -421,6 +421,19 @@ async def test_document_errors(self, client): self.assertEqual(doc_errors[2].error.code, "InvalidDocument") self.assertIsNotNone(doc_errors[2].error.message) + @GlobalTextAnalyticsAccountPreparer() + @TextAnalyticsClientPreparer() + async def test_document_warnings(self, client): + 
docs = [ + {"id": "1", "text": "Thisisaveryveryverylongtextwhichgoesonforalongtimeandwhichalmostdoesn'tseemtostopatanygivenpointintime.ThereasonforthistestistotryandseewhathappenswhenwesubmitaveryveryverylongtexttoLanguage.Thisshouldworkjustfinebutjustincaseitisalwaysgoodtohaveatestcase.ThisallowsustotestwhathappensifitisnotOK.Ofcourseitisgoingtobeokbutthenagainitisalsobettertobesure!"}, + ] + + result = await client.extract_key_phrases(docs) + for doc in result: + doc_warnings = doc.warnings + self.assertEqual(doc_warnings[0].code, "LongWordsInDocument") + self.assertIsNotNone(doc_warnings[0].message) + @GlobalTextAnalyticsAccountPreparer() @TextAnalyticsClientPreparer() async def test_missing_input_records_error(self, client): diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/test_recognize_entities.py b/sdk/textanalytics/azure-ai-textanalytics/tests/test_recognize_entities.py index 685adb2dd28f..0cb514de1f00 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/test_recognize_entities.py +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/test_recognize_entities.py @@ -420,6 +420,19 @@ def test_document_errors(self, client): self.assertEqual(doc_errors[2].error.code, "InvalidDocument") self.assertIsNotNone(doc_errors[2].error.message) + @GlobalTextAnalyticsAccountPreparer() + @TextAnalyticsClientPreparer() + def test_document_warnings(self, client): + # No warnings actually returned for recognize_entities. Will update when they add + docs = [ + {"id": "1", "text": "This won't actually create a warning :'("}, + ] + + result = client.recognize_entities(docs) + for doc in result: + doc_warnings = doc.warnings + self.assertEqual(len(doc_warnings), 0) + @GlobalTextAnalyticsAccountPreparer() @TextAnalyticsClientPreparer() def test_missing_input_records_error(self, client): diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/test_recognize_entities_async.py b/sdk/textanalytics/azure-ai-textanalytics/tests/test_recognize_entities_async.py index e3745c8bfb57..6a0982defc89 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/test_recognize_entities_async.py +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/test_recognize_entities_async.py @@ -439,6 +439,19 @@ async def test_document_errors(self, client): self.assertEqual(doc_errors[2].error.code, "InvalidDocument") self.assertIsNotNone(doc_errors[2].error.message) + @GlobalTextAnalyticsAccountPreparer() + @TextAnalyticsClientPreparer() + async def test_document_warnings(self, client): + # No warnings actually returned for recognize_entities. 
Will update when they add + docs = [ + {"id": "1", "text": "This won't actually create a warning :'("}, + ] + + result = await client.recognize_entities(docs) + for doc in result: + doc_warnings = doc.warnings + self.assertEqual(len(doc_warnings), 0) + @GlobalTextAnalyticsAccountPreparer() @TextAnalyticsClientPreparer() async def test_missing_input_records_error(self, client): diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/test_recognize_linked_entities.py b/sdk/textanalytics/azure-ai-textanalytics/tests/test_recognize_linked_entities.py index 17455ecc7ef5..bfeb87b87762 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/test_recognize_linked_entities.py +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/test_recognize_linked_entities.py @@ -416,6 +416,19 @@ def test_document_errors(self, client): self.assertEqual(doc_errors[2].error.code, "InvalidDocument") self.assertIsNotNone(doc_errors[2].error.message) + @GlobalTextAnalyticsAccountPreparer() + @TextAnalyticsClientPreparer() + def test_document_warnings(self, client): + # No warnings actually returned for recognize_linked_entities. Will update when they add + docs = [ + {"id": "1", "text": "This won't actually create a warning :'("}, + ] + + result = client.recognize_linked_entities(docs) + for doc in result: + doc_warnings = doc.warnings + self.assertEqual(len(doc_warnings), 0) + @GlobalTextAnalyticsAccountPreparer() @TextAnalyticsClientPreparer() def test_missing_input_records_error(self, client): diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/test_recognize_linked_entities_async.py b/sdk/textanalytics/azure-ai-textanalytics/tests/test_recognize_linked_entities_async.py index 581b92eb067b..e6f046426c98 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/test_recognize_linked_entities_async.py +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/test_recognize_linked_entities_async.py @@ -451,6 +451,19 @@ async def test_document_errors(self, client): self.assertEqual(doc_errors[2].error.code, "InvalidDocument") self.assertIsNotNone(doc_errors[2].error.message) + @GlobalTextAnalyticsAccountPreparer() + @TextAnalyticsClientPreparer() + async def test_document_warnings(self, client): + # No warnings actually returned for recognize_linked_entities. Will update when they add + docs = [ + {"id": "1", "text": "This won't actually create a warning :'("}, + ] + + result = await client.recognize_linked_entities(docs) + for doc in result: + doc_warnings = doc.warnings + self.assertEqual(len(doc_warnings), 0) + @GlobalTextAnalyticsAccountPreparer() @TextAnalyticsClientPreparer() async def test_missing_input_records_error(self, client): diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/test_text_analytics.py b/sdk/textanalytics/azure-ai-textanalytics/tests/test_text_analytics.py index db854b7795fe..5fc2101612df 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/test_text_analytics.py +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/test_text_analytics.py @@ -34,9 +34,12 @@ def test_repr(self): text_document_statistics = _models.TextDocumentStatistics(grapheme_count=14, transaction_count=18) + warnings = [_models.TextAnalyticsWarning(code="LongWordsInDocument", message="The document contains very long words (longer than 64 characters). 
These words will be truncated and may result in unreliable model predictions.")] + recognize_entities_result = _models.RecognizeEntitiesResult( id="1", entities=[categorized_entity], + warnings=warnings, statistics=text_document_statistics, is_error=False ) @@ -44,6 +47,7 @@ def test_repr(self): detect_language_result = _models.DetectLanguageResult( id="1", primary_language=detected_language, + warnings=warnings, statistics=text_document_statistics, is_error=False ) @@ -56,7 +60,7 @@ def test_repr(self): extract_key_phrases_result = \ _models.ExtractKeyPhrasesResult( - id="1", key_phrases=["dog", "cat", "bird"], statistics=text_document_statistics, is_error=False + id="1", key_phrases=["dog", "cat", "bird"], warnings=warnings, statistics=text_document_statistics, is_error=False ) linked_entity_match = _models.LinkedEntityMatch(confidence_score=0.999, text="Bill Gates", grapheme_offset=0, @@ -72,7 +76,7 @@ def test_repr(self): ) recognize_linked_entities_result = \ _models.RecognizeLinkedEntitiesResult( - id="1", entities=[linked_entity], statistics=text_document_statistics, is_error=False + id="1", entities=[linked_entity], warnings=warnings, statistics=text_document_statistics, is_error=False ) sentiment_confidence_score_per_label = \ @@ -83,13 +87,13 @@ def test_repr(self): sentiment="neutral", confidence_scores=sentiment_confidence_score_per_label, grapheme_offset=0, - grapheme_length=10, - warnings=["sentence was too short to find sentiment"] + grapheme_length=10 ) analyze_sentiment_result = _models.AnalyzeSentimentResult( id="1", sentiment="positive", + warnings=warnings, statistics=text_document_statistics, confidence_scores=sentiment_confidence_score_per_label, sentences=[sentence_sentiment], @@ -117,15 +121,22 @@ def test_repr(self): repr(text_document_statistics)) self.assertEqual("RecognizeEntitiesResult(id=1, entities=[CategorizedEntity(text=Bill Gates, category=Person, " "subcategory=Age, grapheme_offset=0, grapheme_length=8, confidence_score=0.899)], " + "warnings=[TextAnalyticsWarning(code=LongWordsInDocument, message=The document contains very long words (longer than 64 characters). " + "These words will be truncated and may result in unreliable model predictions.)], " "statistics=TextDocumentStatistics(grapheme_count=14, transaction_count=18), " "is_error=False)", repr(recognize_entities_result)) self.assertEqual("DetectLanguageResult(id=1, primary_language=DetectedLanguage(name=English, " - "iso6391_name=en, confidence_score=1.0), statistics=TextDocumentStatistics(grapheme_count=14, " + "iso6391_name=en, confidence_score=1.0), " + "warnings=[TextAnalyticsWarning(code=LongWordsInDocument, message=The document contains very long words (longer than 64 characters). " + "These words will be truncated and may result in unreliable model predictions.)], " + "statistics=TextDocumentStatistics(grapheme_count=14, " "transaction_count=18), is_error=False)", repr(detect_language_result)) self.assertEqual("TextAnalyticsError(code=invalidRequest, message=The request is invalid, target=request)", repr(text_analytics_error)) - self.assertEqual("ExtractKeyPhrasesResult(id=1, key_phrases=['dog', 'cat', 'bird'], statistics=" - "TextDocumentStatistics(grapheme_count=14, transaction_count=18), is_error=False)", + self.assertEqual("ExtractKeyPhrasesResult(id=1, key_phrases=['dog', 'cat', 'bird'], " + "warnings=[TextAnalyticsWarning(code=LongWordsInDocument, message=The document contains very long words (longer than 64 characters). 
" + "These words will be truncated and may result in unreliable model predictions.)], " + "statistics=TextDocumentStatistics(grapheme_count=14, transaction_count=18), is_error=False)", repr(extract_key_phrases_result)) self.assertEqual("LinkedEntityMatch(confidence_score=0.999, text=Bill Gates, grapheme_offset=0, grapheme_length=8)", repr(linked_entity_match)) @@ -138,18 +149,25 @@ def test_repr(self): "grapheme_length=8), LinkedEntityMatch(confidence_score=0.999, text=Bill Gates, grapheme_offset=0, " "grapheme_length=8)], language=English, data_source_entity_id=Bill Gates, " "url=https://en.wikipedia.org/wiki/Bill_Gates, data_source=wikipedia)], " + "warnings=[TextAnalyticsWarning(code=LongWordsInDocument, message=The document contains very long words (longer than 64 characters). " + "These words will be truncated and may result in unreliable model predictions.)], " "statistics=TextDocumentStatistics(grapheme_count=14, " "transaction_count=18), is_error=False)", repr(recognize_linked_entities_result)) self.assertEqual("SentimentConfidenceScores(positive=0.99, neutral=0.05, negative=0.02)", repr(sentiment_confidence_score_per_label)) self.assertEqual("SentenceSentiment(text=This is a sentence., sentiment=neutral, confidence_scores=SentimentConfidenceScores(" - "positive=0.99, neutral=0.05, negative=0.02), grapheme_offset=0, grapheme_length=10)", repr(sentence_sentiment)) - self.assertEqual("AnalyzeSentimentResult(id=1, sentiment=positive, statistics=TextDocumentStatistics(" + "positive=0.99, neutral=0.05, negative=0.02), grapheme_offset=0, grapheme_length=10)", + repr(sentence_sentiment)) + self.assertEqual("AnalyzeSentimentResult(id=1, sentiment=positive, " + "warnings=[TextAnalyticsWarning(code=LongWordsInDocument, message=The document contains very long words (longer than 64 characters). " + "These words will be truncated and may result in unreliable model predictions.)], " + "statistics=TextDocumentStatistics(" "grapheme_count=14, transaction_count=18), confidence_scores=SentimentConfidenceScores" "(positive=0.99, neutral=0.05, negative=0.02), " "sentences=[SentenceSentiment(text=This is a sentence., sentiment=neutral, confidence_scores=" "SentimentConfidenceScores(positive=0.99, neutral=0.05, negative=0.02), " - "grapheme_offset=0, grapheme_length=10)], is_error=False)", + "grapheme_offset=0, grapheme_length=10)], " + "is_error=False)", repr(analyze_sentiment_result)) self.assertEqual("DocumentError(id=1, error=TextAnalyticsError(code=invalidRequest, " "message=The request is invalid, target=request), is_error=True)", repr(document_error))
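
Note (not part of the changeset): a minimal sketch of how a caller might surface the new per-document `warnings` property, mirroring the updated samples in this diff. It assumes `text_analytics_client` is an already-configured `TextAnalyticsClient` and `documents` is a list of dicts such as the long-word document used in the key-phrase tests above.

result = text_analytics_client.extract_key_phrases(documents, show_stats=True)
for doc in result:
    if not doc.is_error:
        # doc.warnings is a list of TextAnalyticsWarning objects (possibly empty)
        for warning in doc.warnings:
            print("Document {}: {} - {}".format(doc.id, warning.code, warning.message))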