diff --git a/sdk/textanalytics/azure-ai-textanalytics/CHANGELOG.md b/sdk/textanalytics/azure-ai-textanalytics/CHANGELOG.md index 66dfe5a9773e..8023b7eff46a 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/CHANGELOG.md +++ b/sdk/textanalytics/azure-ai-textanalytics/CHANGELOG.md @@ -2,6 +2,10 @@ ## 5.1.0b6 (Unreleased) +**Breaking Changes** + +- Renamed properties `aspect` and `opinions` to `target` and `assessments` respectively in class `MinedOpinion`. +- Renamed classes `AspectSentiment` and `OpinionSentiment` to `TargetSentiment` and `AssessmentSentiment` respectively. ## 5.1.0b5 (2021-02-10) diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/__init__.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/__init__.py index 8008b27db8b8..83f09344ee03 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/__init__.py +++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/__init__.py @@ -27,8 +27,8 @@ SentenceSentiment, SentimentConfidenceScores, MinedOpinion, - AspectSentiment, - OpinionSentiment, + TargetSentiment, + AssessmentSentiment, RecognizePiiEntitiesResult, PiiEntity, PiiEntityDomainType, @@ -67,8 +67,8 @@ 'SentenceSentiment', 'SentimentConfidenceScores', 'MinedOpinion', - 'AspectSentiment', - 'OpinionSentiment', + 'TargetSentiment', + 'AssessmentSentiment', 'RecognizePiiEntitiesResult', 'PiiEntity', 'PiiEntityDomainType', diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_async_lro.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_async_lro.py index a5fbcc5957f6..8b3e2764fc3d 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_async_lro.py +++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_async_lro.py @@ -170,8 +170,8 @@ class AsyncAnalyzeBatchActionsLROPollingMethod(TextAnalyticsAsyncLROPollingMetho @property def _current_body(self): - from ._generated.v3_1_preview_3.models import JobMetadata - return JobMetadata.deserialize(self._pipeline_response) + from ._generated.v3_1_preview_3.models import AnalyzeJobMetadata + return AnalyzeJobMetadata.deserialize(self._pipeline_response) @property def created_on(self): diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_base_client.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_base_client.py index a5197d3e98f9..2d45f09597d1 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_base_client.py +++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_base_client.py @@ -13,6 +13,7 @@ class TextAnalyticsApiVersion(str, Enum): """Text Analytics API versions supported by this package""" + V3_1_PREVIEW_4 = "v3.1-preview.4" #: this is the default version V3_1_PREVIEW = "v3.1-preview.3" V3_0 = "v3.0" diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/_operations_mixin.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/_operations_mixin.py index 28efc6437820..07e1856dae5f 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/_operations_mixin.py +++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/_operations_mixin.py @@ -13,8 +13,7 @@ import warnings # FIXME: have to manually reconfigure import path for multiapi operation mixin -from .._lro import AnalyzeHealthcareEntitiesLROPoller, AnalyzeHealthcareEntitiesLROPollingMethod -from .._lro import 
AnalyzeBatchActionsLROPoller, AnalyzeBatchActionsLROPollingMethod +from .._lro import AnalyzeBatchActionsLROPoller, AnalyzeBatchActionsLROPollingMethod, AnalyzeHealthcareEntitiesLROPoller, AnalyzeHealthcareEntitiesLROPollingMethod from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import HttpRequest, HttpResponse @@ -61,6 +60,8 @@ def analyze_status( api_version = self._get_api_version('analyze_status') if api_version == 'v3.1-preview.3': from .v3_1_preview_3.operations import TextAnalyticsClientOperationsMixin as OperationClass + elif api_version == 'v3.1-preview.4': + from .v3_1_preview_4.operations import TextAnalyticsClientOperationsMixin as OperationClass else: raise ValueError("API version {} does not have operation 'analyze_status'".format(api_version)) mixin_instance = OperationClass() @@ -96,6 +97,8 @@ def begin_analyze( api_version = self._get_api_version('begin_analyze') if api_version == 'v3.1-preview.3': from .v3_1_preview_3.operations import TextAnalyticsClientOperationsMixin as OperationClass + elif api_version == 'v3.1-preview.4': + from .v3_1_preview_4.operations import TextAnalyticsClientOperationsMixin as OperationClass else: raise ValueError("API version {} does not have operation 'begin_analyze'".format(api_version)) mixin_instance = OperationClass() @@ -130,6 +133,8 @@ def begin_cancel_health_job( api_version = self._get_api_version('begin_cancel_health_job') if api_version == 'v3.1-preview.3': from .v3_1_preview_3.operations import TextAnalyticsClientOperationsMixin as OperationClass + elif api_version == 'v3.1-preview.4': + from .v3_1_preview_4.operations import TextAnalyticsClientOperationsMixin as OperationClass else: raise ValueError("API version {} does not have operation 'begin_cancel_health_job'".format(api_version)) mixin_instance = OperationClass() @@ -174,6 +179,8 @@ def begin_health( api_version = self._get_api_version('begin_health') if api_version == 'v3.1-preview.3': from .v3_1_preview_3.operations import TextAnalyticsClientOperationsMixin as OperationClass + elif api_version == 'v3.1-preview.4': + from .v3_1_preview_4.operations import TextAnalyticsClientOperationsMixin as OperationClass else: raise ValueError("API version {} does not have operation 'begin_health'".format(api_version)) mixin_instance = OperationClass() @@ -215,6 +222,8 @@ def entities_linking( from .v3_0.operations import TextAnalyticsClientOperationsMixin as OperationClass elif api_version == 'v3.1-preview.3': from .v3_1_preview_3.operations import TextAnalyticsClientOperationsMixin as OperationClass + elif api_version == 'v3.1-preview.4': + from .v3_1_preview_4.operations import TextAnalyticsClientOperationsMixin as OperationClass else: raise ValueError("API version {} does not have operation 'entities_linking'".format(api_version)) mixin_instance = OperationClass() @@ -257,6 +266,8 @@ def entities_recognition_general( from .v3_0.operations import TextAnalyticsClientOperationsMixin as OperationClass elif api_version == 'v3.1-preview.3': from .v3_1_preview_3.operations import TextAnalyticsClientOperationsMixin as OperationClass + elif api_version == 'v3.1-preview.4': + from .v3_1_preview_4.operations import TextAnalyticsClientOperationsMixin as OperationClass else: raise ValueError("API version {} does not have operation 'entities_recognition_general'".format(api_version)) mixin_instance = OperationClass() @@ -307,6 
+318,8 @@ def entities_recognition_pii( api_version = self._get_api_version('entities_recognition_pii') if api_version == 'v3.1-preview.3': from .v3_1_preview_3.operations import TextAnalyticsClientOperationsMixin as OperationClass + elif api_version == 'v3.1-preview.4': + from .v3_1_preview_4.operations import TextAnalyticsClientOperationsMixin as OperationClass else: raise ValueError("API version {} does not have operation 'entities_recognition_pii'".format(api_version)) mixin_instance = OperationClass() @@ -348,6 +361,8 @@ def health_status( api_version = self._get_api_version('health_status') if api_version == 'v3.1-preview.3': from .v3_1_preview_3.operations import TextAnalyticsClientOperationsMixin as OperationClass + elif api_version == 'v3.1-preview.4': + from .v3_1_preview_4.operations import TextAnalyticsClientOperationsMixin as OperationClass else: raise ValueError("API version {} does not have operation 'health_status'".format(api_version)) mixin_instance = OperationClass() @@ -389,6 +404,8 @@ def key_phrases( from .v3_0.operations import TextAnalyticsClientOperationsMixin as OperationClass elif api_version == 'v3.1-preview.3': from .v3_1_preview_3.operations import TextAnalyticsClientOperationsMixin as OperationClass + elif api_version == 'v3.1-preview.4': + from .v3_1_preview_4.operations import TextAnalyticsClientOperationsMixin as OperationClass else: raise ValueError("API version {} does not have operation 'key_phrases'".format(api_version)) mixin_instance = OperationClass() @@ -431,6 +448,8 @@ def languages( from .v3_0.operations import TextAnalyticsClientOperationsMixin as OperationClass elif api_version == 'v3.1-preview.3': from .v3_1_preview_3.operations import TextAnalyticsClientOperationsMixin as OperationClass + elif api_version == 'v3.1-preview.4': + from .v3_1_preview_4.operations import TextAnalyticsClientOperationsMixin as OperationClass else: raise ValueError("API version {} does not have operation 'languages'".format(api_version)) mixin_instance = OperationClass() @@ -473,6 +492,8 @@ def sentiment( from .v3_0.operations import TextAnalyticsClientOperationsMixin as OperationClass elif api_version == 'v3.1-preview.3': from .v3_1_preview_3.operations import TextAnalyticsClientOperationsMixin as OperationClass + elif api_version == 'v3.1-preview.4': + from .v3_1_preview_4.operations import TextAnalyticsClientOperationsMixin as OperationClass else: raise ValueError("API version {} does not have operation 'sentiment'".format(api_version)) mixin_instance = OperationClass() diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/_text_analytics_client.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/_text_analytics_client.py index 333b62e6856a..11dd20effe38 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/_text_analytics_client.py +++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/_text_analytics_client.py @@ -75,6 +75,8 @@ def __init__( base_url = '{Endpoint}/text/analytics/v3.0' elif api_version == 'v3.1-preview.3': base_url = '{Endpoint}/text/analytics/v3.1-preview.3' + elif api_version == 'v3.1-preview.4': + base_url = '{Endpoint}/text/analytics/v3.1-preview.4' else: raise ValueError("API version {} is not available".format(api_version)) self._config = TextAnalyticsClientConfiguration(credential, endpoint, **kwargs) @@ -94,6 +96,7 @@ def models(cls, api_version=DEFAULT_API_VERSION): * v3.0: :mod:`v3_0.models` * v3.1-preview.3: 
:mod:`v3_1_preview_3.models` + * v3.1-preview.4: :mod:`v3_1_preview_4.models` """ if api_version == 'v3.0': from .v3_0 import models @@ -101,6 +104,9 @@ def models(cls, api_version=DEFAULT_API_VERSION): elif api_version == 'v3.1-preview.3': from .v3_1_preview_3 import models return models + elif api_version == 'v3.1-preview.4': + from .v3_1_preview_4 import models + return models raise ValueError("API version {} is not available".format(api_version)) def close(self): diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/aio/_operations_mixin.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/aio/_operations_mixin.py index cb2794948437..cedc40af0fc8 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/aio/_operations_mixin.py +++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/aio/_operations_mixin.py @@ -13,8 +13,7 @@ import warnings # FIXME: have to manually reconfigure import path for multiapi operation mixin -from ..._async_lro import AnalyzeHealthcareEntitiesAsyncLROPoller, AnalyzeHealthcareEntitiesAsyncLROPollingMethod -from ..._async_lro import AsyncAnalyzeBatchActionsLROPoller, AsyncAnalyzeBatchActionsLROPollingMethod +from ..._async_lro import AnalyzeHealthcareEntitiesAsyncLROPoller, AnalyzeHealthcareEntitiesAsyncLROPollingMethod, AsyncAnalyzeBatchActionsLROPoller, AsyncAnalyzeBatchActionsLROPollingMethod from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest @@ -57,6 +56,8 @@ async def analyze_status( api_version = self._get_api_version('analyze_status') if api_version == 'v3.1-preview.3': from ..v3_1_preview_3.aio.operations import TextAnalyticsClientOperationsMixin as OperationClass + elif api_version == 'v3.1-preview.4': + from ..v3_1_preview_4.aio.operations import TextAnalyticsClientOperationsMixin as OperationClass else: raise ValueError("API version {} does not have operation 'analyze_status'".format(api_version)) mixin_instance = OperationClass() @@ -86,12 +87,14 @@ async def begin_analyze( :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
:return: An instance of AsyncAnalyzeBatchActionsLROPoller that returns either AnalyzeJobState or the result of cls(response) - :rtype: ~...._async_lro.AsyncAnalyzeBatchActionsLROPoller[~azure.ai.textanalytics.v3_1_preview_3.models.AnalyzeJobState] + :rtype: ~....._async_lro.AsyncAnalyzeBatchActionsLROPoller[~azure.ai.textanalytics.v3_1_preview_3.models.AnalyzeJobState] :raises ~azure.core.exceptions.HttpResponseError: """ api_version = self._get_api_version('begin_analyze') if api_version == 'v3.1-preview.3': from ..v3_1_preview_3.aio.operations import TextAnalyticsClientOperationsMixin as OperationClass + elif api_version == 'v3.1-preview.4': + from ..v3_1_preview_4.aio.operations import TextAnalyticsClientOperationsMixin as OperationClass else: raise ValueError("API version {} does not have operation 'begin_analyze'".format(api_version)) mixin_instance = OperationClass() @@ -126,6 +129,8 @@ async def begin_cancel_health_job( api_version = self._get_api_version('begin_cancel_health_job') if api_version == 'v3.1-preview.3': from ..v3_1_preview_3.aio.operations import TextAnalyticsClientOperationsMixin as OperationClass + elif api_version == 'v3.1-preview.4': + from ..v3_1_preview_4.aio.operations import TextAnalyticsClientOperationsMixin as OperationClass else: raise ValueError("API version {} does not have operation 'begin_cancel_health_job'".format(api_version)) mixin_instance = OperationClass() @@ -170,6 +175,8 @@ async def begin_health( api_version = self._get_api_version('begin_health') if api_version == 'v3.1-preview.3': from ..v3_1_preview_3.aio.operations import TextAnalyticsClientOperationsMixin as OperationClass + elif api_version == 'v3.1-preview.4': + from ..v3_1_preview_4.aio.operations import TextAnalyticsClientOperationsMixin as OperationClass else: raise ValueError("API version {} does not have operation 'begin_health'".format(api_version)) mixin_instance = OperationClass() @@ -211,6 +218,8 @@ async def entities_linking( from ..v3_0.aio.operations import TextAnalyticsClientOperationsMixin as OperationClass elif api_version == 'v3.1-preview.3': from ..v3_1_preview_3.aio.operations import TextAnalyticsClientOperationsMixin as OperationClass + elif api_version == 'v3.1-preview.4': + from ..v3_1_preview_4.aio.operations import TextAnalyticsClientOperationsMixin as OperationClass else: raise ValueError("API version {} does not have operation 'entities_linking'".format(api_version)) mixin_instance = OperationClass() @@ -253,6 +262,8 @@ async def entities_recognition_general( from ..v3_0.aio.operations import TextAnalyticsClientOperationsMixin as OperationClass elif api_version == 'v3.1-preview.3': from ..v3_1_preview_3.aio.operations import TextAnalyticsClientOperationsMixin as OperationClass + elif api_version == 'v3.1-preview.4': + from ..v3_1_preview_4.aio.operations import TextAnalyticsClientOperationsMixin as OperationClass else: raise ValueError("API version {} does not have operation 'entities_recognition_general'".format(api_version)) mixin_instance = OperationClass() @@ -303,6 +314,8 @@ async def entities_recognition_pii( api_version = self._get_api_version('entities_recognition_pii') if api_version == 'v3.1-preview.3': from ..v3_1_preview_3.aio.operations import TextAnalyticsClientOperationsMixin as OperationClass + elif api_version == 'v3.1-preview.4': + from ..v3_1_preview_4.aio.operations import TextAnalyticsClientOperationsMixin as OperationClass else: raise ValueError("API version {} does not have operation 'entities_recognition_pii'".format(api_version)) 
mixin_instance = OperationClass() @@ -344,6 +357,8 @@ async def health_status( api_version = self._get_api_version('health_status') if api_version == 'v3.1-preview.3': from ..v3_1_preview_3.aio.operations import TextAnalyticsClientOperationsMixin as OperationClass + elif api_version == 'v3.1-preview.4': + from ..v3_1_preview_4.aio.operations import TextAnalyticsClientOperationsMixin as OperationClass else: raise ValueError("API version {} does not have operation 'health_status'".format(api_version)) mixin_instance = OperationClass() @@ -385,6 +400,8 @@ async def key_phrases( from ..v3_0.aio.operations import TextAnalyticsClientOperationsMixin as OperationClass elif api_version == 'v3.1-preview.3': from ..v3_1_preview_3.aio.operations import TextAnalyticsClientOperationsMixin as OperationClass + elif api_version == 'v3.1-preview.4': + from ..v3_1_preview_4.aio.operations import TextAnalyticsClientOperationsMixin as OperationClass else: raise ValueError("API version {} does not have operation 'key_phrases'".format(api_version)) mixin_instance = OperationClass() @@ -427,6 +444,8 @@ async def languages( from ..v3_0.aio.operations import TextAnalyticsClientOperationsMixin as OperationClass elif api_version == 'v3.1-preview.3': from ..v3_1_preview_3.aio.operations import TextAnalyticsClientOperationsMixin as OperationClass + elif api_version == 'v3.1-preview.4': + from ..v3_1_preview_4.aio.operations import TextAnalyticsClientOperationsMixin as OperationClass else: raise ValueError("API version {} does not have operation 'languages'".format(api_version)) mixin_instance = OperationClass() @@ -469,6 +488,8 @@ async def sentiment( from ..v3_0.aio.operations import TextAnalyticsClientOperationsMixin as OperationClass elif api_version == 'v3.1-preview.3': from ..v3_1_preview_3.aio.operations import TextAnalyticsClientOperationsMixin as OperationClass + elif api_version == 'v3.1-preview.4': + from ..v3_1_preview_4.aio.operations import TextAnalyticsClientOperationsMixin as OperationClass else: raise ValueError("API version {} does not have operation 'sentiment'".format(api_version)) mixin_instance = OperationClass() diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/aio/_text_analytics_client.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/aio/_text_analytics_client.py index e60c34ea5476..cac9a1c199c1 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/aio/_text_analytics_client.py +++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/aio/_text_analytics_client.py @@ -73,6 +73,8 @@ def __init__( base_url = '{Endpoint}/text/analytics/v3.0' elif api_version == 'v3.1-preview.3': base_url = '{Endpoint}/text/analytics/v3.1-preview.3' + elif api_version == 'v3.1-preview.4': + base_url = '{Endpoint}/text/analytics/v3.1-preview.4' else: raise ValueError("API version {} is not available".format(api_version)) self._config = TextAnalyticsClientConfiguration(credential, endpoint, **kwargs) @@ -92,6 +94,7 @@ def models(cls, api_version=DEFAULT_API_VERSION): * v3.0: :mod:`v3_0.models` * v3.1-preview.3: :mod:`v3_1_preview_3.models` + * v3.1-preview.4: :mod:`v3_1_preview_4.models` """ if api_version == 'v3.0': from ..v3_0 import models @@ -99,6 +102,9 @@ def models(cls, api_version=DEFAULT_API_VERSION): elif api_version == 'v3.1-preview.3': from ..v3_1_preview_3 import models return models + elif api_version == 'v3.1-preview.4': + from ..v3_1_preview_4 import models + return models raise 
ValueError("API version {} is not available".format(api_version)) async def close(self): diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_3/_metadata.json b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_3/_metadata.json new file mode 100644 index 000000000000..870a86ee2d3e --- /dev/null +++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_3/_metadata.json @@ -0,0 +1,264 @@ +{ + "chosen_version": "v3.1-preview.3", + "total_api_version_list": ["v3.1-preview.3"], + "client": { + "name": "TextAnalyticsClient", + "filename": "_text_analytics_client", + "description": "The Text Analytics API is a suite of natural language processing (NLP) services built with best-in-class Microsoft machine learning algorithms. The API can be used to analyze unstructured text for tasks such as sentiment analysis, key phrase extraction and language detection. Further documentation can be found in :code:`\u003ca href=\"https://docs.microsoft.com/en-us/azure/cognitive-services/text-analytics/overview\"\u003ehttps://docs.microsoft.com/en-us/azure/cognitive-services/text-analytics/overview\u003c/a\u003e`.", + "base_url": null, + "custom_base_url": "\u0027{Endpoint}/text/analytics/v3.1-preview.3\u0027", + "azure_arm": false, + "has_lro_operations": true, + "client_side_validation": false, + "sync_imports": "{\"typing\": {\"azurecore\": {\"azure.core.credentials\": [\"TokenCredential\"]}}, \"regular\": {\"azurecore\": {\"azure.profiles\": [\"KnownProfiles\", \"ProfileDefinition\"], \"azure.profiles.multiapiclient\": [\"MultiApiClientMixin\"], \"msrest\": [\"Deserializer\", \"Serializer\"], \"azure.core\": [\"PipelineClient\"]}, \"local\": {\"._configuration\": [\"TextAnalyticsClientConfiguration\"], \"._operations_mixin\": [\"TextAnalyticsClientOperationsMixin\"]}}, \"conditional\": {\"stdlib\": {\"typing\": [\"Any\", \"Optional\"]}}}", + "async_imports": "{\"typing\": {\"azurecore\": {\"azure.core.credentials_async\": [\"AsyncTokenCredential\"]}}, \"regular\": {\"azurecore\": {\"azure.profiles\": [\"KnownProfiles\", \"ProfileDefinition\"], \"azure.profiles.multiapiclient\": [\"MultiApiClientMixin\"], \"msrest\": [\"Deserializer\", \"Serializer\"], \"azure.core\": [\"AsyncPipelineClient\"]}, \"local\": {\"._configuration\": [\"TextAnalyticsClientConfiguration\"], \"._operations_mixin\": [\"TextAnalyticsClientOperationsMixin\"]}}, \"conditional\": {\"stdlib\": {\"typing\": [\"Any\", \"Optional\"]}}}" + }, + "global_parameters": { + "sync": { + "credential": { + "signature": "credential, # type: \"TokenCredential\"", + "description": "Credential needed for the client to connect to Azure.", + "docstring_type": "~azure.core.credentials.TokenCredential", + "required": true + }, + "endpoint": { + "signature": "endpoint, # type: str", + "description": "Supported Cognitive Services endpoints (protocol and hostname, for example: https://westus.api.cognitive.microsoft.com).", + "docstring_type": "str", + "required": true + } + }, + "async": { + "credential": { + "signature": "credential: \"AsyncTokenCredential\",", + "description": "Credential needed for the client to connect to Azure.", + "docstring_type": "~azure.core.credentials_async.AsyncTokenCredential", + "required": true + }, + "endpoint": { + "signature": "endpoint: str,", + "description": "Supported Cognitive Services endpoints (protocol and hostname, for example: https://westus.api.cognitive.microsoft.com).", + "docstring_type": "str", 
+ "required": true + } + }, + "constant": { + }, + "call": "credential, endpoint", + "service_client_specific": { + "sync": { + "api_version": { + "signature": "api_version=None, # type: Optional[str]", + "description": "API version to use if no profile is provided, or if missing in profile.", + "docstring_type": "str", + "required": false + }, + "profile": { + "signature": "profile=KnownProfiles.default, # type: KnownProfiles", + "description": "A profile definition, from KnownProfiles to dict.", + "docstring_type": "azure.profiles.KnownProfiles", + "required": false + } + }, + "async": { + "api_version": { + "signature": "api_version: Optional[str] = None,", + "description": "API version to use if no profile is provided, or if missing in profile.", + "docstring_type": "str", + "required": false + }, + "profile": { + "signature": "profile: KnownProfiles = KnownProfiles.default,", + "description": "A profile definition, from KnownProfiles to dict.", + "docstring_type": "azure.profiles.KnownProfiles", + "required": false + } + } + } + }, + "config": { + "credential": true, + "credential_scopes": ["https://cognitiveservices.azure.com/.default"], + "credential_default_policy_type": "BearerTokenCredentialPolicy", + "credential_default_policy_type_has_async_version": true, + "credential_key_header_name": null, + "sync_imports": "{\"regular\": {\"azurecore\": {\"azure.core.configuration\": [\"Configuration\"], \"azure.core.pipeline\": [\"policies\"]}, \"local\": {\"._version\": [\"VERSION\"]}}, \"conditional\": {\"stdlib\": {\"typing\": [\"Any\"]}}, \"typing\": {\"azurecore\": {\"azure.core.credentials\": [\"TokenCredential\"]}}}", + "async_imports": "{\"regular\": {\"azurecore\": {\"azure.core.configuration\": [\"Configuration\"], \"azure.core.pipeline\": [\"policies\"]}, \"local\": {\".._version\": [\"VERSION\"]}}, \"conditional\": {\"stdlib\": {\"typing\": [\"Any\"]}}, \"typing\": {\"azurecore\": {\"azure.core.credentials_async\": [\"AsyncTokenCredential\"]}}}" + }, + "operation_groups": { + }, + "operation_mixins": { + "sync_imports": "{\"regular\": {\"azurecore\": {\"azure.core.exceptions\": [\"ClientAuthenticationError\", \"HttpResponseError\", \"ResourceExistsError\", \"ResourceNotFoundError\", \"map_error\"], \"azure.core.pipeline\": [\"PipelineResponse\"], \"azure.core.pipeline.transport\": [\"HttpRequest\", \"HttpResponse\"], \"...._lro\": [\"AnalyzeBatchActionsLROPoller\", \"AnalyzeBatchActionsLROPollingMethod\", \"AnalyzeHealthcareEntitiesLROPoller\", \"AnalyzeHealthcareEntitiesLROPollingMethod\"], \"azure.core.polling\": [\"LROPoller\", \"NoPolling\", \"PollingMethod\"], \"azure.core.polling.base_polling\": [\"LROBasePolling\"]}, \"stdlib\": {\"warnings\": [null]}}, \"conditional\": {\"stdlib\": {\"typing\": [\"Any\", \"Callable\", \"Dict\", \"Generic\", \"List\", \"Optional\", \"TypeVar\", \"Union\"]}}}", + "async_imports": "{\"regular\": {\"azurecore\": {\"azure.core.exceptions\": [\"ClientAuthenticationError\", \"HttpResponseError\", \"ResourceExistsError\", \"ResourceNotFoundError\", \"map_error\"], \"azure.core.pipeline\": [\"PipelineResponse\"], \"azure.core.pipeline.transport\": [\"AsyncHttpResponse\", \"HttpRequest\"], \"....._async_lro\": [\"AnalyzeHealthcareEntitiesAsyncLROPoller\", \"AnalyzeHealthcareEntitiesAsyncLROPollingMethod\", \"AsyncAnalyzeBatchActionsLROPoller\", \"AsyncAnalyzeBatchActionsLROPollingMethod\"], \"azure.core.polling\": [\"AsyncLROPoller\", \"AsyncNoPolling\", \"AsyncPollingMethod\"], \"azure.core.polling.async_base_polling\": 
[\"AsyncLROBasePolling\"]}, \"stdlib\": {\"warnings\": [null]}}, \"conditional\": {\"stdlib\": {\"typing\": [\"Any\", \"Callable\", \"Dict\", \"Generic\", \"List\", \"Optional\", \"TypeVar\", \"Union\"]}}}", + "operations": { + "_analyze_initial" : { + "sync": { + "signature": "def _analyze_initial(\n self,\n body=None, # type: Optional[\"_models.AnalyzeBatchInput\"]\n **kwargs # type: Any\n):\n", + "doc": "\"\"\"\n\n:param body: Collection of documents to analyze and tasks to execute.\n:type body: ~azure.ai.textanalytics.v3_1_preview_3.models.AnalyzeBatchInput\n:keyword callable cls: A custom type or function that will be passed the direct response\n:return: AnalyzeJobState, or the result of cls(response)\n:rtype: ~azure.ai.textanalytics.v3_1_preview_3.models.AnalyzeJobState or None\n:raises: ~azure.core.exceptions.HttpResponseError\n\"\"\"" + }, + "async": { + "coroutine": true, + "signature": "async def _analyze_initial(\n self,\n body: Optional[\"_models.AnalyzeBatchInput\"] = None,\n **kwargs\n) -\u003e Optional[\"_models.AnalyzeJobState\"]:\n", + "doc": "\"\"\"\n\n:param body: Collection of documents to analyze and tasks to execute.\n:type body: ~azure.ai.textanalytics.v3_1_preview_3.models.AnalyzeBatchInput\n:keyword callable cls: A custom type or function that will be passed the direct response\n:return: AnalyzeJobState, or the result of cls(response)\n:rtype: ~azure.ai.textanalytics.v3_1_preview_3.models.AnalyzeJobState or None\n:raises: ~azure.core.exceptions.HttpResponseError\n\"\"\"" + }, + "call": "body" + }, + "begin_analyze" : { + "sync": { + "signature": "def begin_analyze(\n self,\n body=None, # type: Optional[\"_models.AnalyzeBatchInput\"]\n **kwargs # type: Any\n):\n", + "doc": "\"\"\"Submit analysis job.\n\nSubmit a collection of text documents for analysis. Specify one or more unique tasks to be\nexecuted.\n\n:param body: Collection of documents to analyze and tasks to execute.\n:type body: ~azure.ai.textanalytics.v3_1_preview_3.models.AnalyzeBatchInput\n:keyword callable cls: A custom type or function that will be passed the direct response\n:keyword str continuation_token: A continuation token to restart a poller from a saved state.\n:keyword polling: Pass in True if you\u0027d like the AnalyzeBatchActionsLROPollingMethod polling method,\n False for no polling, or your own initialized polling object for a personal polling strategy.\n:paramtype polling: bool or ~azure.core.polling.PollingMethod\n:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n:return: An instance of AnalyzeBatchActionsLROPoller that returns either AnalyzeJobState or the result of cls(response)\n:rtype: ~...._lro.AnalyzeBatchActionsLROPoller[~azure.ai.textanalytics.v3_1_preview_3.models.AnalyzeJobState]\n:raises ~azure.core.exceptions.HttpResponseError:\n\"\"\"" + }, + "async": { + "coroutine": true, + "signature": "async def begin_analyze(\n self,\n body: Optional[\"_models.AnalyzeBatchInput\"] = None,\n **kwargs\n) -\u003e AsyncAnalyzeBatchActionsLROPoller[\"_models.AnalyzeJobState\"]:\n", + "doc": "\"\"\"Submit analysis job.\n\nSubmit a collection of text documents for analysis. 
Specify one or more unique tasks to be\nexecuted.\n\n:param body: Collection of documents to analyze and tasks to execute.\n:type body: ~azure.ai.textanalytics.v3_1_preview_3.models.AnalyzeBatchInput\n:keyword callable cls: A custom type or function that will be passed the direct response\n:keyword str continuation_token: A continuation token to restart a poller from a saved state.\n:keyword polling: Pass in True if you\u0027d like the AsyncAnalyzeBatchActionsLROPollingMethod polling method,\n False for no polling, or your own initialized polling object for a personal polling strategy.\n:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod\n:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n:return: An instance of AsyncAnalyzeBatchActionsLROPoller that returns either AnalyzeJobState or the result of cls(response)\n:rtype: ~....._async_lro.AsyncAnalyzeBatchActionsLROPoller[~azure.ai.textanalytics.v3_1_preview_3.models.AnalyzeJobState]\n:raises ~azure.core.exceptions.HttpResponseError:\n\"\"\"" + }, + "call": "body" + }, + "analyze_status" : { + "sync": { + "signature": "def analyze_status(\n self,\n job_id, # type: str\n show_stats=None, # type: Optional[bool]\n top=20, # type: Optional[int]\n skip=0, # type: Optional[int]\n **kwargs # type: Any\n):\n", + "doc": "\"\"\"Get analysis status and results.\n\nGet the status of an analysis job. A job may consist of one or more tasks. Once all tasks are\ncompleted, the job will transition to the completed state and results will be available for\neach task.\n\n:param job_id: Job ID for Analyze.\n:type job_id: str\n:param show_stats: (Optional) if set to true, response will contain request and document level\n statistics.\n:type show_stats: bool\n:param top: (Optional) Set the maximum number of results per task. When both $top and $skip are\n specified, $skip is applied first.\n:type top: int\n:param skip: (Optional) Set the number of elements to offset in the response. When both $top\n and $skip are specified, $skip is applied first.\n:type skip: int\n:keyword callable cls: A custom type or function that will be passed the direct response\n:return: AnalyzeJobState, or the result of cls(response)\n:rtype: ~azure.ai.textanalytics.v3_1_preview_3.models.AnalyzeJobState\n:raises: ~azure.core.exceptions.HttpResponseError\n\"\"\"" + }, + "async": { + "coroutine": true, + "signature": "async def analyze_status(\n self,\n job_id: str,\n show_stats: Optional[bool] = None,\n top: Optional[int] = 20,\n skip: Optional[int] = 0,\n **kwargs\n) -\u003e \"_models.AnalyzeJobState\":\n", + "doc": "\"\"\"Get analysis status and results.\n\nGet the status of an analysis job. A job may consist of one or more tasks. Once all tasks are\ncompleted, the job will transition to the completed state and results will be available for\neach task.\n\n:param job_id: Job ID for Analyze.\n:type job_id: str\n:param show_stats: (Optional) if set to true, response will contain request and document level\n statistics.\n:type show_stats: bool\n:param top: (Optional) Set the maximum number of results per task. When both $top and $skip are\n specified, $skip is applied first.\n:type top: int\n:param skip: (Optional) Set the number of elements to offset in the response. 
When both $top\n and $skip are specified, $skip is applied first.\n:type skip: int\n:keyword callable cls: A custom type or function that will be passed the direct response\n:return: AnalyzeJobState, or the result of cls(response)\n:rtype: ~azure.ai.textanalytics.v3_1_preview_3.models.AnalyzeJobState\n:raises: ~azure.core.exceptions.HttpResponseError\n\"\"\"" + }, + "call": "job_id, show_stats, top, skip" + }, + "health_status" : { + "sync": { + "signature": "def health_status(\n self,\n job_id, # type: str\n top=20, # type: Optional[int]\n skip=0, # type: Optional[int]\n show_stats=None, # type: Optional[bool]\n **kwargs # type: Any\n):\n", + "doc": "\"\"\"Get healthcare analysis job status and results.\n\nGet details of the healthcare prediction job specified by the jobId.\n\n:param job_id: Job ID.\n:type job_id: str\n:param top: (Optional) Set the maximum number of results per task. When both $top and $skip are\n specified, $skip is applied first.\n:type top: int\n:param skip: (Optional) Set the number of elements to offset in the response. When both $top\n and $skip are specified, $skip is applied first.\n:type skip: int\n:param show_stats: (Optional) if set to true, response will contain request and document level\n statistics.\n:type show_stats: bool\n:keyword callable cls: A custom type or function that will be passed the direct response\n:return: HealthcareJobState, or the result of cls(response)\n:rtype: ~azure.ai.textanalytics.v3_1_preview_3.models.HealthcareJobState\n:raises: ~azure.core.exceptions.HttpResponseError\n\"\"\"" + }, + "async": { + "coroutine": true, + "signature": "async def health_status(\n self,\n job_id: str,\n top: Optional[int] = 20,\n skip: Optional[int] = 0,\n show_stats: Optional[bool] = None,\n **kwargs\n) -\u003e \"_models.HealthcareJobState\":\n", + "doc": "\"\"\"Get healthcare analysis job status and results.\n\nGet details of the healthcare prediction job specified by the jobId.\n\n:param job_id: Job ID.\n:type job_id: str\n:param top: (Optional) Set the maximum number of results per task. When both $top and $skip are\n specified, $skip is applied first.\n:type top: int\n:param skip: (Optional) Set the number of elements to offset in the response. 
When both $top\n and $skip are specified, $skip is applied first.\n:type skip: int\n:param show_stats: (Optional) if set to true, response will contain request and document level\n statistics.\n:type show_stats: bool\n:keyword callable cls: A custom type or function that will be passed the direct response\n:return: HealthcareJobState, or the result of cls(response)\n:rtype: ~azure.ai.textanalytics.v3_1_preview_3.models.HealthcareJobState\n:raises: ~azure.core.exceptions.HttpResponseError\n\"\"\"" + }, + "call": "job_id, top, skip, show_stats" + }, + "_cancel_health_job_initial" : { + "sync": { + "signature": "def _cancel_health_job_initial(\n self,\n job_id, # type: str\n **kwargs # type: Any\n):\n", + "doc": "\"\"\"\n\n:param job_id: Job ID.\n:type job_id: str\n:keyword callable cls: A custom type or function that will be passed the direct response\n:return: None, or the result of cls(response)\n:rtype: None\n:raises: ~azure.core.exceptions.HttpResponseError\n\"\"\"" + }, + "async": { + "coroutine": true, + "signature": "async def _cancel_health_job_initial(\n self,\n job_id: str,\n **kwargs\n) -\u003e None:\n", + "doc": "\"\"\"\n\n:param job_id: Job ID.\n:type job_id: str\n:keyword callable cls: A custom type or function that will be passed the direct response\n:return: None, or the result of cls(response)\n:rtype: None\n:raises: ~azure.core.exceptions.HttpResponseError\n\"\"\"" + }, + "call": "job_id" + }, + "begin_cancel_health_job" : { + "sync": { + "signature": "def begin_cancel_health_job(\n self,\n job_id, # type: str\n **kwargs # type: Any\n):\n", + "doc": "\"\"\"Cancel healthcare prediction job.\n\nCancel healthcare prediction job.\n\n:param job_id: Job ID.\n:type job_id: str\n:keyword callable cls: A custom type or function that will be passed the direct response\n:keyword str continuation_token: A continuation token to restart a poller from a saved state.\n:keyword polling: Pass in True if you\u0027d like the LROBasePolling polling method,\n False for no polling, or your own initialized polling object for a personal polling strategy.\n:paramtype polling: bool or ~azure.core.polling.PollingMethod\n:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n:return: An instance of LROPoller that returns either None or the result of cls(response)\n:rtype: ~azure.core.polling.LROPoller[None]\n:raises ~azure.core.exceptions.HttpResponseError:\n\"\"\"" + }, + "async": { + "coroutine": true, + "signature": "async def begin_cancel_health_job(\n self,\n job_id: str,\n **kwargs\n) -\u003e AsyncLROPoller[None]:\n", + "doc": "\"\"\"Cancel healthcare prediction job.\n\nCancel healthcare prediction job.\n\n:param job_id: Job ID.\n:type job_id: str\n:keyword callable cls: A custom type or function that will be passed the direct response\n:keyword str continuation_token: A continuation token to restart a poller from a saved state.\n:keyword polling: Pass in True if you\u0027d like the AsyncLROBasePolling polling method,\n False for no polling, or your own initialized polling object for a personal polling strategy.\n:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod\n:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)\n:rtype: ~azure.core.polling.AsyncLROPoller[None]\n:raises ~azure.core.exceptions.HttpResponseError:\n\"\"\"" + }, + "call": 
"job_id" + }, + "_health_initial" : { + "sync": { + "signature": "def _health_initial(\n self,\n documents, # type: List[\"_models.MultiLanguageInput\"]\n model_version=None, # type: Optional[str]\n string_index_type=\"TextElements_v8\", # type: Optional[Union[str, \"_models.StringIndexType\"]]\n **kwargs # type: Any\n):\n", + "doc": "\"\"\"\n\n:param documents: The set of documents to process as part of this batch.\n:type documents: list[~azure.ai.textanalytics.v3_1_preview_3.models.MultiLanguageInput]\n:param model_version: (Optional) This value indicates which model will be used for scoring. If\n a model-version is not specified, the API should default to the latest, non-preview version.\n:type model_version: str\n:param string_index_type: (Optional) Specifies the method used to interpret string offsets.\n Defaults to Text Elements (Graphemes) according to Unicode v8.0.0. For additional information\n see https://aka.ms/text-analytics-offsets.\n:type string_index_type: str or ~azure.ai.textanalytics.v3_1_preview_3.models.StringIndexType\n:keyword callable cls: A custom type or function that will be passed the direct response\n:return: HealthcareJobState, or the result of cls(response)\n:rtype: ~azure.ai.textanalytics.v3_1_preview_3.models.HealthcareJobState or None\n:raises: ~azure.core.exceptions.HttpResponseError\n\"\"\"" + }, + "async": { + "coroutine": true, + "signature": "async def _health_initial(\n self,\n documents: List[\"_models.MultiLanguageInput\"],\n model_version: Optional[str] = None,\n string_index_type: Optional[Union[str, \"_models.StringIndexType\"]] = \"TextElements_v8\",\n **kwargs\n) -\u003e Optional[\"_models.HealthcareJobState\"]:\n", + "doc": "\"\"\"\n\n:param documents: The set of documents to process as part of this batch.\n:type documents: list[~azure.ai.textanalytics.v3_1_preview_3.models.MultiLanguageInput]\n:param model_version: (Optional) This value indicates which model will be used for scoring. If\n a model-version is not specified, the API should default to the latest, non-preview version.\n:type model_version: str\n:param string_index_type: (Optional) Specifies the method used to interpret string offsets.\n Defaults to Text Elements (Graphemes) according to Unicode v8.0.0. For additional information\n see https://aka.ms/text-analytics-offsets.\n:type string_index_type: str or ~azure.ai.textanalytics.v3_1_preview_3.models.StringIndexType\n:keyword callable cls: A custom type or function that will be passed the direct response\n:return: HealthcareJobState, or the result of cls(response)\n:rtype: ~azure.ai.textanalytics.v3_1_preview_3.models.HealthcareJobState or None\n:raises: ~azure.core.exceptions.HttpResponseError\n\"\"\"" + }, + "call": "documents, model_version, string_index_type" + }, + "begin_health" : { + "sync": { + "signature": "def begin_health(\n self,\n documents, # type: List[\"_models.MultiLanguageInput\"]\n model_version=None, # type: Optional[str]\n string_index_type=\"TextElements_v8\", # type: Optional[Union[str, \"_models.StringIndexType\"]]\n **kwargs # type: Any\n):\n", + "doc": "\"\"\"Submit healthcare analysis job.\n\nStart a healthcare analysis job to recognize healthcare related entities (drugs, conditions,\nsymptoms, etc) and their relations.\n\n:param documents: The set of documents to process as part of this batch.\n:type documents: list[~azure.ai.textanalytics.v3_1_preview_3.models.MultiLanguageInput]\n:param model_version: (Optional) This value indicates which model will be used for scoring. 
If\n a model-version is not specified, the API should default to the latest, non-preview version.\n:type model_version: str\n:param string_index_type: (Optional) Specifies the method used to interpret string offsets.\n Defaults to Text Elements (Graphemes) according to Unicode v8.0.0. For additional information\n see https://aka.ms/text-analytics-offsets.\n:type string_index_type: str or ~azure.ai.textanalytics.v3_1_preview_3.models.StringIndexType\n:keyword callable cls: A custom type or function that will be passed the direct response\n:keyword str continuation_token: A continuation token to restart a poller from a saved state.\n:keyword polling: Pass in True if you\u0027d like the AnalyzeHealthcareEntitiesLROPollingMethod polling method,\n False for no polling, or your own initialized polling object for a personal polling strategy.\n:paramtype polling: bool or ~azure.core.polling.PollingMethod\n:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n:return: An instance of AnalyzeHealthcareEntitiesLROPoller that returns either HealthcareJobState or the result of cls(response)\n:rtype: ~...._lro.AnalyzeHealthcareEntitiesLROPoller[~azure.ai.textanalytics.v3_1_preview_3.models.HealthcareJobState]\n:raises ~azure.core.exceptions.HttpResponseError:\n\"\"\"" + }, + "async": { + "coroutine": true, + "signature": "async def begin_health(\n self,\n documents: List[\"_models.MultiLanguageInput\"],\n model_version: Optional[str] = None,\n string_index_type: Optional[Union[str, \"_models.StringIndexType\"]] = \"TextElements_v8\",\n **kwargs\n) -\u003e AnalyzeHealthcareEntitiesAsyncLROPoller[\"_models.HealthcareJobState\"]:\n", + "doc": "\"\"\"Submit healthcare analysis job.\n\nStart a healthcare analysis job to recognize healthcare related entities (drugs, conditions,\nsymptoms, etc) and their relations.\n\n:param documents: The set of documents to process as part of this batch.\n:type documents: list[~azure.ai.textanalytics.v3_1_preview_3.models.MultiLanguageInput]\n:param model_version: (Optional) This value indicates which model will be used for scoring. If\n a model-version is not specified, the API should default to the latest, non-preview version.\n:type model_version: str\n:param string_index_type: (Optional) Specifies the method used to interpret string offsets.\n Defaults to Text Elements (Graphemes) according to Unicode v8.0.0. 
For additional information\n see https://aka.ms/text-analytics-offsets.\n:type string_index_type: str or ~azure.ai.textanalytics.v3_1_preview_3.models.StringIndexType\n:keyword callable cls: A custom type or function that will be passed the direct response\n:keyword str continuation_token: A continuation token to restart a poller from a saved state.\n:keyword polling: Pass in True if you\u0027d like the AnalyzeHealthcareEntitiesAsyncLROPollingMethod polling method,\n False for no polling, or your own initialized polling object for a personal polling strategy.\n:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod\n:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n:return: An instance of AnalyzeHealthcareEntitiesAsyncLROPoller that returns either HealthcareJobState or the result of cls(response)\n:rtype: ~....._async_lro.AnalyzeHealthcareEntitiesAsyncLROPoller[~azure.ai.textanalytics.v3_1_preview_3.models.HealthcareJobState]\n:raises ~azure.core.exceptions.HttpResponseError:\n\"\"\"" + }, + "call": "documents, model_version, string_index_type" + }, + "entities_recognition_general" : { + "sync": { + "signature": "def entities_recognition_general(\n self,\n documents, # type: List[\"_models.MultiLanguageInput\"]\n model_version=None, # type: Optional[str]\n show_stats=None, # type: Optional[bool]\n string_index_type=\"TextElements_v8\", # type: Optional[Union[str, \"_models.StringIndexType\"]]\n **kwargs # type: Any\n):\n", + "doc": "\"\"\"Named Entity Recognition.\n\nThe API returns a list of general named entities in a given document. For the list of supported\nentity types, check :code:`\u003ca href=\"https://aka.ms/taner\"\u003eSupported Entity Types in Text\nAnalytics API\u003c/a\u003e`. See the :code:`\u003ca href=\"https://aka.ms/talangs\"\u003eSupported languages in Text\nAnalytics API\u003c/a\u003e` for the list of enabled languages.\n\n:param documents: The set of documents to process as part of this batch.\n:type documents: list[~azure.ai.textanalytics.v3_1_preview_3.models.MultiLanguageInput]\n:param model_version: (Optional) This value indicates which model will be used for scoring. If\n a model-version is not specified, the API should default to the latest, non-preview version.\n:type model_version: str\n:param show_stats: (Optional) if set to true, response will contain request and document level\n statistics.\n:type show_stats: bool\n:param string_index_type: (Optional) Specifies the method used to interpret string offsets.\n Defaults to Text Elements (Graphemes) according to Unicode v8.0.0. 
For additional information\n see https://aka.ms/text-analytics-offsets.\n:type string_index_type: str or ~azure.ai.textanalytics.v3_1_preview_3.models.StringIndexType\n:keyword callable cls: A custom type or function that will be passed the direct response\n:return: EntitiesResult, or the result of cls(response)\n:rtype: ~azure.ai.textanalytics.v3_1_preview_3.models.EntitiesResult\n:raises: ~azure.core.exceptions.HttpResponseError\n\"\"\"" + }, + "async": { + "coroutine": true, + "signature": "async def entities_recognition_general(\n self,\n documents: List[\"_models.MultiLanguageInput\"],\n model_version: Optional[str] = None,\n show_stats: Optional[bool] = None,\n string_index_type: Optional[Union[str, \"_models.StringIndexType\"]] = \"TextElements_v8\",\n **kwargs\n) -\u003e \"_models.EntitiesResult\":\n", + "doc": "\"\"\"Named Entity Recognition.\n\nThe API returns a list of general named entities in a given document. For the list of supported\nentity types, check :code:`\u003ca href=\"https://aka.ms/taner\"\u003eSupported Entity Types in Text\nAnalytics API\u003c/a\u003e`. See the :code:`\u003ca href=\"https://aka.ms/talangs\"\u003eSupported languages in Text\nAnalytics API\u003c/a\u003e` for the list of enabled languages.\n\n:param documents: The set of documents to process as part of this batch.\n:type documents: list[~azure.ai.textanalytics.v3_1_preview_3.models.MultiLanguageInput]\n:param model_version: (Optional) This value indicates which model will be used for scoring. If\n a model-version is not specified, the API should default to the latest, non-preview version.\n:type model_version: str\n:param show_stats: (Optional) if set to true, response will contain request and document level\n statistics.\n:type show_stats: bool\n:param string_index_type: (Optional) Specifies the method used to interpret string offsets.\n Defaults to Text Elements (Graphemes) according to Unicode v8.0.0. For additional information\n see https://aka.ms/text-analytics-offsets.\n:type string_index_type: str or ~azure.ai.textanalytics.v3_1_preview_3.models.StringIndexType\n:keyword callable cls: A custom type or function that will be passed the direct response\n:return: EntitiesResult, or the result of cls(response)\n:rtype: ~azure.ai.textanalytics.v3_1_preview_3.models.EntitiesResult\n:raises: ~azure.core.exceptions.HttpResponseError\n\"\"\"" + }, + "call": "documents, model_version, show_stats, string_index_type" + }, + "entities_recognition_pii" : { + "sync": { + "signature": "def entities_recognition_pii(\n self,\n documents, # type: List[\"_models.MultiLanguageInput\"]\n model_version=None, # type: Optional[str]\n show_stats=None, # type: Optional[bool]\n domain=None, # type: Optional[str]\n string_index_type=\"TextElements_v8\", # type: Optional[Union[str, \"_models.StringIndexType\"]]\n **kwargs # type: Any\n):\n", + "doc": "\"\"\"Entities containing personal information.\n\nThe API returns a list of entities with personal information (\\\"SSN\\\", \\\"Bank Account\\\" etc) in\nthe document. For the list of supported entity types, check :code:`\u003ca\nhref=\"https://aka.ms/tanerpii\"\u003eSupported Entity Types in Text Analytics API\u003c/a\u003e`. 
See the\n:code:`\u003ca href=\"https://aka.ms/talangs\"\u003eSupported languages in Text Analytics API\u003c/a\u003e` for the\nlist of enabled languages.\n\n:param documents: The set of documents to process as part of this batch.\n:type documents: list[~azure.ai.textanalytics.v3_1_preview_3.models.MultiLanguageInput]\n:param model_version: (Optional) This value indicates which model will be used for scoring. If\n a model-version is not specified, the API should default to the latest, non-preview version.\n:type model_version: str\n:param show_stats: (Optional) if set to true, response will contain request and document level\n statistics.\n:type show_stats: bool\n:param domain: (Optional) if specified, will set the PII domain to include only a subset of the\n entity categories. Possible values include: \u0027PHI\u0027, \u0027none\u0027.\n:type domain: str\n:param string_index_type: (Optional) Specifies the method used to interpret string offsets.\n Defaults to Text Elements (Graphemes) according to Unicode v8.0.0. For additional information\n see https://aka.ms/text-analytics-offsets.\n:type string_index_type: str or ~azure.ai.textanalytics.v3_1_preview_3.models.StringIndexType\n:keyword callable cls: A custom type or function that will be passed the direct response\n:return: PiiResult, or the result of cls(response)\n:rtype: ~azure.ai.textanalytics.v3_1_preview_3.models.PiiResult\n:raises: ~azure.core.exceptions.HttpResponseError\n\"\"\"" + }, + "async": { + "coroutine": true, + "signature": "async def entities_recognition_pii(\n self,\n documents: List[\"_models.MultiLanguageInput\"],\n model_version: Optional[str] = None,\n show_stats: Optional[bool] = None,\n domain: Optional[str] = None,\n string_index_type: Optional[Union[str, \"_models.StringIndexType\"]] = \"TextElements_v8\",\n **kwargs\n) -\u003e \"_models.PiiResult\":\n", + "doc": "\"\"\"Entities containing personal information.\n\nThe API returns a list of entities with personal information (\\\"SSN\\\", \\\"Bank Account\\\" etc) in\nthe document. For the list of supported entity types, check :code:`\u003ca\nhref=\"https://aka.ms/tanerpii\"\u003eSupported Entity Types in Text Analytics API\u003c/a\u003e`. See the\n:code:`\u003ca href=\"https://aka.ms/talangs\"\u003eSupported languages in Text Analytics API\u003c/a\u003e` for the\nlist of enabled languages.\n\n:param documents: The set of documents to process as part of this batch.\n:type documents: list[~azure.ai.textanalytics.v3_1_preview_3.models.MultiLanguageInput]\n:param model_version: (Optional) This value indicates which model will be used for scoring. If\n a model-version is not specified, the API should default to the latest, non-preview version.\n:type model_version: str\n:param show_stats: (Optional) if set to true, response will contain request and document level\n statistics.\n:type show_stats: bool\n:param domain: (Optional) if specified, will set the PII domain to include only a subset of the\n entity categories. Possible values include: \u0027PHI\u0027, \u0027none\u0027.\n:type domain: str\n:param string_index_type: (Optional) Specifies the method used to interpret string offsets.\n Defaults to Text Elements (Graphemes) according to Unicode v8.0.0. 
For additional information\n see https://aka.ms/text-analytics-offsets.\n:type string_index_type: str or ~azure.ai.textanalytics.v3_1_preview_3.models.StringIndexType\n:keyword callable cls: A custom type or function that will be passed the direct response\n:return: PiiResult, or the result of cls(response)\n:rtype: ~azure.ai.textanalytics.v3_1_preview_3.models.PiiResult\n:raises: ~azure.core.exceptions.HttpResponseError\n\"\"\"" + }, + "call": "documents, model_version, show_stats, domain, string_index_type" + }, + "entities_linking" : { + "sync": { + "signature": "def entities_linking(\n self,\n documents, # type: List[\"_models.MultiLanguageInput\"]\n model_version=None, # type: Optional[str]\n show_stats=None, # type: Optional[bool]\n string_index_type=\"TextElements_v8\", # type: Optional[Union[str, \"_models.StringIndexType\"]]\n **kwargs # type: Any\n):\n", + "doc": "\"\"\"Linked entities from a well-known knowledge base.\n\nThe API returns a list of recognized entities with links to a well-known knowledge base. See\nthe :code:`\u003ca href=\"https://aka.ms/talangs\"\u003eSupported languages in Text Analytics API\u003c/a\u003e` for\nthe list of enabled languages.\n\n:param documents: The set of documents to process as part of this batch.\n:type documents: list[~azure.ai.textanalytics.v3_1_preview_3.models.MultiLanguageInput]\n:param model_version: (Optional) This value indicates which model will be used for scoring. If\n a model-version is not specified, the API should default to the latest, non-preview version.\n:type model_version: str\n:param show_stats: (Optional) if set to true, response will contain request and document level\n statistics.\n:type show_stats: bool\n:param string_index_type: (Optional) Specifies the method used to interpret string offsets.\n Defaults to Text Elements (Graphemes) according to Unicode v8.0.0. For additional information\n see https://aka.ms/text-analytics-offsets.\n:type string_index_type: str or ~azure.ai.textanalytics.v3_1_preview_3.models.StringIndexType\n:keyword callable cls: A custom type or function that will be passed the direct response\n:return: EntityLinkingResult, or the result of cls(response)\n:rtype: ~azure.ai.textanalytics.v3_1_preview_3.models.EntityLinkingResult\n:raises: ~azure.core.exceptions.HttpResponseError\n\"\"\"" + }, + "async": { + "coroutine": true, + "signature": "async def entities_linking(\n self,\n documents: List[\"_models.MultiLanguageInput\"],\n model_version: Optional[str] = None,\n show_stats: Optional[bool] = None,\n string_index_type: Optional[Union[str, \"_models.StringIndexType\"]] = \"TextElements_v8\",\n **kwargs\n) -\u003e \"_models.EntityLinkingResult\":\n", + "doc": "\"\"\"Linked entities from a well-known knowledge base.\n\nThe API returns a list of recognized entities with links to a well-known knowledge base. See\nthe :code:`\u003ca href=\"https://aka.ms/talangs\"\u003eSupported languages in Text Analytics API\u003c/a\u003e` for\nthe list of enabled languages.\n\n:param documents: The set of documents to process as part of this batch.\n:type documents: list[~azure.ai.textanalytics.v3_1_preview_3.models.MultiLanguageInput]\n:param model_version: (Optional) This value indicates which model will be used for scoring. 
If\n a model-version is not specified, the API should default to the latest, non-preview version.\n:type model_version: str\n:param show_stats: (Optional) if set to true, response will contain request and document level\n statistics.\n:type show_stats: bool\n:param string_index_type: (Optional) Specifies the method used to interpret string offsets.\n Defaults to Text Elements (Graphemes) according to Unicode v8.0.0. For additional information\n see https://aka.ms/text-analytics-offsets.\n:type string_index_type: str or ~azure.ai.textanalytics.v3_1_preview_3.models.StringIndexType\n:keyword callable cls: A custom type or function that will be passed the direct response\n:return: EntityLinkingResult, or the result of cls(response)\n:rtype: ~azure.ai.textanalytics.v3_1_preview_3.models.EntityLinkingResult\n:raises: ~azure.core.exceptions.HttpResponseError\n\"\"\"" + }, + "call": "documents, model_version, show_stats, string_index_type" + }, + "key_phrases" : { + "sync": { + "signature": "def key_phrases(\n self,\n documents, # type: List[\"_models.MultiLanguageInput\"]\n model_version=None, # type: Optional[str]\n show_stats=None, # type: Optional[bool]\n **kwargs # type: Any\n):\n", + "doc": "\"\"\"Key Phrases.\n\nThe API returns a list of strings denoting the key phrases in the input text. See the :code:`\u003ca\nhref=\"https://aka.ms/talangs\"\u003eSupported languages in Text Analytics API\u003c/a\u003e` for the list of\nenabled languages.\n\n:param documents: The set of documents to process as part of this batch.\n:type documents: list[~azure.ai.textanalytics.v3_1_preview_3.models.MultiLanguageInput]\n:param model_version: (Optional) This value indicates which model will be used for scoring. If\n a model-version is not specified, the API should default to the latest, non-preview version.\n:type model_version: str\n:param show_stats: (Optional) if set to true, response will contain request and document level\n statistics.\n:type show_stats: bool\n:keyword callable cls: A custom type or function that will be passed the direct response\n:return: KeyPhraseResult, or the result of cls(response)\n:rtype: ~azure.ai.textanalytics.v3_1_preview_3.models.KeyPhraseResult\n:raises: ~azure.core.exceptions.HttpResponseError\n\"\"\"" + }, + "async": { + "coroutine": true, + "signature": "async def key_phrases(\n self,\n documents: List[\"_models.MultiLanguageInput\"],\n model_version: Optional[str] = None,\n show_stats: Optional[bool] = None,\n **kwargs\n) -\u003e \"_models.KeyPhraseResult\":\n", + "doc": "\"\"\"Key Phrases.\n\nThe API returns a list of strings denoting the key phrases in the input text. See the :code:`\u003ca\nhref=\"https://aka.ms/talangs\"\u003eSupported languages in Text Analytics API\u003c/a\u003e` for the list of\nenabled languages.\n\n:param documents: The set of documents to process as part of this batch.\n:type documents: list[~azure.ai.textanalytics.v3_1_preview_3.models.MultiLanguageInput]\n:param model_version: (Optional) This value indicates which model will be used for scoring. 
If\n a model-version is not specified, the API should default to the latest, non-preview version.\n:type model_version: str\n:param show_stats: (Optional) if set to true, response will contain request and document level\n statistics.\n:type show_stats: bool\n:keyword callable cls: A custom type or function that will be passed the direct response\n:return: KeyPhraseResult, or the result of cls(response)\n:rtype: ~azure.ai.textanalytics.v3_1_preview_3.models.KeyPhraseResult\n:raises: ~azure.core.exceptions.HttpResponseError\n\"\"\"" + }, + "call": "documents, model_version, show_stats" + }, + "languages" : { + "sync": { + "signature": "def languages(\n self,\n documents, # type: List[\"_models.LanguageInput\"]\n model_version=None, # type: Optional[str]\n show_stats=None, # type: Optional[bool]\n **kwargs # type: Any\n):\n", + "doc": "\"\"\"Detect Language.\n\nThe API returns the detected language and a numeric score between 0 and 1. Scores close to 1\nindicate 100% certainty that the identified language is true. See the :code:`\u003ca\nhref=\"https://aka.ms/talangs\"\u003eSupported languages in Text Analytics API\u003c/a\u003e` for the list of\nenabled languages.\n\n:param documents:\n:type documents: list[~azure.ai.textanalytics.v3_1_preview_3.models.LanguageInput]\n:param model_version: (Optional) This value indicates which model will be used for scoring. If\n a model-version is not specified, the API should default to the latest, non-preview version.\n:type model_version: str\n:param show_stats: (Optional) if set to true, response will contain request and document level\n statistics.\n:type show_stats: bool\n:keyword callable cls: A custom type or function that will be passed the direct response\n:return: LanguageResult, or the result of cls(response)\n:rtype: ~azure.ai.textanalytics.v3_1_preview_3.models.LanguageResult\n:raises: ~azure.core.exceptions.HttpResponseError\n\"\"\"" + }, + "async": { + "coroutine": true, + "signature": "async def languages(\n self,\n documents: List[\"_models.LanguageInput\"],\n model_version: Optional[str] = None,\n show_stats: Optional[bool] = None,\n **kwargs\n) -\u003e \"_models.LanguageResult\":\n", + "doc": "\"\"\"Detect Language.\n\nThe API returns the detected language and a numeric score between 0 and 1. Scores close to 1\nindicate 100% certainty that the identified language is true. See the :code:`\u003ca\nhref=\"https://aka.ms/talangs\"\u003eSupported languages in Text Analytics API\u003c/a\u003e` for the list of\nenabled languages.\n\n:param documents:\n:type documents: list[~azure.ai.textanalytics.v3_1_preview_3.models.LanguageInput]\n:param model_version: (Optional) This value indicates which model will be used for scoring. 
If\n a model-version is not specified, the API should default to the latest, non-preview version.\n:type model_version: str\n:param show_stats: (Optional) if set to true, response will contain request and document level\n statistics.\n:type show_stats: bool\n:keyword callable cls: A custom type or function that will be passed the direct response\n:return: LanguageResult, or the result of cls(response)\n:rtype: ~azure.ai.textanalytics.v3_1_preview_3.models.LanguageResult\n:raises: ~azure.core.exceptions.HttpResponseError\n\"\"\"" + }, + "call": "documents, model_version, show_stats" + }, + "sentiment" : { + "sync": { + "signature": "def sentiment(\n self,\n documents, # type: List[\"_models.MultiLanguageInput\"]\n model_version=None, # type: Optional[str]\n show_stats=None, # type: Optional[bool]\n opinion_mining=None, # type: Optional[bool]\n string_index_type=\"TextElements_v8\", # type: Optional[Union[str, \"_models.StringIndexType\"]]\n **kwargs # type: Any\n):\n", + "doc": "\"\"\"Sentiment.\n\nThe API returns a detailed sentiment analysis for the input text. The analysis is done in\nmultiple levels of granularity, start from the a document level, down to sentence and key terms\n(aspects) and opinions.\n\n:param documents: The set of documents to process as part of this batch.\n:type documents: list[~azure.ai.textanalytics.v3_1_preview_3.models.MultiLanguageInput]\n:param model_version: (Optional) This value indicates which model will be used for scoring. If\n a model-version is not specified, the API should default to the latest, non-preview version.\n:type model_version: str\n:param show_stats: (Optional) if set to true, response will contain request and document level\n statistics.\n:type show_stats: bool\n:param opinion_mining: (Optional) if set to true, response will contain input and document\n level statistics including aspect-based sentiment analysis results.\n:type opinion_mining: bool\n:param string_index_type: (Optional) Specifies the method used to interpret string offsets.\n Defaults to Text Elements (Graphemes) according to Unicode v8.0.0. For additional information\n see https://aka.ms/text-analytics-offsets.\n:type string_index_type: str or ~azure.ai.textanalytics.v3_1_preview_3.models.StringIndexType\n:keyword callable cls: A custom type or function that will be passed the direct response\n:return: SentimentResponse, or the result of cls(response)\n:rtype: ~azure.ai.textanalytics.v3_1_preview_3.models.SentimentResponse\n:raises: ~azure.core.exceptions.HttpResponseError\n\"\"\"" + }, + "async": { + "coroutine": true, + "signature": "async def sentiment(\n self,\n documents: List[\"_models.MultiLanguageInput\"],\n model_version: Optional[str] = None,\n show_stats: Optional[bool] = None,\n opinion_mining: Optional[bool] = None,\n string_index_type: Optional[Union[str, \"_models.StringIndexType\"]] = \"TextElements_v8\",\n **kwargs\n) -\u003e \"_models.SentimentResponse\":\n", + "doc": "\"\"\"Sentiment.\n\nThe API returns a detailed sentiment analysis for the input text. The analysis is done in\nmultiple levels of granularity, start from the a document level, down to sentence and key terms\n(aspects) and opinions.\n\n:param documents: The set of documents to process as part of this batch.\n:type documents: list[~azure.ai.textanalytics.v3_1_preview_3.models.MultiLanguageInput]\n:param model_version: (Optional) This value indicates which model will be used for scoring. 
If\n a model-version is not specified, the API should default to the latest, non-preview version.\n:type model_version: str\n:param show_stats: (Optional) if set to true, response will contain request and document level\n statistics.\n:type show_stats: bool\n:param opinion_mining: (Optional) if set to true, response will contain input and document\n level statistics including aspect-based sentiment analysis results.\n:type opinion_mining: bool\n:param string_index_type: (Optional) Specifies the method used to interpret string offsets.\n Defaults to Text Elements (Graphemes) according to Unicode v8.0.0. For additional information\n see https://aka.ms/text-analytics-offsets.\n:type string_index_type: str or ~azure.ai.textanalytics.v3_1_preview_3.models.StringIndexType\n:keyword callable cls: A custom type or function that will be passed the direct response\n:return: SentimentResponse, or the result of cls(response)\n:rtype: ~azure.ai.textanalytics.v3_1_preview_3.models.SentimentResponse\n:raises: ~azure.core.exceptions.HttpResponseError\n\"\"\"" + }, + "call": "documents, model_version, show_stats, opinion_mining, string_index_type" + } + } + } +} \ No newline at end of file diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_3/aio/operations/_text_analytics_client_operations.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_3/aio/operations/_text_analytics_client_operations.py index 4271cfa5ad8f..07b6892a3009 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_3/aio/operations/_text_analytics_client_operations.py +++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_3/aio/operations/_text_analytics_client_operations.py @@ -8,12 +8,7 @@ from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar, Union import warnings -from ....._async_lro import ( - AnalyzeHealthcareEntitiesAsyncLROPoller, - AnalyzeHealthcareEntitiesAsyncLROPollingMethod, - AsyncAnalyzeBatchActionsLROPoller, - AsyncAnalyzeBatchActionsLROPollingMethod -) +from ....._async_lro import AnalyzeHealthcareEntitiesAsyncLROPoller, AnalyzeHealthcareEntitiesAsyncLROPollingMethod, AsyncAnalyzeBatchActionsLROPoller, AsyncAnalyzeBatchActionsLROPollingMethod from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest @@ -106,7 +101,7 @@ async def begin_analyze( :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
:return: An instance of AsyncAnalyzeBatchActionsLROPoller that returns either AnalyzeJobState or the result of cls(response) - :rtype: ~...._async_lro.AsyncAnalyzeBatchActionsLROPoller[~azure.ai.textanalytics.v3_1_preview_3.models.AnalyzeJobState] + :rtype: ~....._async_lro.AsyncAnalyzeBatchActionsLROPoller[~azure.ai.textanalytics.v3_1_preview_3.models.AnalyzeJobState] :raises ~azure.core.exceptions.HttpResponseError: """ polling = kwargs.pop('polling', False) # type: Union[bool, AsyncPollingMethod] diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_3/models/__init__.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_3/models/__init__.py index 5b0c3c08fc83..2283433631d4 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_3/models/__init__.py +++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_3/models/__init__.py @@ -8,6 +8,7 @@ try: from ._models_py3 import AnalyzeBatchInput + from ._models_py3 import AnalyzeJobMetadata from ._models_py3 import AnalyzeJobState from ._models_py3 import AspectConfidenceScoreLabel from ._models_py3 import AspectRelation @@ -71,6 +72,7 @@ from ._models_py3 import TextAnalyticsWarning except (SyntaxError, ImportError): from ._models import AnalyzeBatchInput # type: ignore + from ._models import AnalyzeJobMetadata # type: ignore from ._models import AnalyzeJobState # type: ignore from ._models import AspectConfidenceScoreLabel # type: ignore from ._models import AspectRelation # type: ignore @@ -149,6 +151,7 @@ __all__ = [ 'AnalyzeBatchInput', + 'AnalyzeJobMetadata', 'AnalyzeJobState', 'AspectConfidenceScoreLabel', 'AspectRelation', diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_3/models/_models.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_3/models/_models.py index ebfc34f1abc8..951f9939c49e 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_3/models/_models.py +++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_3/models/_models.py @@ -92,59 +92,58 @@ def __init__( self.analysis_input = kwargs['analysis_input'] -class Pagination(msrest.serialization.Model): - """Pagination. - - :param next_link: - :type next_link: str - """ - - _attribute_map = { - 'next_link': {'key': '@nextLink', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(Pagination, self).__init__(**kwargs) - self.next_link = kwargs.get('next_link', None) - - -class TasksState(msrest.serialization.Model): - """TasksState. +class JobMetadata(msrest.serialization.Model): + """JobMetadata. All required parameters must be populated in order to send to Azure. - :param tasks: Required. - :type tasks: ~azure.ai.textanalytics.v3_1_preview_3.models.TasksStateTasks + :param created_date_time: Required. + :type created_date_time: ~datetime.datetime + :param expiration_date_time: + :type expiration_date_time: ~datetime.datetime + :param job_id: Required. + :type job_id: str + :param last_update_date_time: Required. + :type last_update_date_time: ~datetime.datetime + :param status: Required. Possible values include: "notStarted", "running", "succeeded", + "failed", "rejected", "cancelled", "cancelling", "partiallyCompleted". 
+ :type status: str or ~azure.ai.textanalytics.v3_1_preview_3.models.State """ _validation = { - 'tasks': {'required': True}, + 'created_date_time': {'required': True}, + 'job_id': {'required': True}, + 'last_update_date_time': {'required': True}, + 'status': {'required': True}, } _attribute_map = { - 'tasks': {'key': 'tasks', 'type': 'TasksStateTasks'}, + 'created_date_time': {'key': 'createdDateTime', 'type': 'iso-8601'}, + 'expiration_date_time': {'key': 'expirationDateTime', 'type': 'iso-8601'}, + 'job_id': {'key': 'jobId', 'type': 'str'}, + 'last_update_date_time': {'key': 'lastUpdateDateTime', 'type': 'iso-8601'}, + 'status': {'key': 'status', 'type': 'str'}, } def __init__( self, **kwargs ): - super(TasksState, self).__init__(**kwargs) - self.tasks = kwargs['tasks'] + super(JobMetadata, self).__init__(**kwargs) + self.created_date_time = kwargs['created_date_time'] + self.expiration_date_time = kwargs.get('expiration_date_time', None) + self.job_id = kwargs['job_id'] + self.last_update_date_time = kwargs['last_update_date_time'] + self.status = kwargs['status'] -class JobMetadata(msrest.serialization.Model): - """JobMetadata. +class AnalyzeJobMetadata(JobMetadata): + """AnalyzeJobMetadata. All required parameters must be populated in order to send to Azure. :param created_date_time: Required. :type created_date_time: ~datetime.datetime - :param display_name: - :type display_name: str :param expiration_date_time: :type expiration_date_time: ~datetime.datetime :param job_id: Required. @@ -154,6 +153,8 @@ class JobMetadata(msrest.serialization.Model): :param status: Required. Possible values include: "notStarted", "running", "succeeded", "failed", "rejected", "cancelled", "cancelling", "partiallyCompleted". :type status: str or ~azure.ai.textanalytics.v3_1_preview_3.models.State + :param display_name: + :type display_name: str """ _validation = { @@ -165,27 +166,66 @@ class JobMetadata(msrest.serialization.Model): _attribute_map = { 'created_date_time': {'key': 'createdDateTime', 'type': 'iso-8601'}, - 'display_name': {'key': 'displayName', 'type': 'str'}, 'expiration_date_time': {'key': 'expirationDateTime', 'type': 'iso-8601'}, 'job_id': {'key': 'jobId', 'type': 'str'}, 'last_update_date_time': {'key': 'lastUpdateDateTime', 'type': 'iso-8601'}, 'status': {'key': 'status', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, } def __init__( self, **kwargs ): - super(JobMetadata, self).__init__(**kwargs) - self.created_date_time = kwargs['created_date_time'] + super(AnalyzeJobMetadata, self).__init__(**kwargs) self.display_name = kwargs.get('display_name', None) - self.expiration_date_time = kwargs.get('expiration_date_time', None) - self.job_id = kwargs['job_id'] - self.last_update_date_time = kwargs['last_update_date_time'] - self.status = kwargs['status'] -class AnalyzeJobState(JobMetadata, TasksState, Pagination): +class Pagination(msrest.serialization.Model): + """Pagination. + + :param next_link: + :type next_link: str + """ + + _attribute_map = { + 'next_link': {'key': '@nextLink', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(Pagination, self).__init__(**kwargs) + self.next_link = kwargs.get('next_link', None) + + +class TasksState(msrest.serialization.Model): + """TasksState. + + All required parameters must be populated in order to send to Azure. + + :param tasks: Required. 
+ :type tasks: ~azure.ai.textanalytics.v3_1_preview_3.models.TasksStateTasks + """ + + _validation = { + 'tasks': {'required': True}, + } + + _attribute_map = { + 'tasks': {'key': 'tasks', 'type': 'TasksStateTasks'}, + } + + def __init__( + self, + **kwargs + ): + super(TasksState, self).__init__(**kwargs) + self.tasks = kwargs['tasks'] + + +class AnalyzeJobState(AnalyzeJobMetadata, TasksState, Pagination): """AnalyzeJobState. All required parameters must be populated in order to send to Azure. @@ -196,8 +236,6 @@ class AnalyzeJobState(JobMetadata, TasksState, Pagination): :type tasks: ~azure.ai.textanalytics.v3_1_preview_3.models.TasksStateTasks :param created_date_time: Required. :type created_date_time: ~datetime.datetime - :param display_name: - :type display_name: str :param expiration_date_time: :type expiration_date_time: ~datetime.datetime :param job_id: Required. @@ -207,6 +245,8 @@ class AnalyzeJobState(JobMetadata, TasksState, Pagination): :param status: Required. Possible values include: "notStarted", "running", "succeeded", "failed", "rejected", "cancelled", "cancelling", "partiallyCompleted". :type status: str or ~azure.ai.textanalytics.v3_1_preview_3.models.State + :param display_name: + :type display_name: str :param errors: :type errors: list[~azure.ai.textanalytics.v3_1_preview_3.models.TextAnalyticsError] :param statistics: if showStats=true was specified in the request this field will contain @@ -226,11 +266,11 @@ class AnalyzeJobState(JobMetadata, TasksState, Pagination): 'next_link': {'key': '@nextLink', 'type': 'str'}, 'tasks': {'key': 'tasks', 'type': 'TasksStateTasks'}, 'created_date_time': {'key': 'createdDateTime', 'type': 'iso-8601'}, - 'display_name': {'key': 'displayName', 'type': 'str'}, 'expiration_date_time': {'key': 'expirationDateTime', 'type': 'iso-8601'}, 'job_id': {'key': 'jobId', 'type': 'str'}, 'last_update_date_time': {'key': 'lastUpdateDateTime', 'type': 'iso-8601'}, 'status': {'key': 'status', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, 'errors': {'key': 'errors', 'type': '[TextAnalyticsError]'}, 'statistics': {'key': 'statistics', 'type': 'RequestStatistics'}, } @@ -246,20 +286,20 @@ def __init__( self.statistics = kwargs.get('statistics', None) self.next_link = kwargs.get('next_link', None) self.created_date_time = kwargs['created_date_time'] - self.display_name = kwargs.get('display_name', None) self.expiration_date_time = kwargs.get('expiration_date_time', None) self.job_id = kwargs['job_id'] self.last_update_date_time = kwargs['last_update_date_time'] self.status = kwargs['status'] + self.display_name = kwargs.get('display_name', None) self.errors = kwargs.get('errors', None) self.statistics = kwargs.get('statistics', None) self.tasks = kwargs['tasks'] self.created_date_time = kwargs['created_date_time'] - self.display_name = kwargs.get('display_name', None) self.expiration_date_time = kwargs.get('expiration_date_time', None) self.job_id = kwargs['job_id'] self.last_update_date_time = kwargs['last_update_date_time'] self.status = kwargs['status'] + self.display_name = kwargs.get('display_name', None) self.errors = kwargs.get('errors', None) self.statistics = kwargs.get('statistics', None) @@ -1049,8 +1089,6 @@ class HealthcareJobState(JobMetadata, Pagination): :type next_link: str :param created_date_time: Required. :type created_date_time: ~datetime.datetime - :param display_name: - :type display_name: str :param expiration_date_time: :type expiration_date_time: ~datetime.datetime :param job_id: Required. 
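The hunks above and below restructure the generated job models: display_name leaves the shared JobMetadata base and moves onto a new AnalyzeJobMetadata subclass, so AnalyzeJobState (which now inherits from AnalyzeJobMetadata alongside TasksState and Pagination) keeps it, while HealthcareJobState, still built on the slimmed-down JobMetadata, drops it. A minimal plain-Python sketch of the resulting hierarchy may help reviewers; it is illustrative only, since the generated classes derive from msrest.serialization.Model and use kwargs-driven constructors and _attribute_map serialization.

# Simplified, standalone sketch of the regenerated job-metadata hierarchy.
# Not the generated code: no msrest serialization, reduced constructors.
import datetime
from typing import Optional


class JobMetadata:
    """Shared base for job metadata; display_name no longer lives here."""

    def __init__(self, created_date_time: datetime.datetime, job_id: str,
                 last_update_date_time: datetime.datetime, status: str,
                 expiration_date_time: Optional[datetime.datetime] = None):
        self.created_date_time = created_date_time
        self.expiration_date_time = expiration_date_time
        self.job_id = job_id
        self.last_update_date_time = last_update_date_time
        self.status = status


class AnalyzeJobMetadata(JobMetadata):
    """Analyze-specific metadata; display_name is now defined only here."""

    def __init__(self, *, display_name: Optional[str] = None, **kwargs):
        super().__init__(**kwargs)
        self.display_name = display_name

This keeps display_name an analyze-job-only field, which is why the following hunk removes the displayName entries from HealthcareJobState.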
@@ -1076,7 +1114,6 @@ class HealthcareJobState(JobMetadata, Pagination): _attribute_map = { 'next_link': {'key': '@nextLink', 'type': 'str'}, 'created_date_time': {'key': 'createdDateTime', 'type': 'iso-8601'}, - 'display_name': {'key': 'displayName', 'type': 'str'}, 'expiration_date_time': {'key': 'expirationDateTime', 'type': 'iso-8601'}, 'job_id': {'key': 'jobId', 'type': 'str'}, 'last_update_date_time': {'key': 'lastUpdateDateTime', 'type': 'iso-8601'}, @@ -1094,7 +1131,6 @@ def __init__( self.results = kwargs.get('results', None) self.errors = kwargs.get('errors', None) self.created_date_time = kwargs['created_date_time'] - self.display_name = kwargs.get('display_name', None) self.expiration_date_time = kwargs.get('expiration_date_time', None) self.job_id = kwargs['job_id'] self.last_update_date_time = kwargs['last_update_date_time'] diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_3/models/_models_py3.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_3/models/_models_py3.py index 46cfa90ec725..0f8ce3bb049b 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_3/models/_models_py3.py +++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_3/models/_models_py3.py @@ -105,63 +105,64 @@ def __init__( self.analysis_input = analysis_input -class Pagination(msrest.serialization.Model): - """Pagination. - - :param next_link: - :type next_link: str - """ - - _attribute_map = { - 'next_link': {'key': '@nextLink', 'type': 'str'}, - } - - def __init__( - self, - *, - next_link: Optional[str] = None, - **kwargs - ): - super(Pagination, self).__init__(**kwargs) - self.next_link = next_link - - -class TasksState(msrest.serialization.Model): - """TasksState. +class JobMetadata(msrest.serialization.Model): + """JobMetadata. All required parameters must be populated in order to send to Azure. - :param tasks: Required. - :type tasks: ~azure.ai.textanalytics.v3_1_preview_3.models.TasksStateTasks + :param created_date_time: Required. + :type created_date_time: ~datetime.datetime + :param expiration_date_time: + :type expiration_date_time: ~datetime.datetime + :param job_id: Required. + :type job_id: str + :param last_update_date_time: Required. + :type last_update_date_time: ~datetime.datetime + :param status: Required. Possible values include: "notStarted", "running", "succeeded", + "failed", "rejected", "cancelled", "cancelling", "partiallyCompleted". 
+ :type status: str or ~azure.ai.textanalytics.v3_1_preview_3.models.State """ _validation = { - 'tasks': {'required': True}, + 'created_date_time': {'required': True}, + 'job_id': {'required': True}, + 'last_update_date_time': {'required': True}, + 'status': {'required': True}, } _attribute_map = { - 'tasks': {'key': 'tasks', 'type': 'TasksStateTasks'}, + 'created_date_time': {'key': 'createdDateTime', 'type': 'iso-8601'}, + 'expiration_date_time': {'key': 'expirationDateTime', 'type': 'iso-8601'}, + 'job_id': {'key': 'jobId', 'type': 'str'}, + 'last_update_date_time': {'key': 'lastUpdateDateTime', 'type': 'iso-8601'}, + 'status': {'key': 'status', 'type': 'str'}, } def __init__( self, *, - tasks: "TasksStateTasks", + created_date_time: datetime.datetime, + job_id: str, + last_update_date_time: datetime.datetime, + status: Union[str, "State"], + expiration_date_time: Optional[datetime.datetime] = None, **kwargs ): - super(TasksState, self).__init__(**kwargs) - self.tasks = tasks + super(JobMetadata, self).__init__(**kwargs) + self.created_date_time = created_date_time + self.expiration_date_time = expiration_date_time + self.job_id = job_id + self.last_update_date_time = last_update_date_time + self.status = status -class JobMetadata(msrest.serialization.Model): - """JobMetadata. +class AnalyzeJobMetadata(JobMetadata): + """AnalyzeJobMetadata. All required parameters must be populated in order to send to Azure. :param created_date_time: Required. :type created_date_time: ~datetime.datetime - :param display_name: - :type display_name: str :param expiration_date_time: :type expiration_date_time: ~datetime.datetime :param job_id: Required. @@ -171,6 +172,8 @@ class JobMetadata(msrest.serialization.Model): :param status: Required. Possible values include: "notStarted", "running", "succeeded", "failed", "rejected", "cancelled", "cancelling", "partiallyCompleted". :type status: str or ~azure.ai.textanalytics.v3_1_preview_3.models.State + :param display_name: + :type display_name: str """ _validation = { @@ -182,11 +185,11 @@ class JobMetadata(msrest.serialization.Model): _attribute_map = { 'created_date_time': {'key': 'createdDateTime', 'type': 'iso-8601'}, - 'display_name': {'key': 'displayName', 'type': 'str'}, 'expiration_date_time': {'key': 'expirationDateTime', 'type': 'iso-8601'}, 'job_id': {'key': 'jobId', 'type': 'str'}, 'last_update_date_time': {'key': 'lastUpdateDateTime', 'type': 'iso-8601'}, 'status': {'key': 'status', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, } def __init__( @@ -196,20 +199,63 @@ def __init__( job_id: str, last_update_date_time: datetime.datetime, status: Union[str, "State"], - display_name: Optional[str] = None, expiration_date_time: Optional[datetime.datetime] = None, + display_name: Optional[str] = None, **kwargs ): - super(JobMetadata, self).__init__(**kwargs) - self.created_date_time = created_date_time + super(AnalyzeJobMetadata, self).__init__(created_date_time=created_date_time, expiration_date_time=expiration_date_time, job_id=job_id, last_update_date_time=last_update_date_time, status=status, **kwargs) self.display_name = display_name - self.expiration_date_time = expiration_date_time - self.job_id = job_id - self.last_update_date_time = last_update_date_time - self.status = status -class AnalyzeJobState(JobMetadata, TasksState, Pagination): +class Pagination(msrest.serialization.Model): + """Pagination. 
+ + :param next_link: + :type next_link: str + """ + + _attribute_map = { + 'next_link': {'key': '@nextLink', 'type': 'str'}, + } + + def __init__( + self, + *, + next_link: Optional[str] = None, + **kwargs + ): + super(Pagination, self).__init__(**kwargs) + self.next_link = next_link + + +class TasksState(msrest.serialization.Model): + """TasksState. + + All required parameters must be populated in order to send to Azure. + + :param tasks: Required. + :type tasks: ~azure.ai.textanalytics.v3_1_preview_3.models.TasksStateTasks + """ + + _validation = { + 'tasks': {'required': True}, + } + + _attribute_map = { + 'tasks': {'key': 'tasks', 'type': 'TasksStateTasks'}, + } + + def __init__( + self, + *, + tasks: "TasksStateTasks", + **kwargs + ): + super(TasksState, self).__init__(**kwargs) + self.tasks = tasks + + +class AnalyzeJobState(AnalyzeJobMetadata, TasksState, Pagination): """AnalyzeJobState. All required parameters must be populated in order to send to Azure. @@ -220,8 +266,6 @@ class AnalyzeJobState(JobMetadata, TasksState, Pagination): :type tasks: ~azure.ai.textanalytics.v3_1_preview_3.models.TasksStateTasks :param created_date_time: Required. :type created_date_time: ~datetime.datetime - :param display_name: - :type display_name: str :param expiration_date_time: :type expiration_date_time: ~datetime.datetime :param job_id: Required. @@ -231,6 +275,8 @@ class AnalyzeJobState(JobMetadata, TasksState, Pagination): :param status: Required. Possible values include: "notStarted", "running", "succeeded", "failed", "rejected", "cancelled", "cancelling", "partiallyCompleted". :type status: str or ~azure.ai.textanalytics.v3_1_preview_3.models.State + :param display_name: + :type display_name: str :param errors: :type errors: list[~azure.ai.textanalytics.v3_1_preview_3.models.TextAnalyticsError] :param statistics: if showStats=true was specified in the request this field will contain @@ -250,11 +296,11 @@ class AnalyzeJobState(JobMetadata, TasksState, Pagination): 'next_link': {'key': '@nextLink', 'type': 'str'}, 'tasks': {'key': 'tasks', 'type': 'TasksStateTasks'}, 'created_date_time': {'key': 'createdDateTime', 'type': 'iso-8601'}, - 'display_name': {'key': 'displayName', 'type': 'str'}, 'expiration_date_time': {'key': 'expirationDateTime', 'type': 'iso-8601'}, 'job_id': {'key': 'jobId', 'type': 'str'}, 'last_update_date_time': {'key': 'lastUpdateDateTime', 'type': 'iso-8601'}, 'status': {'key': 'status', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, 'errors': {'key': 'errors', 'type': '[TextAnalyticsError]'}, 'statistics': {'key': 'statistics', 'type': 'RequestStatistics'}, } @@ -268,33 +314,33 @@ def __init__( last_update_date_time: datetime.datetime, status: Union[str, "State"], next_link: Optional[str] = None, - display_name: Optional[str] = None, expiration_date_time: Optional[datetime.datetime] = None, + display_name: Optional[str] = None, errors: Optional[List["TextAnalyticsError"]] = None, statistics: Optional["RequestStatistics"] = None, **kwargs ): - super(AnalyzeJobState, self).__init__(created_date_time=created_date_time, display_name=display_name, expiration_date_time=expiration_date_time, job_id=job_id, last_update_date_time=last_update_date_time, status=status, tasks=tasks, next_link=next_link, **kwargs) + super(AnalyzeJobState, self).__init__(created_date_time=created_date_time, expiration_date_time=expiration_date_time, job_id=job_id, last_update_date_time=last_update_date_time, status=status, display_name=display_name, tasks=tasks, 
next_link=next_link, **kwargs) self.next_link = next_link self.tasks = tasks self.errors = errors self.statistics = statistics self.next_link = next_link self.created_date_time = created_date_time - self.display_name = display_name self.expiration_date_time = expiration_date_time self.job_id = job_id self.last_update_date_time = last_update_date_time self.status = status + self.display_name = display_name self.errors = errors self.statistics = statistics self.tasks = tasks self.created_date_time = created_date_time - self.display_name = display_name self.expiration_date_time = expiration_date_time self.job_id = job_id self.last_update_date_time = last_update_date_time self.status = status + self.display_name = display_name self.errors = errors self.statistics = statistics @@ -1175,8 +1221,6 @@ class HealthcareJobState(JobMetadata, Pagination): :type next_link: str :param created_date_time: Required. :type created_date_time: ~datetime.datetime - :param display_name: - :type display_name: str :param expiration_date_time: :type expiration_date_time: ~datetime.datetime :param job_id: Required. @@ -1202,7 +1246,6 @@ class HealthcareJobState(JobMetadata, Pagination): _attribute_map = { 'next_link': {'key': '@nextLink', 'type': 'str'}, 'created_date_time': {'key': 'createdDateTime', 'type': 'iso-8601'}, - 'display_name': {'key': 'displayName', 'type': 'str'}, 'expiration_date_time': {'key': 'expirationDateTime', 'type': 'iso-8601'}, 'job_id': {'key': 'jobId', 'type': 'str'}, 'last_update_date_time': {'key': 'lastUpdateDateTime', 'type': 'iso-8601'}, @@ -1219,18 +1262,16 @@ def __init__( last_update_date_time: datetime.datetime, status: Union[str, "State"], next_link: Optional[str] = None, - display_name: Optional[str] = None, expiration_date_time: Optional[datetime.datetime] = None, results: Optional["HealthcareResult"] = None, errors: Optional[List["TextAnalyticsError"]] = None, **kwargs ): - super(HealthcareJobState, self).__init__(created_date_time=created_date_time, display_name=display_name, expiration_date_time=expiration_date_time, job_id=job_id, last_update_date_time=last_update_date_time, status=status, next_link=next_link, **kwargs) + super(HealthcareJobState, self).__init__(created_date_time=created_date_time, expiration_date_time=expiration_date_time, job_id=job_id, last_update_date_time=last_update_date_time, status=status, next_link=next_link, **kwargs) self.next_link = next_link self.results = results self.errors = errors self.created_date_time = created_date_time - self.display_name = display_name self.expiration_date_time = expiration_date_time self.job_id = job_id self.last_update_date_time = last_update_date_time diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_3/operations/_text_analytics_client_operations.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_3/operations/_text_analytics_client_operations.py index e35bc5c9bc38..4fdad40602c2 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_3/operations/_text_analytics_client_operations.py +++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_3/operations/_text_analytics_client_operations.py @@ -8,9 +8,7 @@ from typing import TYPE_CHECKING import warnings -# FIXME: have to manually reconfigure import path for multiapi operation mixin -from ...._lro import AnalyzeHealthcareEntitiesLROPoller, AnalyzeHealthcareEntitiesLROPollingMethod -from ...._lro 
import AnalyzeBatchActionsLROPoller, AnalyzeBatchActionsLROPollingMethod +from ...._lro import AnalyzeBatchActionsLROPoller, AnalyzeBatchActionsLROPollingMethod, AnalyzeHealthcareEntitiesLROPoller, AnalyzeHealthcareEntitiesLROPollingMethod from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import HttpRequest, HttpResponse diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_4/__init__.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_4/__init__.py new file mode 100644 index 000000000000..ca973ce68900 --- /dev/null +++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_4/__init__.py @@ -0,0 +1,16 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from ._text_analytics_client import TextAnalyticsClient +__all__ = ['TextAnalyticsClient'] + +try: + from ._patch import patch_sdk # type: ignore + patch_sdk() +except ImportError: + pass diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_4/_configuration.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_4/_configuration.py new file mode 100644 index 000000000000..e216512dcf2e --- /dev/null +++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_4/_configuration.py @@ -0,0 +1,68 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from typing import TYPE_CHECKING + +from azure.core.configuration import Configuration +from azure.core.pipeline import policies + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any + + from azure.core.credentials import TokenCredential + +VERSION = "unknown" + +class TextAnalyticsClientConfiguration(Configuration): + """Configuration for TextAnalyticsClient. + + Note that all parameters used to create this instance are saved as instance + attributes. + + :param credential: Credential needed for the client to connect to Azure. + :type credential: ~azure.core.credentials.TokenCredential + :param endpoint: Supported Cognitive Services endpoints (protocol and hostname, for example: https://westus.api.cognitive.microsoft.com). + :type endpoint: str + """ + + def __init__( + self, + credential, # type: "TokenCredential" + endpoint, # type: str + **kwargs # type: Any + ): + # type: (...) 
-> None + if credential is None: + raise ValueError("Parameter 'credential' must not be None.") + if endpoint is None: + raise ValueError("Parameter 'endpoint' must not be None.") + super(TextAnalyticsClientConfiguration, self).__init__(**kwargs) + + self.credential = credential + self.endpoint = endpoint + self.credential_scopes = kwargs.pop('credential_scopes', ['https://cognitiveservices.azure.com/.default']) + kwargs.setdefault('sdk_moniker', 'ai-textanalytics/{}'.format(VERSION)) + self._configure(**kwargs) + + def _configure( + self, + **kwargs # type: Any + ): + # type: (...) -> None + self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) + self.http_logging_policy = kwargs.get('http_logging_policy') or policies.HttpLoggingPolicy(**kwargs) + self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs) + self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs) + self.authentication_policy = kwargs.get('authentication_policy') + if self.credential and not self.authentication_policy: + self.authentication_policy = policies.BearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs) diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_4/_metadata.json b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_4/_metadata.json new file mode 100644 index 000000000000..8f580ec35dc6 --- /dev/null +++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_4/_metadata.json @@ -0,0 +1,264 @@ +{ + "chosen_version": "v3.1-preview.4", + "total_api_version_list": ["v3.1-preview.4"], + "client": { + "name": "TextAnalyticsClient", + "filename": "_text_analytics_client", + "description": "The Text Analytics API is a suite of natural language processing (NLP) services built with best-in-class Microsoft machine learning algorithms. The API can be used to analyze unstructured text for tasks such as sentiment analysis, key phrase extraction and language detection. Functionality for analysis of text specific to the healthcare domain and personal information are also available in the API. 
Further documentation can be found in :code:`\u003ca href=\"https://docs.microsoft.com/en-us/azure/cognitive-services/text-analytics/overview\"\u003ehttps://docs.microsoft.com/en-us/azure/cognitive-services/text-analytics/overview\u003c/a\u003e`.", + "base_url": null, + "custom_base_url": "\u0027{Endpoint}/text/analytics/v3.1-preview.4\u0027", + "azure_arm": false, + "has_lro_operations": true, + "client_side_validation": false, + "sync_imports": "{\"typing\": {\"azurecore\": {\"azure.core.credentials\": [\"TokenCredential\"]}}, \"regular\": {\"azurecore\": {\"azure.profiles\": [\"KnownProfiles\", \"ProfileDefinition\"], \"azure.profiles.multiapiclient\": [\"MultiApiClientMixin\"], \"msrest\": [\"Deserializer\", \"Serializer\"], \"azure.core\": [\"PipelineClient\"]}, \"local\": {\"._configuration\": [\"TextAnalyticsClientConfiguration\"], \"._operations_mixin\": [\"TextAnalyticsClientOperationsMixin\"]}}, \"conditional\": {\"stdlib\": {\"typing\": [\"Any\", \"Optional\"]}}}", + "async_imports": "{\"typing\": {\"azurecore\": {\"azure.core.credentials_async\": [\"AsyncTokenCredential\"]}}, \"regular\": {\"azurecore\": {\"azure.profiles\": [\"KnownProfiles\", \"ProfileDefinition\"], \"azure.profiles.multiapiclient\": [\"MultiApiClientMixin\"], \"msrest\": [\"Deserializer\", \"Serializer\"], \"azure.core\": [\"AsyncPipelineClient\"]}, \"local\": {\"._configuration\": [\"TextAnalyticsClientConfiguration\"], \"._operations_mixin\": [\"TextAnalyticsClientOperationsMixin\"]}}, \"conditional\": {\"stdlib\": {\"typing\": [\"Any\", \"Optional\"]}}}" + }, + "global_parameters": { + "sync": { + "credential": { + "signature": "credential, # type: \"TokenCredential\"", + "description": "Credential needed for the client to connect to Azure.", + "docstring_type": "~azure.core.credentials.TokenCredential", + "required": true + }, + "endpoint": { + "signature": "endpoint, # type: str", + "description": "Supported Cognitive Services endpoints (protocol and hostname, for example: https://westus.api.cognitive.microsoft.com).", + "docstring_type": "str", + "required": true + } + }, + "async": { + "credential": { + "signature": "credential: \"AsyncTokenCredential\",", + "description": "Credential needed for the client to connect to Azure.", + "docstring_type": "~azure.core.credentials_async.AsyncTokenCredential", + "required": true + }, + "endpoint": { + "signature": "endpoint: str,", + "description": "Supported Cognitive Services endpoints (protocol and hostname, for example: https://westus.api.cognitive.microsoft.com).", + "docstring_type": "str", + "required": true + } + }, + "constant": { + }, + "call": "credential, endpoint", + "service_client_specific": { + "sync": { + "api_version": { + "signature": "api_version=None, # type: Optional[str]", + "description": "API version to use if no profile is provided, or if missing in profile.", + "docstring_type": "str", + "required": false + }, + "profile": { + "signature": "profile=KnownProfiles.default, # type: KnownProfiles", + "description": "A profile definition, from KnownProfiles to dict.", + "docstring_type": "azure.profiles.KnownProfiles", + "required": false + } + }, + "async": { + "api_version": { + "signature": "api_version: Optional[str] = None,", + "description": "API version to use if no profile is provided, or if missing in profile.", + "docstring_type": "str", + "required": false + }, + "profile": { + "signature": "profile: KnownProfiles = KnownProfiles.default,", + "description": "A profile definition, from KnownProfiles to dict.", + 
"docstring_type": "azure.profiles.KnownProfiles", + "required": false + } + } + } + }, + "config": { + "credential": true, + "credential_scopes": ["https://cognitiveservices.azure.com/.default"], + "credential_default_policy_type": "BearerTokenCredentialPolicy", + "credential_default_policy_type_has_async_version": true, + "credential_key_header_name": null, + "sync_imports": "{\"regular\": {\"azurecore\": {\"azure.core.configuration\": [\"Configuration\"], \"azure.core.pipeline\": [\"policies\"]}, \"local\": {\"._version\": [\"VERSION\"]}}, \"conditional\": {\"stdlib\": {\"typing\": [\"Any\"]}}, \"typing\": {\"azurecore\": {\"azure.core.credentials\": [\"TokenCredential\"]}}}", + "async_imports": "{\"regular\": {\"azurecore\": {\"azure.core.configuration\": [\"Configuration\"], \"azure.core.pipeline\": [\"policies\"]}, \"local\": {\".._version\": [\"VERSION\"]}}, \"conditional\": {\"stdlib\": {\"typing\": [\"Any\"]}}, \"typing\": {\"azurecore\": {\"azure.core.credentials_async\": [\"AsyncTokenCredential\"]}}}" + }, + "operation_groups": { + }, + "operation_mixins": { + "sync_imports": "{\"regular\": {\"azurecore\": {\"azure.core.exceptions\": [\"ClientAuthenticationError\", \"HttpResponseError\", \"ResourceExistsError\", \"ResourceNotFoundError\", \"map_error\"], \"azure.core.pipeline\": [\"PipelineResponse\"], \"azure.core.pipeline.transport\": [\"HttpRequest\", \"HttpResponse\"], \"...._lro\": [\"AnalyzeBatchActionsLROPoller\", \"AnalyzeBatchActionsLROPollingMethod\", \"AnalyzeHealthcareEntitiesLROPoller\", \"AnalyzeHealthcareEntitiesLROPollingMethod\"], \"azure.core.polling\": [\"LROPoller\", \"NoPolling\", \"PollingMethod\"], \"azure.core.polling.base_polling\": [\"LROBasePolling\"]}, \"stdlib\": {\"warnings\": [null]}}, \"conditional\": {\"stdlib\": {\"typing\": [\"Any\", \"Callable\", \"Dict\", \"Generic\", \"List\", \"Optional\", \"TypeVar\", \"Union\"]}}}", + "async_imports": "{\"regular\": {\"azurecore\": {\"azure.core.exceptions\": [\"ClientAuthenticationError\", \"HttpResponseError\", \"ResourceExistsError\", \"ResourceNotFoundError\", \"map_error\"], \"azure.core.pipeline\": [\"PipelineResponse\"], \"azure.core.pipeline.transport\": [\"AsyncHttpResponse\", \"HttpRequest\"], \"....._async_lro\": [\"AnalyzeHealthcareEntitiesAsyncLROPoller\", \"AnalyzeHealthcareEntitiesAsyncLROPollingMethod\", \"AsyncAnalyzeBatchActionsLROPoller\", \"AsyncAnalyzeBatchActionsLROPollingMethod\"], \"azure.core.polling\": [\"AsyncLROPoller\", \"AsyncNoPolling\", \"AsyncPollingMethod\"], \"azure.core.polling.async_base_polling\": [\"AsyncLROBasePolling\"]}, \"stdlib\": {\"warnings\": [null]}}, \"conditional\": {\"stdlib\": {\"typing\": [\"Any\", \"Callable\", \"Dict\", \"Generic\", \"List\", \"Optional\", \"TypeVar\", \"Union\"]}}}", + "operations": { + "_analyze_initial" : { + "sync": { + "signature": "def _analyze_initial(\n self,\n body=None, # type: Optional[\"_models.AnalyzeBatchInput\"]\n **kwargs # type: Any\n):\n", + "doc": "\"\"\"\n\n:param body: Collection of documents to analyze and tasks to execute.\n:type body: ~azure.ai.textanalytics.v3_1_preview_4.models.AnalyzeBatchInput\n:keyword callable cls: A custom type or function that will be passed the direct response\n:return: AnalyzeJobState, or the result of cls(response)\n:rtype: ~azure.ai.textanalytics.v3_1_preview_4.models.AnalyzeJobState or None\n:raises: ~azure.core.exceptions.HttpResponseError\n\"\"\"" + }, + "async": { + "coroutine": true, + "signature": "async def _analyze_initial(\n self,\n body: 
Optional[\"_models.AnalyzeBatchInput\"] = None,\n **kwargs\n) -\u003e Optional[\"_models.AnalyzeJobState\"]:\n", + "doc": "\"\"\"\n\n:param body: Collection of documents to analyze and tasks to execute.\n:type body: ~azure.ai.textanalytics.v3_1_preview_4.models.AnalyzeBatchInput\n:keyword callable cls: A custom type or function that will be passed the direct response\n:return: AnalyzeJobState, or the result of cls(response)\n:rtype: ~azure.ai.textanalytics.v3_1_preview_4.models.AnalyzeJobState or None\n:raises: ~azure.core.exceptions.HttpResponseError\n\"\"\"" + }, + "call": "body" + }, + "begin_analyze" : { + "sync": { + "signature": "def begin_analyze(\n self,\n body=None, # type: Optional[\"_models.AnalyzeBatchInput\"]\n **kwargs # type: Any\n):\n", + "doc": "\"\"\"Submit analysis job.\n\nSubmit a collection of text documents for analysis. Specify one or more unique tasks to be\nexecuted.\n\n:param body: Collection of documents to analyze and tasks to execute.\n:type body: ~azure.ai.textanalytics.v3_1_preview_4.models.AnalyzeBatchInput\n:keyword callable cls: A custom type or function that will be passed the direct response\n:keyword str continuation_token: A continuation token to restart a poller from a saved state.\n:keyword polling: Pass in True if you\u0027d like the AnalyzeBatchActionsLROPollingMethod polling method,\n False for no polling, or your own initialized polling object for a personal polling strategy.\n:paramtype polling: bool or ~azure.core.polling.PollingMethod\n:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n:return: An instance of AnalyzeBatchActionsLROPoller that returns either AnalyzeJobState or the result of cls(response)\n:rtype: ~...._lro.AnalyzeBatchActionsLROPoller[~azure.ai.textanalytics.v3_1_preview_4.models.AnalyzeJobState]\n:raises ~azure.core.exceptions.HttpResponseError:\n\"\"\"" + }, + "async": { + "coroutine": true, + "signature": "async def begin_analyze(\n self,\n body: Optional[\"_models.AnalyzeBatchInput\"] = None,\n **kwargs\n) -\u003e AsyncAnalyzeBatchActionsLROPoller[\"_models.AnalyzeJobState\"]:\n", + "doc": "\"\"\"Submit analysis job.\n\nSubmit a collection of text documents for analysis. 
Specify one or more unique tasks to be\nexecuted.\n\n:param body: Collection of documents to analyze and tasks to execute.\n:type body: ~azure.ai.textanalytics.v3_1_preview_4.models.AnalyzeBatchInput\n:keyword callable cls: A custom type or function that will be passed the direct response\n:keyword str continuation_token: A continuation token to restart a poller from a saved state.\n:keyword polling: Pass in True if you\u0027d like the AsyncAnalyzeBatchActionsLROPollingMethod polling method,\n False for no polling, or your own initialized polling object for a personal polling strategy.\n:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod\n:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n:return: An instance of AsyncAnalyzeBatchActionsLROPoller that returns either AnalyzeJobState or the result of cls(response)\n:rtype: ~....._async_lro.AsyncAnalyzeBatchActionsLROPoller[~azure.ai.textanalytics.v3_1_preview_4.models.AnalyzeJobState]\n:raises ~azure.core.exceptions.HttpResponseError:\n\"\"\"" + }, + "call": "body" + }, + "analyze_status" : { + "sync": { + "signature": "def analyze_status(\n self,\n job_id, # type: str\n show_stats=None, # type: Optional[bool]\n top=20, # type: Optional[int]\n skip=0, # type: Optional[int]\n **kwargs # type: Any\n):\n", + "doc": "\"\"\"Get analysis status and results.\n\nGet the status of an analysis job. A job may consist of one or more tasks. Once all tasks are\ncompleted, the job will transition to the completed state and results will be available for\neach task.\n\n:param job_id: Job ID for Analyze.\n:type job_id: str\n:param show_stats: (Optional) if set to true, response will contain request and document level\n statistics.\n:type show_stats: bool\n:param top: (Optional) Set the maximum number of results per task. When both $top and $skip are\n specified, $skip is applied first.\n:type top: int\n:param skip: (Optional) Set the number of elements to offset in the response. When both $top\n and $skip are specified, $skip is applied first.\n:type skip: int\n:keyword callable cls: A custom type or function that will be passed the direct response\n:return: AnalyzeJobState, or the result of cls(response)\n:rtype: ~azure.ai.textanalytics.v3_1_preview_4.models.AnalyzeJobState\n:raises: ~azure.core.exceptions.HttpResponseError\n\"\"\"" + }, + "async": { + "coroutine": true, + "signature": "async def analyze_status(\n self,\n job_id: str,\n show_stats: Optional[bool] = None,\n top: Optional[int] = 20,\n skip: Optional[int] = 0,\n **kwargs\n) -\u003e \"_models.AnalyzeJobState\":\n", + "doc": "\"\"\"Get analysis status and results.\n\nGet the status of an analysis job. A job may consist of one or more tasks. Once all tasks are\ncompleted, the job will transition to the completed state and results will be available for\neach task.\n\n:param job_id: Job ID for Analyze.\n:type job_id: str\n:param show_stats: (Optional) if set to true, response will contain request and document level\n statistics.\n:type show_stats: bool\n:param top: (Optional) Set the maximum number of results per task. When both $top and $skip are\n specified, $skip is applied first.\n:type top: int\n:param skip: (Optional) Set the number of elements to offset in the response. 
When both $top\n and $skip are specified, $skip is applied first.\n:type skip: int\n:keyword callable cls: A custom type or function that will be passed the direct response\n:return: AnalyzeJobState, or the result of cls(response)\n:rtype: ~azure.ai.textanalytics.v3_1_preview_4.models.AnalyzeJobState\n:raises: ~azure.core.exceptions.HttpResponseError\n\"\"\"" + }, + "call": "job_id, show_stats, top, skip" + }, + "health_status" : { + "sync": { + "signature": "def health_status(\n self,\n job_id, # type: str\n top=20, # type: Optional[int]\n skip=0, # type: Optional[int]\n show_stats=None, # type: Optional[bool]\n **kwargs # type: Any\n):\n", + "doc": "\"\"\"Get healthcare analysis job status and results.\n\nGet details of the healthcare prediction job specified by the jobId.\n\n:param job_id: Job ID.\n:type job_id: str\n:param top: (Optional) Set the maximum number of results per task. When both $top and $skip are\n specified, $skip is applied first.\n:type top: int\n:param skip: (Optional) Set the number of elements to offset in the response. When both $top\n and $skip are specified, $skip is applied first.\n:type skip: int\n:param show_stats: (Optional) if set to true, response will contain request and document level\n statistics.\n:type show_stats: bool\n:keyword callable cls: A custom type or function that will be passed the direct response\n:return: HealthcareJobState, or the result of cls(response)\n:rtype: ~azure.ai.textanalytics.v3_1_preview_4.models.HealthcareJobState\n:raises: ~azure.core.exceptions.HttpResponseError\n\"\"\"" + }, + "async": { + "coroutine": true, + "signature": "async def health_status(\n self,\n job_id: str,\n top: Optional[int] = 20,\n skip: Optional[int] = 0,\n show_stats: Optional[bool] = None,\n **kwargs\n) -\u003e \"_models.HealthcareJobState\":\n", + "doc": "\"\"\"Get healthcare analysis job status and results.\n\nGet details of the healthcare prediction job specified by the jobId.\n\n:param job_id: Job ID.\n:type job_id: str\n:param top: (Optional) Set the maximum number of results per task. When both $top and $skip are\n specified, $skip is applied first.\n:type top: int\n:param skip: (Optional) Set the number of elements to offset in the response. 
When both $top\n and $skip are specified, $skip is applied first.\n:type skip: int\n:param show_stats: (Optional) if set to true, response will contain request and document level\n statistics.\n:type show_stats: bool\n:keyword callable cls: A custom type or function that will be passed the direct response\n:return: HealthcareJobState, or the result of cls(response)\n:rtype: ~azure.ai.textanalytics.v3_1_preview_4.models.HealthcareJobState\n:raises: ~azure.core.exceptions.HttpResponseError\n\"\"\"" + }, + "call": "job_id, top, skip, show_stats" + }, + "_cancel_health_job_initial" : { + "sync": { + "signature": "def _cancel_health_job_initial(\n self,\n job_id, # type: str\n **kwargs # type: Any\n):\n", + "doc": "\"\"\"\n\n:param job_id: Job ID.\n:type job_id: str\n:keyword callable cls: A custom type or function that will be passed the direct response\n:return: None, or the result of cls(response)\n:rtype: None\n:raises: ~azure.core.exceptions.HttpResponseError\n\"\"\"" + }, + "async": { + "coroutine": true, + "signature": "async def _cancel_health_job_initial(\n self,\n job_id: str,\n **kwargs\n) -\u003e None:\n", + "doc": "\"\"\"\n\n:param job_id: Job ID.\n:type job_id: str\n:keyword callable cls: A custom type or function that will be passed the direct response\n:return: None, or the result of cls(response)\n:rtype: None\n:raises: ~azure.core.exceptions.HttpResponseError\n\"\"\"" + }, + "call": "job_id" + }, + "begin_cancel_health_job" : { + "sync": { + "signature": "def begin_cancel_health_job(\n self,\n job_id, # type: str\n **kwargs # type: Any\n):\n", + "doc": "\"\"\"Cancel healthcare prediction job.\n\nCancel healthcare prediction job.\n\n:param job_id: Job ID.\n:type job_id: str\n:keyword callable cls: A custom type or function that will be passed the direct response\n:keyword str continuation_token: A continuation token to restart a poller from a saved state.\n:keyword polling: Pass in True if you\u0027d like the LROBasePolling polling method,\n False for no polling, or your own initialized polling object for a personal polling strategy.\n:paramtype polling: bool or ~azure.core.polling.PollingMethod\n:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n:return: An instance of LROPoller that returns either None or the result of cls(response)\n:rtype: ~azure.core.polling.LROPoller[None]\n:raises ~azure.core.exceptions.HttpResponseError:\n\"\"\"" + }, + "async": { + "coroutine": true, + "signature": "async def begin_cancel_health_job(\n self,\n job_id: str,\n **kwargs\n) -\u003e AsyncLROPoller[None]:\n", + "doc": "\"\"\"Cancel healthcare prediction job.\n\nCancel healthcare prediction job.\n\n:param job_id: Job ID.\n:type job_id: str\n:keyword callable cls: A custom type or function that will be passed the direct response\n:keyword str continuation_token: A continuation token to restart a poller from a saved state.\n:keyword polling: Pass in True if you\u0027d like the AsyncLROBasePolling polling method,\n False for no polling, or your own initialized polling object for a personal polling strategy.\n:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod\n:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)\n:rtype: ~azure.core.polling.AsyncLROPoller[None]\n:raises ~azure.core.exceptions.HttpResponseError:\n\"\"\"" + }, + "call": 
"job_id" + }, + "_health_initial" : { + "sync": { + "signature": "def _health_initial(\n self,\n documents, # type: List[\"_models.MultiLanguageInput\"]\n model_version=None, # type: Optional[str]\n string_index_type=\"TextElements_v8\", # type: Optional[Union[str, \"_models.StringIndexType\"]]\n **kwargs # type: Any\n):\n", + "doc": "\"\"\"\n\n:param documents: The set of documents to process as part of this batch.\n:type documents: list[~azure.ai.textanalytics.v3_1_preview_4.models.MultiLanguageInput]\n:param model_version: (Optional) This value indicates which model will be used for scoring. If\n a model-version is not specified, the API should default to the latest, non-preview version.\n:type model_version: str\n:param string_index_type: (Optional) Specifies the method used to interpret string offsets.\n Defaults to Text Elements (Graphemes) according to Unicode v8.0.0. For additional information\n see https://aka.ms/text-analytics-offsets.\n:type string_index_type: str or ~azure.ai.textanalytics.v3_1_preview_4.models.StringIndexType\n:keyword callable cls: A custom type or function that will be passed the direct response\n:return: HealthcareJobState, or the result of cls(response)\n:rtype: ~azure.ai.textanalytics.v3_1_preview_4.models.HealthcareJobState or None\n:raises: ~azure.core.exceptions.HttpResponseError\n\"\"\"" + }, + "async": { + "coroutine": true, + "signature": "async def _health_initial(\n self,\n documents: List[\"_models.MultiLanguageInput\"],\n model_version: Optional[str] = None,\n string_index_type: Optional[Union[str, \"_models.StringIndexType\"]] = \"TextElements_v8\",\n **kwargs\n) -\u003e Optional[\"_models.HealthcareJobState\"]:\n", + "doc": "\"\"\"\n\n:param documents: The set of documents to process as part of this batch.\n:type documents: list[~azure.ai.textanalytics.v3_1_preview_4.models.MultiLanguageInput]\n:param model_version: (Optional) This value indicates which model will be used for scoring. If\n a model-version is not specified, the API should default to the latest, non-preview version.\n:type model_version: str\n:param string_index_type: (Optional) Specifies the method used to interpret string offsets.\n Defaults to Text Elements (Graphemes) according to Unicode v8.0.0. For additional information\n see https://aka.ms/text-analytics-offsets.\n:type string_index_type: str or ~azure.ai.textanalytics.v3_1_preview_4.models.StringIndexType\n:keyword callable cls: A custom type or function that will be passed the direct response\n:return: HealthcareJobState, or the result of cls(response)\n:rtype: ~azure.ai.textanalytics.v3_1_preview_4.models.HealthcareJobState or None\n:raises: ~azure.core.exceptions.HttpResponseError\n\"\"\"" + }, + "call": "documents, model_version, string_index_type" + }, + "begin_health" : { + "sync": { + "signature": "def begin_health(\n self,\n documents, # type: List[\"_models.MultiLanguageInput\"]\n model_version=None, # type: Optional[str]\n string_index_type=\"TextElements_v8\", # type: Optional[Union[str, \"_models.StringIndexType\"]]\n **kwargs # type: Any\n):\n", + "doc": "\"\"\"Submit healthcare analysis job.\n\nStart a healthcare analysis job to recognize healthcare related entities (drugs, conditions,\nsymptoms, etc) and their relations.\n\n:param documents: The set of documents to process as part of this batch.\n:type documents: list[~azure.ai.textanalytics.v3_1_preview_4.models.MultiLanguageInput]\n:param model_version: (Optional) This value indicates which model will be used for scoring. 
If\n a model-version is not specified, the API should default to the latest, non-preview version.\n:type model_version: str\n:param string_index_type: (Optional) Specifies the method used to interpret string offsets.\n Defaults to Text Elements (Graphemes) according to Unicode v8.0.0. For additional information\n see https://aka.ms/text-analytics-offsets.\n:type string_index_type: str or ~azure.ai.textanalytics.v3_1_preview_4.models.StringIndexType\n:keyword callable cls: A custom type or function that will be passed the direct response\n:keyword str continuation_token: A continuation token to restart a poller from a saved state.\n:keyword polling: Pass in True if you\u0027d like the AnalyzeHealthcareEntitiesLROPollingMethod polling method,\n False for no polling, or your own initialized polling object for a personal polling strategy.\n:paramtype polling: bool or ~azure.core.polling.PollingMethod\n:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n:return: An instance of AnalyzeHealthcareEntitiesLROPoller that returns either HealthcareJobState or the result of cls(response)\n:rtype: ~...._lro.AnalyzeHealthcareEntitiesLROPoller[~azure.ai.textanalytics.v3_1_preview_4.models.HealthcareJobState]\n:raises ~azure.core.exceptions.HttpResponseError:\n\"\"\"" + }, + "async": { + "coroutine": true, + "signature": "async def begin_health(\n self,\n documents: List[\"_models.MultiLanguageInput\"],\n model_version: Optional[str] = None,\n string_index_type: Optional[Union[str, \"_models.StringIndexType\"]] = \"TextElements_v8\",\n **kwargs\n) -\u003e AnalyzeHealthcareEntitiesAsyncLROPoller[\"_models.HealthcareJobState\"]:\n", + "doc": "\"\"\"Submit healthcare analysis job.\n\nStart a healthcare analysis job to recognize healthcare related entities (drugs, conditions,\nsymptoms, etc) and their relations.\n\n:param documents: The set of documents to process as part of this batch.\n:type documents: list[~azure.ai.textanalytics.v3_1_preview_4.models.MultiLanguageInput]\n:param model_version: (Optional) This value indicates which model will be used for scoring. If\n a model-version is not specified, the API should default to the latest, non-preview version.\n:type model_version: str\n:param string_index_type: (Optional) Specifies the method used to interpret string offsets.\n Defaults to Text Elements (Graphemes) according to Unicode v8.0.0. 
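The `begin_health` entry above describes the healthcare long-running operation exposed by the generated client. Below is a minimal hedged sketch of driving it directly; the generated import path, the use of azure-identity for a TokenCredential, and the sample endpoint are assumptions, and real applications would normally go through the hand-written azure.ai.textanalytics client layer instead.

```python
from azure.ai.textanalytics._generated.v3_1_preview_4 import TextAnalyticsClient, models  # assumed import path
from azure.identity import DefaultAzureCredential  # assumes azure-identity is installed

client = TextAnalyticsClient(
    DefaultAzureCredential(),
    "https://westus.api.cognitive.microsoft.com",  # sample endpoint from the docstring above
)

documents = [models.MultiLanguageInput(id="1", text="Patient was prescribed 100mg of ibuprofen.", language="en")]

# In this generated layer `polling` appears to default to False, so opt in explicitly to get
# the AnalyzeHealthcareEntitiesLROPollingMethod and block until the job reaches a terminal state.
poller = client.begin_health(documents, model_version=None, string_index_type="TextElements_v8", polling=True)
state = poller.result()  # HealthcareJobState, per the :return: documentation above
print(state.status)
```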
For additional information\n see https://aka.ms/text-analytics-offsets.\n:type string_index_type: str or ~azure.ai.textanalytics.v3_1_preview_4.models.StringIndexType\n:keyword callable cls: A custom type or function that will be passed the direct response\n:keyword str continuation_token: A continuation token to restart a poller from a saved state.\n:keyword polling: Pass in True if you\u0027d like the AnalyzeHealthcareEntitiesAsyncLROPollingMethod polling method,\n False for no polling, or your own initialized polling object for a personal polling strategy.\n:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod\n:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n:return: An instance of AnalyzeHealthcareEntitiesAsyncLROPoller that returns either HealthcareJobState or the result of cls(response)\n:rtype: ~....._async_lro.AnalyzeHealthcareEntitiesAsyncLROPoller[~azure.ai.textanalytics.v3_1_preview_4.models.HealthcareJobState]\n:raises ~azure.core.exceptions.HttpResponseError:\n\"\"\"" + }, + "call": "documents, model_version, string_index_type" + }, + "entities_recognition_general" : { + "sync": { + "signature": "def entities_recognition_general(\n self,\n documents, # type: List[\"_models.MultiLanguageInput\"]\n model_version=None, # type: Optional[str]\n show_stats=None, # type: Optional[bool]\n string_index_type=\"TextElements_v8\", # type: Optional[Union[str, \"_models.StringIndexType\"]]\n **kwargs # type: Any\n):\n", + "doc": "\"\"\"Named Entity Recognition.\n\nThe API returns a list of general named entities in a given document. For the list of supported\nentity types, check :code:`\u003ca href=\"https://aka.ms/taner\"\u003eSupported Entity Types in Text\nAnalytics API\u003c/a\u003e`. See the :code:`\u003ca href=\"https://aka.ms/talangs\"\u003eSupported languages in Text\nAnalytics API\u003c/a\u003e` for the list of enabled languages.\n\n:param documents: The set of documents to process as part of this batch.\n:type documents: list[~azure.ai.textanalytics.v3_1_preview_4.models.MultiLanguageInput]\n:param model_version: (Optional) This value indicates which model will be used for scoring. If\n a model-version is not specified, the API should default to the latest, non-preview version.\n:type model_version: str\n:param show_stats: (Optional) if set to true, response will contain request and document level\n statistics.\n:type show_stats: bool\n:param string_index_type: (Optional) Specifies the method used to interpret string offsets.\n Defaults to Text Elements (Graphemes) according to Unicode v8.0.0. 
For additional information\n see https://aka.ms/text-analytics-offsets.\n:type string_index_type: str or ~azure.ai.textanalytics.v3_1_preview_4.models.StringIndexType\n:keyword callable cls: A custom type or function that will be passed the direct response\n:return: EntitiesResult, or the result of cls(response)\n:rtype: ~azure.ai.textanalytics.v3_1_preview_4.models.EntitiesResult\n:raises: ~azure.core.exceptions.HttpResponseError\n\"\"\"" + }, + "async": { + "coroutine": true, + "signature": "async def entities_recognition_general(\n self,\n documents: List[\"_models.MultiLanguageInput\"],\n model_version: Optional[str] = None,\n show_stats: Optional[bool] = None,\n string_index_type: Optional[Union[str, \"_models.StringIndexType\"]] = \"TextElements_v8\",\n **kwargs\n) -\u003e \"_models.EntitiesResult\":\n", + "doc": "\"\"\"Named Entity Recognition.\n\nThe API returns a list of general named entities in a given document. For the list of supported\nentity types, check :code:`\u003ca href=\"https://aka.ms/taner\"\u003eSupported Entity Types in Text\nAnalytics API\u003c/a\u003e`. See the :code:`\u003ca href=\"https://aka.ms/talangs\"\u003eSupported languages in Text\nAnalytics API\u003c/a\u003e` for the list of enabled languages.\n\n:param documents: The set of documents to process as part of this batch.\n:type documents: list[~azure.ai.textanalytics.v3_1_preview_4.models.MultiLanguageInput]\n:param model_version: (Optional) This value indicates which model will be used for scoring. If\n a model-version is not specified, the API should default to the latest, non-preview version.\n:type model_version: str\n:param show_stats: (Optional) if set to true, response will contain request and document level\n statistics.\n:type show_stats: bool\n:param string_index_type: (Optional) Specifies the method used to interpret string offsets.\n Defaults to Text Elements (Graphemes) according to Unicode v8.0.0. For additional information\n see https://aka.ms/text-analytics-offsets.\n:type string_index_type: str or ~azure.ai.textanalytics.v3_1_preview_4.models.StringIndexType\n:keyword callable cls: A custom type or function that will be passed the direct response\n:return: EntitiesResult, or the result of cls(response)\n:rtype: ~azure.ai.textanalytics.v3_1_preview_4.models.EntitiesResult\n:raises: ~azure.core.exceptions.HttpResponseError\n\"\"\"" + }, + "call": "documents, model_version, show_stats, string_index_type" + }, + "entities_recognition_pii" : { + "sync": { + "signature": "def entities_recognition_pii(\n self,\n documents, # type: List[\"_models.MultiLanguageInput\"]\n model_version=None, # type: Optional[str]\n show_stats=None, # type: Optional[bool]\n domain=None, # type: Optional[str]\n string_index_type=\"TextElements_v8\", # type: Optional[Union[str, \"_models.StringIndexType\"]]\n pii_categories=None, # type: Optional[List[Union[str, \"_models.PiiCategory\"]]]\n **kwargs # type: Any\n):\n", + "doc": "\"\"\"Entities containing personal information.\n\nThe API returns a list of entities with personal information (\\\"SSN\\\", \\\"Bank Account\\\" etc) in\nthe document. For the list of supported entity types, check :code:`\u003ca\nhref=\"https://aka.ms/tanerpii\"\u003eSupported Entity Types in Text Analytics API\u003c/a\u003e`. 
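As a quick illustration of the `entities_recognition_general` operation documented here, a hedged sketch follows; the generated import path, the azure-identity credential, and the result attribute names (`documents`, `entities`, `confidence_score`) are assumptions based on the generated models rather than something this diff spells out.

```python
from azure.ai.textanalytics._generated.v3_1_preview_4 import TextAnalyticsClient, models  # assumed import path
from azure.identity import DefaultAzureCredential

client = TextAnalyticsClient(DefaultAzureCredential(), "https://westus.api.cognitive.microsoft.com")

documents = [models.MultiLanguageInput(id="1", text="Microsoft was founded by Bill Gates and Paul Allen.", language="en")]

# show_stats=True adds request/document statistics; string_index_type controls how offsets are counted.
result = client.entities_recognition_general(documents, show_stats=True, string_index_type="TextElements_v8")
for doc in result.documents:
    for entity in doc.entities:
        print(entity.text, entity.category, entity.confidence_score)
```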
See the\n:code:`\u003ca href=\"https://aka.ms/talangs\"\u003eSupported languages in Text Analytics API\u003c/a\u003e` for the\nlist of enabled languages.\n\n:param documents: The set of documents to process as part of this batch.\n:type documents: list[~azure.ai.textanalytics.v3_1_preview_4.models.MultiLanguageInput]\n:param model_version: (Optional) This value indicates which model will be used for scoring. If\n a model-version is not specified, the API should default to the latest, non-preview version.\n:type model_version: str\n:param show_stats: (Optional) if set to true, response will contain request and document level\n statistics.\n:type show_stats: bool\n:param domain: (Optional) if specified, will set the PII domain to include only a subset of the\n entity categories. Possible values include: \u0027PHI\u0027, \u0027none\u0027.\n:type domain: str\n:param string_index_type: (Optional) Specifies the method used to interpret string offsets.\n Defaults to Text Elements (Graphemes) according to Unicode v8.0.0. For additional information\n see https://aka.ms/text-analytics-offsets.\n:type string_index_type: str or ~azure.ai.textanalytics.v3_1_preview_4.models.StringIndexType\n:param pii_categories: (Optional) describes the PII categories to return.\n:type pii_categories: list[str or ~azure.ai.textanalytics.v3_1_preview_4.models.PiiCategory]\n:keyword callable cls: A custom type or function that will be passed the direct response\n:return: PiiResult, or the result of cls(response)\n:rtype: ~azure.ai.textanalytics.v3_1_preview_4.models.PiiResult\n:raises: ~azure.core.exceptions.HttpResponseError\n\"\"\"" + }, + "async": { + "coroutine": true, + "signature": "async def entities_recognition_pii(\n self,\n documents: List[\"_models.MultiLanguageInput\"],\n model_version: Optional[str] = None,\n show_stats: Optional[bool] = None,\n domain: Optional[str] = None,\n string_index_type: Optional[Union[str, \"_models.StringIndexType\"]] = \"TextElements_v8\",\n pii_categories: Optional[List[Union[str, \"_models.PiiCategory\"]]] = None,\n **kwargs\n) -\u003e \"_models.PiiResult\":\n", + "doc": "\"\"\"Entities containing personal information.\n\nThe API returns a list of entities with personal information (\\\"SSN\\\", \\\"Bank Account\\\" etc) in\nthe document. For the list of supported entity types, check :code:`\u003ca\nhref=\"https://aka.ms/tanerpii\"\u003eSupported Entity Types in Text Analytics API\u003c/a\u003e`. See the\n:code:`\u003ca href=\"https://aka.ms/talangs\"\u003eSupported languages in Text Analytics API\u003c/a\u003e` for the\nlist of enabled languages.\n\n:param documents: The set of documents to process as part of this batch.\n:type documents: list[~azure.ai.textanalytics.v3_1_preview_4.models.MultiLanguageInput]\n:param model_version: (Optional) This value indicates which model will be used for scoring. If\n a model-version is not specified, the API should default to the latest, non-preview version.\n:type model_version: str\n:param show_stats: (Optional) if set to true, response will contain request and document level\n statistics.\n:type show_stats: bool\n:param domain: (Optional) if specified, will set the PII domain to include only a subset of the\n entity categories. Possible values include: \u0027PHI\u0027, \u0027none\u0027.\n:type domain: str\n:param string_index_type: (Optional) Specifies the method used to interpret string offsets.\n Defaults to Text Elements (Graphemes) according to Unicode v8.0.0. 
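The PII operation adds two filters on top of general NER: `domain` and the `pii_categories` list that is new in v3.1-preview.4. A hedged sketch is below; the import path and credential are assumptions as before, and the category value passed to `pii_categories` is illustrative only.

```python
from azure.ai.textanalytics._generated.v3_1_preview_4 import TextAnalyticsClient, models  # assumed import path
from azure.identity import DefaultAzureCredential

client = TextAnalyticsClient(DefaultAzureCredential(), "https://westus.api.cognitive.microsoft.com")

documents = [models.MultiLanguageInput(id="1", text="Patient John Doe, SSN 859-98-0987, was admitted on 1/2/2021.", language="en")]

# domain accepts 'PHI' or 'none' per the docstring; pii_categories narrows the returned entity
# categories further (the value below is an illustrative PiiCategory string, not confirmed here).
result = client.entities_recognition_pii(
    documents,
    domain="PHI",
    pii_categories=["USSocialSecurityNumber"],
)
for doc in result.documents:
    for entity in doc.entities:
        print(entity.text, entity.category)
```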
For additional information\n see https://aka.ms/text-analytics-offsets.\n:type string_index_type: str or ~azure.ai.textanalytics.v3_1_preview_4.models.StringIndexType\n:param pii_categories: (Optional) describes the PII categories to return.\n:type pii_categories: list[str or ~azure.ai.textanalytics.v3_1_preview_4.models.PiiCategory]\n:keyword callable cls: A custom type or function that will be passed the direct response\n:return: PiiResult, or the result of cls(response)\n:rtype: ~azure.ai.textanalytics.v3_1_preview_4.models.PiiResult\n:raises: ~azure.core.exceptions.HttpResponseError\n\"\"\"" + }, + "call": "documents, model_version, show_stats, domain, string_index_type, pii_categories" + }, + "entities_linking" : { + "sync": { + "signature": "def entities_linking(\n self,\n documents, # type: List[\"_models.MultiLanguageInput\"]\n model_version=None, # type: Optional[str]\n show_stats=None, # type: Optional[bool]\n string_index_type=\"TextElements_v8\", # type: Optional[Union[str, \"_models.StringIndexType\"]]\n **kwargs # type: Any\n):\n", + "doc": "\"\"\"Linked entities from a well known knowledge base.\n\nThe API returns a list of recognized entities with links to a well known knowledge base. See\nthe :code:`\u003ca href=\"https://aka.ms/talangs\"\u003eSupported languages in Text Analytics API\u003c/a\u003e` for\nthe list of enabled languages.\n\n:param documents: The set of documents to process as part of this batch.\n:type documents: list[~azure.ai.textanalytics.v3_1_preview_4.models.MultiLanguageInput]\n:param model_version: (Optional) This value indicates which model will be used for scoring. If\n a model-version is not specified, the API should default to the latest, non-preview version.\n:type model_version: str\n:param show_stats: (Optional) if set to true, response will contain request and document level\n statistics.\n:type show_stats: bool\n:param string_index_type: (Optional) Specifies the method used to interpret string offsets.\n Defaults to Text Elements (Graphemes) according to Unicode v8.0.0. For additional information\n see https://aka.ms/text-analytics-offsets.\n:type string_index_type: str or ~azure.ai.textanalytics.v3_1_preview_4.models.StringIndexType\n:keyword callable cls: A custom type or function that will be passed the direct response\n:return: EntityLinkingResult, or the result of cls(response)\n:rtype: ~azure.ai.textanalytics.v3_1_preview_4.models.EntityLinkingResult\n:raises: ~azure.core.exceptions.HttpResponseError\n\"\"\"" + }, + "async": { + "coroutine": true, + "signature": "async def entities_linking(\n self,\n documents: List[\"_models.MultiLanguageInput\"],\n model_version: Optional[str] = None,\n show_stats: Optional[bool] = None,\n string_index_type: Optional[Union[str, \"_models.StringIndexType\"]] = \"TextElements_v8\",\n **kwargs\n) -\u003e \"_models.EntityLinkingResult\":\n", + "doc": "\"\"\"Linked entities from a well known knowledge base.\n\nThe API returns a list of recognized entities with links to a well known knowledge base. See\nthe :code:`\u003ca href=\"https://aka.ms/talangs\"\u003eSupported languages in Text Analytics API\u003c/a\u003e` for\nthe list of enabled languages.\n\n:param documents: The set of documents to process as part of this batch.\n:type documents: list[~azure.ai.textanalytics.v3_1_preview_4.models.MultiLanguageInput]\n:param model_version: (Optional) This value indicates which model will be used for scoring. 
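For the `entities_linking` operation documented above, a short hedged sketch; attribute names on the linked-entity results (`name`, `url`, `data_source`) are taken from the generated models and are assumptions here.

```python
from azure.ai.textanalytics._generated.v3_1_preview_4 import TextAnalyticsClient, models  # assumed import path
from azure.identity import DefaultAzureCredential

client = TextAnalyticsClient(DefaultAzureCredential(), "https://westus.api.cognitive.microsoft.com")

documents = [models.MultiLanguageInput(id="1", text="Old Faithful is a geyser at Yellowstone Park.", language="en")]

result = client.entities_linking(documents)
for doc in result.documents:
    for linked in doc.entities:
        # Each linked entity carries the knowledge-base name, URL, and data source it resolved to.
        print(linked.name, linked.url, linked.data_source)
```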
If\n a model-version is not specified, the API should default to the latest, non-preview version.\n:type model_version: str\n:param show_stats: (Optional) if set to true, response will contain request and document level\n statistics.\n:type show_stats: bool\n:param string_index_type: (Optional) Specifies the method used to interpret string offsets.\n Defaults to Text Elements (Graphemes) according to Unicode v8.0.0. For additional information\n see https://aka.ms/text-analytics-offsets.\n:type string_index_type: str or ~azure.ai.textanalytics.v3_1_preview_4.models.StringIndexType\n:keyword callable cls: A custom type or function that will be passed the direct response\n:return: EntityLinkingResult, or the result of cls(response)\n:rtype: ~azure.ai.textanalytics.v3_1_preview_4.models.EntityLinkingResult\n:raises: ~azure.core.exceptions.HttpResponseError\n\"\"\"" + }, + "call": "documents, model_version, show_stats, string_index_type" + }, + "key_phrases" : { + "sync": { + "signature": "def key_phrases(\n self,\n documents, # type: List[\"_models.MultiLanguageInput\"]\n model_version=None, # type: Optional[str]\n show_stats=None, # type: Optional[bool]\n **kwargs # type: Any\n):\n", + "doc": "\"\"\"Key Phrases.\n\nThe API returns a list of strings denoting the key phrases in the input text. See the :code:`\u003ca\nhref=\"https://aka.ms/talangs\"\u003eSupported languages in Text Analytics API\u003c/a\u003e` for the list of\nenabled languages.\n\n:param documents: The set of documents to process as part of this batch.\n:type documents: list[~azure.ai.textanalytics.v3_1_preview_4.models.MultiLanguageInput]\n:param model_version: (Optional) This value indicates which model will be used for scoring. If\n a model-version is not specified, the API should default to the latest, non-preview version.\n:type model_version: str\n:param show_stats: (Optional) if set to true, response will contain request and document level\n statistics.\n:type show_stats: bool\n:keyword callable cls: A custom type or function that will be passed the direct response\n:return: KeyPhraseResult, or the result of cls(response)\n:rtype: ~azure.ai.textanalytics.v3_1_preview_4.models.KeyPhraseResult\n:raises: ~azure.core.exceptions.HttpResponseError\n\"\"\"" + }, + "async": { + "coroutine": true, + "signature": "async def key_phrases(\n self,\n documents: List[\"_models.MultiLanguageInput\"],\n model_version: Optional[str] = None,\n show_stats: Optional[bool] = None,\n **kwargs\n) -\u003e \"_models.KeyPhraseResult\":\n", + "doc": "\"\"\"Key Phrases.\n\nThe API returns a list of strings denoting the key phrases in the input text. See the :code:`\u003ca\nhref=\"https://aka.ms/talangs\"\u003eSupported languages in Text Analytics API\u003c/a\u003e` for the list of\nenabled languages.\n\n:param documents: The set of documents to process as part of this batch.\n:type documents: list[~azure.ai.textanalytics.v3_1_preview_4.models.MultiLanguageInput]\n:param model_version: (Optional) This value indicates which model will be used for scoring. 
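The `key_phrases` operation returns plain strings per document, so a usage sketch is very small; as with the other sketches, the generated import path and credential setup are assumptions.

```python
from azure.ai.textanalytics._generated.v3_1_preview_4 import TextAnalyticsClient, models  # assumed import path
from azure.identity import DefaultAzureCredential

client = TextAnalyticsClient(DefaultAzureCredential(), "https://westus.api.cognitive.microsoft.com")

documents = [models.MultiLanguageInput(id="1", text="My cat might need to see a veterinarian.", language="en")]

result = client.key_phrases(documents, show_stats=False)
for doc in result.documents:
    print(doc.id, doc.key_phrases)  # key_phrases is a list of strings per document
```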
If\n a model-version is not specified, the API should default to the latest, non-preview version.\n:type model_version: str\n:param show_stats: (Optional) if set to true, response will contain request and document level\n statistics.\n:type show_stats: bool\n:keyword callable cls: A custom type or function that will be passed the direct response\n:return: KeyPhraseResult, or the result of cls(response)\n:rtype: ~azure.ai.textanalytics.v3_1_preview_4.models.KeyPhraseResult\n:raises: ~azure.core.exceptions.HttpResponseError\n\"\"\"" + }, + "call": "documents, model_version, show_stats" + }, + "languages" : { + "sync": { + "signature": "def languages(\n self,\n documents, # type: List[\"_models.LanguageInput\"]\n model_version=None, # type: Optional[str]\n show_stats=None, # type: Optional[bool]\n **kwargs # type: Any\n):\n", + "doc": "\"\"\"Detect Language.\n\nThe API returns the detected language and a numeric score between 0 and 1. Scores close to 1\nindicate 100% certainty that the identified language is true. See the :code:`\u003ca\nhref=\"https://aka.ms/talangs\"\u003eSupported languages in Text Analytics API\u003c/a\u003e` for the list of\nenabled languages.\n\n:param documents:\n:type documents: list[~azure.ai.textanalytics.v3_1_preview_4.models.LanguageInput]\n:param model_version: (Optional) This value indicates which model will be used for scoring. If\n a model-version is not specified, the API should default to the latest, non-preview version.\n:type model_version: str\n:param show_stats: (Optional) if set to true, response will contain request and document level\n statistics.\n:type show_stats: bool\n:keyword callable cls: A custom type or function that will be passed the direct response\n:return: LanguageResult, or the result of cls(response)\n:rtype: ~azure.ai.textanalytics.v3_1_preview_4.models.LanguageResult\n:raises: ~azure.core.exceptions.HttpResponseError\n\"\"\"" + }, + "async": { + "coroutine": true, + "signature": "async def languages(\n self,\n documents: List[\"_models.LanguageInput\"],\n model_version: Optional[str] = None,\n show_stats: Optional[bool] = None,\n **kwargs\n) -\u003e \"_models.LanguageResult\":\n", + "doc": "\"\"\"Detect Language.\n\nThe API returns the detected language and a numeric score between 0 and 1. Scores close to 1\nindicate 100% certainty that the identified language is true. See the :code:`\u003ca\nhref=\"https://aka.ms/talangs\"\u003eSupported languages in Text Analytics API\u003c/a\u003e` for the list of\nenabled languages.\n\n:param documents:\n:type documents: list[~azure.ai.textanalytics.v3_1_preview_4.models.LanguageInput]\n:param model_version: (Optional) This value indicates which model will be used for scoring. 
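Language detection takes `LanguageInput` (with an optional country hint) rather than `MultiLanguageInput`; a hedged sketch, with the same import-path and credential assumptions as above:

```python
from azure.ai.textanalytics._generated.v3_1_preview_4 import TextAnalyticsClient, models  # assumed import path
from azure.identity import DefaultAzureCredential

client = TextAnalyticsClient(DefaultAzureCredential(), "https://westus.api.cognitive.microsoft.com")

# LanguageInput takes an optional country_hint instead of a language tag.
documents = [models.LanguageInput(id="1", text="Ce document est rédigé en français.", country_hint="FR")]

result = client.languages(documents)
for doc in result.documents:
    detected = doc.detected_language
    print(detected.name, detected.iso6391_name, detected.confidence_score)
```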
If\n a model-version is not specified, the API should default to the latest, non-preview version.\n:type model_version: str\n:param show_stats: (Optional) if set to true, response will contain request and document level\n statistics.\n:type show_stats: bool\n:keyword callable cls: A custom type or function that will be passed the direct response\n:return: LanguageResult, or the result of cls(response)\n:rtype: ~azure.ai.textanalytics.v3_1_preview_4.models.LanguageResult\n:raises: ~azure.core.exceptions.HttpResponseError\n\"\"\"" + }, + "call": "documents, model_version, show_stats" + }, + "sentiment" : { + "sync": { + "signature": "def sentiment(\n self,\n documents, # type: List[\"_models.MultiLanguageInput\"]\n model_version=None, # type: Optional[str]\n show_stats=None, # type: Optional[bool]\n opinion_mining=None, # type: Optional[bool]\n string_index_type=\"TextElements_v8\", # type: Optional[Union[str, \"_models.StringIndexType\"]]\n **kwargs # type: Any\n):\n", + "doc": "\"\"\"Sentiment.\n\nThe API returns a detailed sentiment analysis for the input text. The analysis is done in\nmultiple levels of granularity, start from the a document level, down to sentence and key terms\n(targets and assessments).\n\n:param documents: The set of documents to process as part of this batch.\n:type documents: list[~azure.ai.textanalytics.v3_1_preview_4.models.MultiLanguageInput]\n:param model_version: (Optional) This value indicates which model will be used for scoring. If\n a model-version is not specified, the API should default to the latest, non-preview version.\n:type model_version: str\n:param show_stats: (Optional) if set to true, response will contain request and document level\n statistics.\n:type show_stats: bool\n:param opinion_mining: (Optional) if set to true, response will contain not only sentiment\n prediction but also opinion mining (aspect-based sentiment analysis) results.\n:type opinion_mining: bool\n:param string_index_type: (Optional) Specifies the method used to interpret string offsets.\n Defaults to Text Elements (Graphemes) according to Unicode v8.0.0. For additional information\n see https://aka.ms/text-analytics-offsets.\n:type string_index_type: str or ~azure.ai.textanalytics.v3_1_preview_4.models.StringIndexType\n:keyword callable cls: A custom type or function that will be passed the direct response\n:return: SentimentResponse, or the result of cls(response)\n:rtype: ~azure.ai.textanalytics.v3_1_preview_4.models.SentimentResponse\n:raises: ~azure.core.exceptions.HttpResponseError\n\"\"\"" + }, + "async": { + "coroutine": true, + "signature": "async def sentiment(\n self,\n documents: List[\"_models.MultiLanguageInput\"],\n model_version: Optional[str] = None,\n show_stats: Optional[bool] = None,\n opinion_mining: Optional[bool] = None,\n string_index_type: Optional[Union[str, \"_models.StringIndexType\"]] = \"TextElements_v8\",\n **kwargs\n) -\u003e \"_models.SentimentResponse\":\n", + "doc": "\"\"\"Sentiment.\n\nThe API returns a detailed sentiment analysis for the input text. The analysis is done in\nmultiple levels of granularity, start from the a document level, down to sentence and key terms\n(targets and assessments).\n\n:param documents: The set of documents to process as part of this batch.\n:type documents: list[~azure.ai.textanalytics.v3_1_preview_4.models.MultiLanguageInput]\n:param model_version: (Optional) This value indicates which model will be used for scoring. 
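The sentiment operation adds the `opinion_mining` flag, which asks the service for target/assessment (aspect-based) detail on top of document- and sentence-level labels. A hedged sketch, again assuming the generated import path and an azure-identity credential:

```python
from azure.ai.textanalytics._generated.v3_1_preview_4 import TextAnalyticsClient, models  # assumed import path
from azure.identity import DefaultAzureCredential

client = TextAnalyticsClient(DefaultAzureCredential(), "https://westus.api.cognitive.microsoft.com")

documents = [models.MultiLanguageInput(id="1", text="The rooms were beautiful but the staff was unfriendly.", language="en")]

# opinion_mining=True requests target/assessment details in addition to the sentiment labels.
result = client.sentiment(documents, opinion_mining=True)
for doc in result.documents:
    print(doc.id, doc.sentiment)
    for sentence in doc.sentences:
        print("  ", sentence.text, sentence.sentiment)
```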
If\n a model-version is not specified, the API should default to the latest, non-preview version.\n:type model_version: str\n:param show_stats: (Optional) if set to true, response will contain request and document level\n statistics.\n:type show_stats: bool\n:param opinion_mining: (Optional) if set to true, response will contain not only sentiment\n prediction but also opinion mining (aspect-based sentiment analysis) results.\n:type opinion_mining: bool\n:param string_index_type: (Optional) Specifies the method used to interpret string offsets.\n Defaults to Text Elements (Graphemes) according to Unicode v8.0.0. For additional information\n see https://aka.ms/text-analytics-offsets.\n:type string_index_type: str or ~azure.ai.textanalytics.v3_1_preview_4.models.StringIndexType\n:keyword callable cls: A custom type or function that will be passed the direct response\n:return: SentimentResponse, or the result of cls(response)\n:rtype: ~azure.ai.textanalytics.v3_1_preview_4.models.SentimentResponse\n:raises: ~azure.core.exceptions.HttpResponseError\n\"\"\"" + }, + "call": "documents, model_version, show_stats, opinion_mining, string_index_type" + } + } + } +} \ No newline at end of file diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_4/_text_analytics_client.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_4/_text_analytics_client.py new file mode 100644 index 000000000000..8d45331521ef --- /dev/null +++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_4/_text_analytics_client.py @@ -0,0 +1,63 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from typing import TYPE_CHECKING + +from azure.core import PipelineClient +from msrest import Deserializer, Serializer + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any + + from azure.core.credentials import TokenCredential + +from ._configuration import TextAnalyticsClientConfiguration +from .operations import TextAnalyticsClientOperationsMixin +from . import models + + +class TextAnalyticsClient(TextAnalyticsClientOperationsMixin): + """The Text Analytics API is a suite of natural language processing (NLP) services built with best-in-class Microsoft machine learning algorithms. The API can be used to analyze unstructured text for tasks such as sentiment analysis, key phrase extraction and language detection. Functionality for analysis of text specific to the healthcare domain and personal information are also available in the API. Further documentation can be found in :code:`https://docs.microsoft.com/en-us/azure/cognitive-services/text-analytics/overview`. + + :param credential: Credential needed for the client to connect to Azure. + :type credential: ~azure.core.credentials.TokenCredential + :param endpoint: Supported Cognitive Services endpoints (protocol and hostname, for example: https://westus.api.cognitive.microsoft.com). 
+ :type endpoint: str + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. + """ + + def __init__( + self, + credential, # type: "TokenCredential" + endpoint, # type: str + **kwargs # type: Any + ): + # type: (...) -> None + base_url = '{Endpoint}/text/analytics/v3.1-preview.4' + self._config = TextAnalyticsClientConfiguration(credential, endpoint, **kwargs) + self._client = PipelineClient(base_url=base_url, config=self._config, **kwargs) + + client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} + self._serialize = Serializer(client_models) + self._serialize.client_side_validation = False + self._deserialize = Deserializer(client_models) + + + def close(self): + # type: () -> None + self._client.close() + + def __enter__(self): + # type: () -> TextAnalyticsClient + self._client.__enter__() + return self + + def __exit__(self, *exc_details): + # type: (Any) -> None + self._client.__exit__(*exc_details) diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_4/aio/__init__.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_4/aio/__init__.py new file mode 100644 index 000000000000..b64fe33ea84d --- /dev/null +++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_4/aio/__init__.py @@ -0,0 +1,10 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from ._text_analytics_client import TextAnalyticsClient +__all__ = ['TextAnalyticsClient'] diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_4/aio/_configuration.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_4/aio/_configuration.py new file mode 100644 index 000000000000..033d80c38005 --- /dev/null +++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_4/aio/_configuration.py @@ -0,0 +1,64 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from typing import Any, TYPE_CHECKING + +from azure.core.configuration import Configuration +from azure.core.pipeline import policies + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from azure.core.credentials_async import AsyncTokenCredential + +VERSION = "unknown" + +class TextAnalyticsClientConfiguration(Configuration): + """Configuration for TextAnalyticsClient. + + Note that all parameters used to create this instance are saved as instance + attributes. + + :param credential: Credential needed for the client to connect to Azure. 
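The new synchronous client above owns a `PipelineClient` and exposes `close`/`__enter__`/`__exit__`, and its docstring documents the `polling_interval` keyword used by the LRO operations when no Retry-After header is present. A small hedged sketch of constructing it (import path and credential choice are assumptions):

```python
from azure.ai.textanalytics._generated.v3_1_preview_4 import TextAnalyticsClient  # assumed import path
from azure.identity import DefaultAzureCredential

# polling_interval is the fallback delay between LRO polls when the service sends no Retry-After
# header; __enter__/__exit__ delegate to the underlying PipelineClient, so the HTTP transport is
# closed when the block exits.
with TextAnalyticsClient(
    DefaultAzureCredential(),
    "https://westus.api.cognitive.microsoft.com",
    polling_interval=10,
) as client:
    ...  # mixin operations (languages, sentiment, begin_health, begin_analyze, ...) are available here
```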
+ :type credential: ~azure.core.credentials_async.AsyncTokenCredential + :param endpoint: Supported Cognitive Services endpoints (protocol and hostname, for example: https://westus.api.cognitive.microsoft.com). + :type endpoint: str + """ + + def __init__( + self, + credential: "AsyncTokenCredential", + endpoint: str, + **kwargs: Any + ) -> None: + if credential is None: + raise ValueError("Parameter 'credential' must not be None.") + if endpoint is None: + raise ValueError("Parameter 'endpoint' must not be None.") + super(TextAnalyticsClientConfiguration, self).__init__(**kwargs) + + self.credential = credential + self.endpoint = endpoint + self.credential_scopes = kwargs.pop('credential_scopes', ['https://cognitiveservices.azure.com/.default']) + kwargs.setdefault('sdk_moniker', 'ai-textanalytics/{}'.format(VERSION)) + self._configure(**kwargs) + + def _configure( + self, + **kwargs: Any + ) -> None: + self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) + self.http_logging_policy = kwargs.get('http_logging_policy') or policies.HttpLoggingPolicy(**kwargs) + self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs) + self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs) + self.authentication_policy = kwargs.get('authentication_policy') + if self.credential and not self.authentication_policy: + self.authentication_policy = policies.AsyncBearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs) diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_4/aio/_text_analytics_client.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_4/aio/_text_analytics_client.py new file mode 100644 index 000000000000..1882af8b94d6 --- /dev/null +++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_4/aio/_text_analytics_client.py @@ -0,0 +1,57 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from typing import Any, TYPE_CHECKING + +from azure.core import AsyncPipelineClient +from msrest import Deserializer, Serializer + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from azure.core.credentials_async import AsyncTokenCredential + +from ._configuration import TextAnalyticsClientConfiguration +from .operations import TextAnalyticsClientOperationsMixin +from .. import models + + +class TextAnalyticsClient(TextAnalyticsClientOperationsMixin): + """The Text Analytics API is a suite of natural language processing (NLP) services built with best-in-class Microsoft machine learning algorithms. 
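The async configuration above defaults `credential_scopes` to the Cognitive Services scope and wires the credential into `AsyncBearerTokenCredentialPolicy`. A hedged sketch of constructing the new async client as an async context manager follows; the aio import path and the azure-identity async credential are assumptions.

```python
import asyncio

from azure.ai.textanalytics._generated.v3_1_preview_4.aio import TextAnalyticsClient  # assumed import path
from azure.identity.aio import DefaultAzureCredential


async def main() -> None:
    credential = DefaultAzureCredential()
    # credential_scopes can be overridden per client; the value shown is just the documented default.
    async with TextAnalyticsClient(
        credential,
        "https://westus.api.cognitive.microsoft.com",
        credential_scopes=["https://cognitiveservices.azure.com/.default"],
    ) as client:
        ...  # awaitable operations from the async mixin go here
    await credential.close()


asyncio.run(main())
```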
The API can be used to analyze unstructured text for tasks such as sentiment analysis, key phrase extraction and language detection. Functionality for analysis of text specific to the healthcare domain and personal information are also available in the API. Further documentation can be found in :code:`https://docs.microsoft.com/en-us/azure/cognitive-services/text-analytics/overview`. + + :param credential: Credential needed for the client to connect to Azure. + :type credential: ~azure.core.credentials_async.AsyncTokenCredential + :param endpoint: Supported Cognitive Services endpoints (protocol and hostname, for example: https://westus.api.cognitive.microsoft.com). + :type endpoint: str + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. + """ + + def __init__( + self, + credential: "AsyncTokenCredential", + endpoint: str, + **kwargs: Any + ) -> None: + base_url = '{Endpoint}/text/analytics/v3.1-preview.4' + self._config = TextAnalyticsClientConfiguration(credential, endpoint, **kwargs) + self._client = AsyncPipelineClient(base_url=base_url, config=self._config, **kwargs) + + client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} + self._serialize = Serializer(client_models) + self._serialize.client_side_validation = False + self._deserialize = Deserializer(client_models) + + + async def close(self) -> None: + await self._client.close() + + async def __aenter__(self) -> "TextAnalyticsClient": + await self._client.__aenter__() + return self + + async def __aexit__(self, *exc_details) -> None: + await self._client.__aexit__(*exc_details) diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_4/aio/operations/__init__.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_4/aio/operations/__init__.py new file mode 100644 index 000000000000..4384511c0346 --- /dev/null +++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_4/aio/operations/__init__.py @@ -0,0 +1,13 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from ._text_analytics_client_operations import TextAnalyticsClientOperationsMixin + +__all__ = [ + 'TextAnalyticsClientOperationsMixin', +] diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_4/aio/operations/_text_analytics_client_operations.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_4/aio/operations/_text_analytics_client_operations.py new file mode 100644 index 000000000000..b3e27cb29e7a --- /dev/null +++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_4/aio/operations/_text_analytics_client_operations.py @@ -0,0 +1,1063 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar, Union +import warnings + +from ....._async_lro import AnalyzeHealthcareEntitiesAsyncLROPoller, AnalyzeHealthcareEntitiesAsyncLROPollingMethod, AsyncAnalyzeBatchActionsLROPoller, AsyncAnalyzeBatchActionsLROPollingMethod +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest +from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod +from azure.core.polling.async_base_polling import AsyncLROBasePolling + +from ... import models as _models + +T = TypeVar('T') +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + +class TextAnalyticsClientOperationsMixin: + + async def _analyze_initial( + self, + body: Optional["_models.AnalyzeBatchInput"] = None, + **kwargs + ) -> Optional["_models.AnalyzeJobState"]: + cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.AnalyzeJobState"]] + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 400: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.ErrorResponse, response)), + 500: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.ErrorResponse, response)), + } + error_map.update(kwargs.pop('error_map', {})) + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json, text/json" + + # Construct URL + url = self._analyze_initial.metadata['url'] # type: ignore + path_format_arguments = { + 'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + if body is not None: + body_content = self._serialize.body(body, 'AnalyzeBatchInput') + else: + body_content = None + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + response_headers = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('AnalyzeJobState', pipeline_response) + + if response.status_code == 202: + response_headers['Operation-Location']=self._deserialize('str', response.headers.get('Operation-Location')) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + _analyze_initial.metadata = {'url': 
'/analyze'} # type: ignore + + async def begin_analyze( + self, + body: Optional["_models.AnalyzeBatchInput"] = None, + **kwargs + ) -> AsyncAnalyzeBatchActionsLROPoller["_models.AnalyzeJobState"]: + """Submit analysis job. + + Submit a collection of text documents for analysis. Specify one or more unique tasks to be + executed. + + :param body: Collection of documents to analyze and tasks to execute. + :type body: ~azure.ai.textanalytics.v3_1_preview_4.models.AnalyzeBatchInput + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: Pass in True if you'd like the AsyncAnalyzeBatchActionsLROPollingMethod polling method, + False for no polling, or your own initialized polling object for a personal polling strategy. + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. + :return: An instance of AsyncAnalyzeBatchActionsLROPoller that returns either AnalyzeJobState or the result of cls(response) + :rtype: ~....._async_lro.AsyncAnalyzeBatchActionsLROPoller[~azure.ai.textanalytics.v3_1_preview_4.models.AnalyzeJobState] + :raises ~azure.core.exceptions.HttpResponseError: + """ + polling = kwargs.pop('polling', False) # type: Union[bool, AsyncPollingMethod] + cls = kwargs.pop('cls', None) # type: ClsType["_models.AnalyzeJobState"] + lro_delay = kwargs.pop( + 'polling_interval', + self._config.polling_interval + ) + cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] + if cont_token is None: + raw_result = await self._analyze_initial( + body=body, + cls=lambda x,y,z: x, + **kwargs + ) + + kwargs.pop('error_map', None) + kwargs.pop('content_type', None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize('AnalyzeJobState', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + return deserialized + + path_format_arguments = { + 'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + } + + if polling is True: polling_method = AsyncAnalyzeBatchActionsLROPollingMethod(lro_delay, path_format_arguments=path_format_arguments, **kwargs) + elif polling is False: polling_method = AsyncNoPolling() + else: polling_method = polling + if cont_token: + return AsyncAnalyzeBatchActionsLROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output + ) + else: + return AsyncAnalyzeBatchActionsLROPoller(self._client, raw_result, get_long_running_output, polling_method) + begin_analyze.metadata = {'url': '/analyze'} # type: ignore + + async def analyze_status( + self, + job_id: str, + show_stats: Optional[bool] = None, + top: Optional[int] = 20, + skip: Optional[int] = 0, + **kwargs + ) -> "_models.AnalyzeJobState": + """Get analysis status and results. + + Get the status of an analysis job. A job may consist of one or more tasks. Once all tasks are + completed, the job will transition to the completed state and results will be available for + each task. + + :param job_id: Job ID for Analyze. + :type job_id: str + :param show_stats: (Optional) if set to true, response will contain request and document level + statistics. 
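For `begin_analyze` as implemented above, a hypothetical sketch of submitting a job through the async mixin is shown below. The `AnalyzeBatchInput`/task field and model names are assumptions based on the v3.1-preview REST schema and are not confirmed by this diff; note also that `polling` defaults to False in this generated layer, so `polling=True` is passed to get the AsyncAnalyzeBatchActionsLROPollingMethod.

```python
from azure.ai.textanalytics._generated.v3_1_preview_4 import models  # assumed import path
from azure.ai.textanalytics._generated.v3_1_preview_4.aio import TextAnalyticsClient
from azure.identity.aio import DefaultAzureCredential


async def submit_analysis() -> None:
    async with TextAnalyticsClient(DefaultAzureCredential(), "https://westus.api.cognitive.microsoft.com") as client:
        documents = [models.MultiLanguageInput(id="1", text="Contoso is headquartered in Redmond.", language="en")]
        body = models.AnalyzeBatchInput(  # field/task model names assumed from the v3.1-preview REST schema
            display_name="sample analyze job",
            analysis_input=models.MultiLanguageBatchInput(documents=documents),
            tasks=models.JobManifestTasks(entity_recognition_tasks=[models.EntitiesTask()]),
        )
        poller = await client.begin_analyze(body=body, polling=True)
        state = await poller.result()
        print(state.status)
```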
+ :type show_stats: bool + :param top: (Optional) Set the maximum number of results per task. When both $top and $skip are + specified, $skip is applied first. + :type top: int + :param skip: (Optional) Set the number of elements to offset in the response. When both $top + and $skip are specified, $skip is applied first. + :type skip: int + :keyword callable cls: A custom type or function that will be passed the direct response + :return: AnalyzeJobState, or the result of cls(response) + :rtype: ~azure.ai.textanalytics.v3_1_preview_4.models.AnalyzeJobState + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.AnalyzeJobState"] + error_map = { + 401: ClientAuthenticationError, + 409: ResourceExistsError, + 404: lambda response: ResourceNotFoundError(response=response, model=self._deserialize(_models.ErrorResponse, response)), + 500: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.ErrorResponse, response)), + } + error_map.update(kwargs.pop('error_map', {})) + accept = "application/json, text/json" + + # Construct URL + url = self.analyze_status.metadata['url'] # type: ignore + path_format_arguments = { + 'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if show_stats is not None: + query_parameters['showStats'] = self._serialize.query("show_stats", show_stats, 'bool') + if top is not None: + query_parameters['$top'] = self._serialize.query("top", top, 'int', maximum=50, minimum=1) + if skip is not None: + query_parameters['$skip'] = self._serialize.query("skip", skip, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + deserialized = self._deserialize('AnalyzeJobState', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + analyze_status.metadata = {'url': '/analyze/jobs/{jobId}'} # type: ignore + + async def health_status( + self, + job_id: str, + top: Optional[int] = 20, + skip: Optional[int] = 0, + show_stats: Optional[bool] = None, + **kwargs + ) -> "_models.HealthcareJobState": + """Get healthcare analysis job status and results. + + Get details of the healthcare prediction job specified by the jobId. + + :param job_id: Job ID. + :type job_id: str + :param top: (Optional) Set the maximum number of results per task. When both $top and $skip are + specified, $skip is applied first. + :type top: int + :param skip: (Optional) Set the number of elements to offset in the response. When both $top + and $skip are specified, $skip is applied first. + :type skip: int + :param show_stats: (Optional) if set to true, response will contain request and document level + statistics. 
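The `$top`/`$skip` parameters above page through per-task results of an analyze job; the query construction in `analyze_status` bounds `top` to 1..50 and `skip` to >= 0. A minimal hedged sketch, assuming an async generated client and a job id from a previously submitted job:

```python
# `client` is assumed to be the async generated TextAnalyticsClient and `job_id` the value
# returned from a previously submitted analyze job (see the earlier sketch).
async def print_analyze_status(client, job_id: str) -> None:
    # The service applies $skip before $top when paging task results, per the docstring above.
    state = await client.analyze_status(job_id, show_stats=True, top=50, skip=0)
    print(state.status)
```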
+ :type show_stats: bool + :keyword callable cls: A custom type or function that will be passed the direct response + :return: HealthcareJobState, or the result of cls(response) + :rtype: ~azure.ai.textanalytics.v3_1_preview_4.models.HealthcareJobState + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.HealthcareJobState"] + error_map = { + 401: ClientAuthenticationError, + 409: ResourceExistsError, + 404: lambda response: ResourceNotFoundError(response=response, model=self._deserialize(_models.ErrorResponse, response)), + 500: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.ErrorResponse, response)), + } + error_map.update(kwargs.pop('error_map', {})) + accept = "application/json, text/json" + + # Construct URL + url = self.health_status.metadata['url'] # type: ignore + path_format_arguments = { + 'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if top is not None: + query_parameters['$top'] = self._serialize.query("top", top, 'int', maximum=50, minimum=1) + if skip is not None: + query_parameters['$skip'] = self._serialize.query("skip", skip, 'int', minimum=0) + if show_stats is not None: + query_parameters['showStats'] = self._serialize.query("show_stats", show_stats, 'bool') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + deserialized = self._deserialize('HealthcareJobState', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + health_status.metadata = {'url': '/entities/health/jobs/{jobId}'} # type: ignore + + async def _cancel_health_job_initial( + self, + job_id: str, + **kwargs + ) -> None: + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, + 409: ResourceExistsError, + 404: lambda response: ResourceNotFoundError(response=response, model=self._deserialize(_models.ErrorResponse, response)), + 500: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.ErrorResponse, response)), + } + error_map.update(kwargs.pop('error_map', {})) + accept = "application/json, text/json" + + # Construct URL + url = self._cancel_health_job_initial.metadata['url'] # type: ignore + path_format_arguments = { + 'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.delete(url, query_parameters, header_parameters) + 
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + response_headers = {} + response_headers['Operation-Location']=self._deserialize('str', response.headers.get('Operation-Location')) + + if cls: + return cls(pipeline_response, None, response_headers) + + _cancel_health_job_initial.metadata = {'url': '/entities/health/jobs/{jobId}'} # type: ignore + + async def begin_cancel_health_job( + self, + job_id: str, + **kwargs + ) -> AsyncLROPoller[None]: + """Cancel healthcare prediction job. + + Cancel healthcare prediction job. + + :param job_id: Job ID. + :type job_id: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: Pass in True if you'd like the AsyncLROBasePolling polling method, + False for no polling, or your own initialized polling object for a personal polling strategy. + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. + :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) + :rtype: ~azure.core.polling.AsyncLROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + polling = kwargs.pop('polling', False) # type: Union[bool, AsyncPollingMethod] + cls = kwargs.pop('cls', None) # type: ClsType[None] + lro_delay = kwargs.pop( + 'polling_interval', + self._config.polling_interval + ) + cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] + if cont_token is None: + raw_result = await self._cancel_health_job_initial( + job_id=job_id, + cls=lambda x,y,z: x, + **kwargs + ) + + kwargs.pop('error_map', None) + kwargs.pop('content_type', None) + + def get_long_running_output(pipeline_response): + if cls: + return cls(pipeline_response, None, {}) + + path_format_arguments = { + 'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str'), + } + + if polling is True: polling_method = AsyncLROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) + elif polling is False: polling_method = AsyncNoPolling() + else: polling_method = polling + if cont_token: + return AsyncLROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output + ) + else: + return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) + begin_cancel_health_job.metadata = {'url': '/entities/health/jobs/{jobId}'} # type: ignore + + async def _health_initial( + self, + documents: List["_models.MultiLanguageInput"], + model_version: Optional[str] = None, + string_index_type: Optional[Union[str, "_models.StringIndexType"]] = "TextElements_v8", + **kwargs + ) -> Optional["_models.HealthcareJobState"]: + cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.HealthcareJobState"]] + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 400: lambda response: 
HttpResponseError(response=response, model=self._deserialize(_models.ErrorResponse, response)), + 500: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.ErrorResponse, response)), + } + error_map.update(kwargs.pop('error_map', {})) + + _input = _models.MultiLanguageBatchInput(documents=documents) + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json, text/json" + + # Construct URL + url = self._health_initial.metadata['url'] # type: ignore + path_format_arguments = { + 'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if model_version is not None: + query_parameters['model-version'] = self._serialize.query("model_version", model_version, 'str') + if string_index_type is not None: + query_parameters['stringIndexType'] = self._serialize.query("string_index_type", string_index_type, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(_input, 'MultiLanguageBatchInput') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + response_headers = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('HealthcareJobState', pipeline_response) + + if response.status_code == 202: + response_headers['Operation-Location']=self._deserialize('str', response.headers.get('Operation-Location')) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + _health_initial.metadata = {'url': '/entities/health/jobs'} # type: ignore + + async def begin_health( + self, + documents: List["_models.MultiLanguageInput"], + model_version: Optional[str] = None, + string_index_type: Optional[Union[str, "_models.StringIndexType"]] = "TextElements_v8", + **kwargs + ) -> AnalyzeHealthcareEntitiesAsyncLROPoller["_models.HealthcareJobState"]: + """Submit healthcare analysis job. + + Start a healthcare analysis job to recognize healthcare related entities (drugs, conditions, + symptoms, etc) and their relations. + + :param documents: The set of documents to process as part of this batch. + :type documents: list[~azure.ai.textanalytics.v3_1_preview_4.models.MultiLanguageInput] + :param model_version: (Optional) This value indicates which model will be used for scoring. If + a model-version is not specified, the API should default to the latest, non-preview version. + :type model_version: str + :param string_index_type: (Optional) Specifies the method used to interpret string offsets. + Defaults to Text Elements (Graphemes) according to Unicode v8.0.0. For additional information + see https://aka.ms/text-analytics-offsets. 
+ :type string_index_type: str or ~azure.ai.textanalytics.v3_1_preview_4.models.StringIndexType + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: Pass in True if you'd like the AnalyzeHealthcareEntitiesAsyncLROPollingMethod polling method, + False for no polling, or your own initialized polling object for a personal polling strategy. + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. + :return: An instance of AnalyzeHealthcareEntitiesAsyncLROPoller that returns either HealthcareJobState or the result of cls(response) + :rtype: ~....._async_lro.AnalyzeHealthcareEntitiesAsyncLROPoller[~azure.ai.textanalytics.v3_1_preview_4.models.HealthcareJobState] + :raises ~azure.core.exceptions.HttpResponseError: + """ + polling = kwargs.pop('polling', False) # type: Union[bool, AsyncPollingMethod] + cls = kwargs.pop('cls', None) # type: ClsType["_models.HealthcareJobState"] + lro_delay = kwargs.pop( + 'polling_interval', + self._config.polling_interval + ) + cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] + if cont_token is None: + raw_result = await self._health_initial( + documents=documents, + model_version=model_version, + string_index_type=string_index_type, + cls=lambda x,y,z: x, + **kwargs + ) + + kwargs.pop('error_map', None) + kwargs.pop('content_type', None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize('HealthcareJobState', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + return deserialized + + path_format_arguments = { + 'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + } + + if polling is True: polling_method = AnalyzeHealthcareEntitiesAsyncLROPollingMethod(lro_delay, path_format_arguments=path_format_arguments, **kwargs) + elif polling is False: polling_method = AsyncNoPolling() + else: polling_method = polling + if cont_token: + return AnalyzeHealthcareEntitiesAsyncLROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output + ) + else: + return AnalyzeHealthcareEntitiesAsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) + begin_health.metadata = {'url': '/entities/health/jobs'} # type: ignore + + async def entities_recognition_general( + self, + documents: List["_models.MultiLanguageInput"], + model_version: Optional[str] = None, + show_stats: Optional[bool] = None, + string_index_type: Optional[Union[str, "_models.StringIndexType"]] = "TextElements_v8", + **kwargs + ) -> "_models.EntitiesResult": + """Named Entity Recognition. + + The API returns a list of general named entities in a given document. For the list of supported + entity types, check :code:`Supported Entity Types in Text + Analytics API`. See the :code:`Supported languages in Text + Analytics API` for the list of enabled languages. + + :param documents: The set of documents to process as part of this batch. + :type documents: list[~azure.ai.textanalytics.v3_1_preview_4.models.MultiLanguageInput] + :param model_version: (Optional) This value indicates which model will be used for scoring. 
If + a model-version is not specified, the API should default to the latest, non-preview version. + :type model_version: str + :param show_stats: (Optional) if set to true, response will contain request and document level + statistics. + :type show_stats: bool + :param string_index_type: (Optional) Specifies the method used to interpret string offsets. + Defaults to Text Elements (Graphemes) according to Unicode v8.0.0. For additional information + see https://aka.ms/text-analytics-offsets. + :type string_index_type: str or ~azure.ai.textanalytics.v3_1_preview_4.models.StringIndexType + :keyword callable cls: A custom type or function that will be passed the direct response + :return: EntitiesResult, or the result of cls(response) + :rtype: ~azure.ai.textanalytics.v3_1_preview_4.models.EntitiesResult + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.EntitiesResult"] + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 400: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.ErrorResponse, response)), + 500: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.ErrorResponse, response)), + } + error_map.update(kwargs.pop('error_map', {})) + + _input = _models.MultiLanguageBatchInput(documents=documents) + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json, text/json" + + # Construct URL + url = self.entities_recognition_general.metadata['url'] # type: ignore + path_format_arguments = { + 'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if model_version is not None: + query_parameters['model-version'] = self._serialize.query("model_version", model_version, 'str') + if show_stats is not None: + query_parameters['showStats'] = self._serialize.query("show_stats", show_stats, 'bool') + if string_index_type is not None: + query_parameters['stringIndexType'] = self._serialize.query("string_index_type", string_index_type, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(_input, 'MultiLanguageBatchInput') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + deserialized = self._deserialize('EntitiesResult', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + entities_recognition_general.metadata = {'url': '/entities/recognition/general'} # type: ignore + + async def entities_recognition_pii( + self, + documents: List["_models.MultiLanguageInput"], + model_version: Optional[str] = None, + show_stats: Optional[bool] = None, + domain: 
Optional[str] = None, + string_index_type: Optional[Union[str, "_models.StringIndexType"]] = "TextElements_v8", + pii_categories: Optional[List[Union[str, "_models.PiiCategory"]]] = None, + **kwargs + ) -> "_models.PiiResult": + """Entities containing personal information. + + The API returns a list of entities with personal information (\"SSN\", \"Bank Account\" etc) in + the document. For the list of supported entity types, check :code:`Supported Entity Types in Text Analytics API`. See the + :code:`Supported languages in Text Analytics API` for the + list of enabled languages. + + :param documents: The set of documents to process as part of this batch. + :type documents: list[~azure.ai.textanalytics.v3_1_preview_4.models.MultiLanguageInput] + :param model_version: (Optional) This value indicates which model will be used for scoring. If + a model-version is not specified, the API should default to the latest, non-preview version. + :type model_version: str + :param show_stats: (Optional) if set to true, response will contain request and document level + statistics. + :type show_stats: bool + :param domain: (Optional) if specified, will set the PII domain to include only a subset of the + entity categories. Possible values include: 'PHI', 'none'. + :type domain: str + :param string_index_type: (Optional) Specifies the method used to interpret string offsets. + Defaults to Text Elements (Graphemes) according to Unicode v8.0.0. For additional information + see https://aka.ms/text-analytics-offsets. + :type string_index_type: str or ~azure.ai.textanalytics.v3_1_preview_4.models.StringIndexType + :param pii_categories: (Optional) describes the PII categories to return. + :type pii_categories: list[str or ~azure.ai.textanalytics.v3_1_preview_4.models.PiiCategory] + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PiiResult, or the result of cls(response) + :rtype: ~azure.ai.textanalytics.v3_1_preview_4.models.PiiResult + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.PiiResult"] + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 400: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.ErrorResponse, response)), + 500: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.ErrorResponse, response)), + } + error_map.update(kwargs.pop('error_map', {})) + + _input = _models.MultiLanguageBatchInput(documents=documents) + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json, text/json" + + # Construct URL + url = self.entities_recognition_pii.metadata['url'] # type: ignore + path_format_arguments = { + 'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if model_version is not None: + query_parameters['model-version'] = self._serialize.query("model_version", model_version, 'str') + if show_stats is not None: + query_parameters['showStats'] = self._serialize.query("show_stats", show_stats, 'bool') + if domain is not None: + query_parameters['domain'] = self._serialize.query("domain", domain, 'str') + if string_index_type is not None: + query_parameters['stringIndexType'] = self._serialize.query("string_index_type", string_index_type, 
'str') + if pii_categories is not None: + query_parameters['piiCategories'] = self._serialize.query("pii_categories", pii_categories, '[str]', div=',') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(_input, 'MultiLanguageBatchInput') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + deserialized = self._deserialize('PiiResult', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + entities_recognition_pii.metadata = {'url': '/entities/recognition/pii'} # type: ignore + + async def entities_linking( + self, + documents: List["_models.MultiLanguageInput"], + model_version: Optional[str] = None, + show_stats: Optional[bool] = None, + string_index_type: Optional[Union[str, "_models.StringIndexType"]] = "TextElements_v8", + **kwargs + ) -> "_models.EntityLinkingResult": + """Linked entities from a well known knowledge base. + + The API returns a list of recognized entities with links to a well known knowledge base. See + the :code:`Supported languages in Text Analytics API` for + the list of enabled languages. + + :param documents: The set of documents to process as part of this batch. + :type documents: list[~azure.ai.textanalytics.v3_1_preview_4.models.MultiLanguageInput] + :param model_version: (Optional) This value indicates which model will be used for scoring. If + a model-version is not specified, the API should default to the latest, non-preview version. + :type model_version: str + :param show_stats: (Optional) if set to true, response will contain request and document level + statistics. + :type show_stats: bool + :param string_index_type: (Optional) Specifies the method used to interpret string offsets. + Defaults to Text Elements (Graphemes) according to Unicode v8.0.0. For additional information + see https://aka.ms/text-analytics-offsets. 
+ :type string_index_type: str or ~azure.ai.textanalytics.v3_1_preview_4.models.StringIndexType + :keyword callable cls: A custom type or function that will be passed the direct response + :return: EntityLinkingResult, or the result of cls(response) + :rtype: ~azure.ai.textanalytics.v3_1_preview_4.models.EntityLinkingResult + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.EntityLinkingResult"] + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 400: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.ErrorResponse, response)), + 500: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.ErrorResponse, response)), + } + error_map.update(kwargs.pop('error_map', {})) + + _input = _models.MultiLanguageBatchInput(documents=documents) + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json, text/json" + + # Construct URL + url = self.entities_linking.metadata['url'] # type: ignore + path_format_arguments = { + 'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if model_version is not None: + query_parameters['model-version'] = self._serialize.query("model_version", model_version, 'str') + if show_stats is not None: + query_parameters['showStats'] = self._serialize.query("show_stats", show_stats, 'bool') + if string_index_type is not None: + query_parameters['stringIndexType'] = self._serialize.query("string_index_type", string_index_type, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(_input, 'MultiLanguageBatchInput') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + deserialized = self._deserialize('EntityLinkingResult', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + entities_linking.metadata = {'url': '/entities/linking'} # type: ignore + + async def key_phrases( + self, + documents: List["_models.MultiLanguageInput"], + model_version: Optional[str] = None, + show_stats: Optional[bool] = None, + **kwargs + ) -> "_models.KeyPhraseResult": + """Key Phrases. + + The API returns a list of strings denoting the key phrases in the input text. See the :code:`Supported languages in Text Analytics API` for the list of + enabled languages. + + :param documents: The set of documents to process as part of this batch. + :type documents: list[~azure.ai.textanalytics.v3_1_preview_4.models.MultiLanguageInput] + :param model_version: (Optional) This value indicates which model will be used for scoring. 
If + a model-version is not specified, the API should default to the latest, non-preview version. + :type model_version: str + :param show_stats: (Optional) if set to true, response will contain request and document level + statistics. + :type show_stats: bool + :keyword callable cls: A custom type or function that will be passed the direct response + :return: KeyPhraseResult, or the result of cls(response) + :rtype: ~azure.ai.textanalytics.v3_1_preview_4.models.KeyPhraseResult + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.KeyPhraseResult"] + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 400: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.ErrorResponse, response)), + 500: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.ErrorResponse, response)), + } + error_map.update(kwargs.pop('error_map', {})) + + _input = _models.MultiLanguageBatchInput(documents=documents) + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json, text/json" + + # Construct URL + url = self.key_phrases.metadata['url'] # type: ignore + path_format_arguments = { + 'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if model_version is not None: + query_parameters['model-version'] = self._serialize.query("model_version", model_version, 'str') + if show_stats is not None: + query_parameters['showStats'] = self._serialize.query("show_stats", show_stats, 'bool') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(_input, 'MultiLanguageBatchInput') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + deserialized = self._deserialize('KeyPhraseResult', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + key_phrases.metadata = {'url': '/keyPhrases'} # type: ignore + + async def languages( + self, + documents: List["_models.LanguageInput"], + model_version: Optional[str] = None, + show_stats: Optional[bool] = None, + **kwargs + ) -> "_models.LanguageResult": + """Detect Language. + + The API returns the detected language and a numeric score between 0 and 1. Scores close to 1 + indicate 100% certainty that the identified language is true. See the :code:`Supported languages in Text Analytics API` for the list of + enabled languages. + + :param documents: + :type documents: list[~azure.ai.textanalytics.v3_1_preview_4.models.LanguageInput] + :param model_version: (Optional) This value indicates which model will be used for scoring. 
If + a model-version is not specified, the API should default to the latest, non-preview version. + :type model_version: str + :param show_stats: (Optional) if set to true, response will contain request and document level + statistics. + :type show_stats: bool + :keyword callable cls: A custom type or function that will be passed the direct response + :return: LanguageResult, or the result of cls(response) + :rtype: ~azure.ai.textanalytics.v3_1_preview_4.models.LanguageResult + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.LanguageResult"] + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 400: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.ErrorResponse, response)), + 500: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.ErrorResponse, response)), + } + error_map.update(kwargs.pop('error_map', {})) + + _input = _models.LanguageBatchInput(documents=documents) + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json, text/json" + + # Construct URL + url = self.languages.metadata['url'] # type: ignore + path_format_arguments = { + 'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if model_version is not None: + query_parameters['model-version'] = self._serialize.query("model_version", model_version, 'str') + if show_stats is not None: + query_parameters['showStats'] = self._serialize.query("show_stats", show_stats, 'bool') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(_input, 'LanguageBatchInput') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + deserialized = self._deserialize('LanguageResult', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + languages.metadata = {'url': '/languages'} # type: ignore + + async def sentiment( + self, + documents: List["_models.MultiLanguageInput"], + model_version: Optional[str] = None, + show_stats: Optional[bool] = None, + opinion_mining: Optional[bool] = None, + string_index_type: Optional[Union[str, "_models.StringIndexType"]] = "TextElements_v8", + **kwargs + ) -> "_models.SentimentResponse": + """Sentiment. + + The API returns a detailed sentiment analysis for the input text. The analysis is done in + multiple levels of granularity, start from the a document level, down to sentence and key terms + (targets and assessments). + + :param documents: The set of documents to process as part of this batch. 
+ :type documents: list[~azure.ai.textanalytics.v3_1_preview_4.models.MultiLanguageInput] + :param model_version: (Optional) This value indicates which model will be used for scoring. If + a model-version is not specified, the API should default to the latest, non-preview version. + :type model_version: str + :param show_stats: (Optional) if set to true, response will contain request and document level + statistics. + :type show_stats: bool + :param opinion_mining: (Optional) if set to true, response will contain not only sentiment + prediction but also opinion mining (aspect-based sentiment analysis) results. + :type opinion_mining: bool + :param string_index_type: (Optional) Specifies the method used to interpret string offsets. + Defaults to Text Elements (Graphemes) according to Unicode v8.0.0. For additional information + see https://aka.ms/text-analytics-offsets. + :type string_index_type: str or ~azure.ai.textanalytics.v3_1_preview_4.models.StringIndexType + :keyword callable cls: A custom type or function that will be passed the direct response + :return: SentimentResponse, or the result of cls(response) + :rtype: ~azure.ai.textanalytics.v3_1_preview_4.models.SentimentResponse + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.SentimentResponse"] + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 400: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.ErrorResponse, response)), + 500: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.ErrorResponse, response)), + } + error_map.update(kwargs.pop('error_map', {})) + + _input = _models.MultiLanguageBatchInput(documents=documents) + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json, text/json" + + # Construct URL + url = self.sentiment.metadata['url'] # type: ignore + path_format_arguments = { + 'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if model_version is not None: + query_parameters['model-version'] = self._serialize.query("model_version", model_version, 'str') + if show_stats is not None: + query_parameters['showStats'] = self._serialize.query("show_stats", show_stats, 'bool') + if opinion_mining is not None: + query_parameters['opinionMining'] = self._serialize.query("opinion_mining", opinion_mining, 'bool') + if string_index_type is not None: + query_parameters['stringIndexType'] = self._serialize.query("string_index_type", string_index_type, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(_input, 'MultiLanguageBatchInput') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, 
error_map=error_map) + raise HttpResponseError(response=response) + + deserialized = self._deserialize('SentimentResponse', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + sentiment.metadata = {'url': '/sentiment'} # type: ignore diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_4/models/__init__.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_4/models/__init__.py new file mode 100644 index 000000000000..2cf115784e44 --- /dev/null +++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_4/models/__init__.py @@ -0,0 +1,255 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +try: + from ._models_py3 import AnalyzeBatchInput + from ._models_py3 import AnalyzeJobMetadata + from ._models_py3 import AnalyzeJobState + from ._models_py3 import Components15Gvwi3SchemasTasksstatePropertiesTasksPropertiesEntityrecognitiontasksItemsAllof1 + from ._models_py3 import Components15X8E9LSchemasTasksstatePropertiesTasksPropertiesEntityrecognitionpiitasksItemsAllof1 + from ._models_py3 import Components1D9IzucSchemasTasksstatePropertiesTasksPropertiesKeyphraseextractiontasksItemsAllof1 + from ._models_py3 import ComponentsIfu7BjSchemasTasksstatePropertiesTasksPropertiesEntitylinkingtasksItemsAllof1 + from ._models_py3 import DetectedLanguage + from ._models_py3 import DocumentEntities + from ._models_py3 import DocumentError + from ._models_py3 import DocumentHealthcareEntities + from ._models_py3 import DocumentKeyPhrases + from ._models_py3 import DocumentLanguage + from ._models_py3 import DocumentLinkedEntities + from ._models_py3 import DocumentSentiment + from ._models_py3 import DocumentStatistics + from ._models_py3 import EntitiesResult + from ._models_py3 import EntitiesTask + from ._models_py3 import EntitiesTaskParameters + from ._models_py3 import Entity + from ._models_py3 import EntityLinkingResult + from ._models_py3 import EntityLinkingTask + from ._models_py3 import EntityLinkingTaskParameters + from ._models_py3 import ErrorResponse + from ._models_py3 import HealthcareAssertion + from ._models_py3 import HealthcareEntity + from ._models_py3 import HealthcareEntityLink + from ._models_py3 import HealthcareJobState + from ._models_py3 import HealthcareRelation + from ._models_py3 import HealthcareRelationEntity + from ._models_py3 import HealthcareResult + from ._models_py3 import InnerError + from ._models_py3 import JobDescriptor + from ._models_py3 import JobManifest + from ._models_py3 import JobManifestTasks + from ._models_py3 import JobMetadata + from ._models_py3 import KeyPhraseResult + from ._models_py3 import KeyPhrasesTask + from ._models_py3 import KeyPhrasesTaskParameters + from ._models_py3 import LanguageBatchInput + from ._models_py3 import LanguageInput + from ._models_py3 import LanguageResult + from ._models_py3 import LinkedEntity + from ._models_py3 import Match + from ._models_py3 import MultiLanguageBatchInput + from ._models_py3 import MultiLanguageInput + 
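# Note (descriptive comment, not part of the generated output): the Sentence*/Target*
+     # models imported below (SentenceTarget, SentenceAssessment, TargetRelation,
+     # TargetConfidenceScoreLabel) carry the opinion-mining results returned by the
+     # sentiment endpoint when the opinionMining query parameter is set.
+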
from ._models_py3 import Pagination + from ._models_py3 import PiiDocumentEntities + from ._models_py3 import PiiResult + from ._models_py3 import PiiTask + from ._models_py3 import PiiTaskParameters + from ._models_py3 import RequestStatistics + from ._models_py3 import SentenceAssessment + from ._models_py3 import SentenceSentiment + from ._models_py3 import SentenceTarget + from ._models_py3 import SentimentConfidenceScorePerLabel + from ._models_py3 import SentimentResponse + from ._models_py3 import TargetConfidenceScoreLabel + from ._models_py3 import TargetRelation + from ._models_py3 import TaskState + from ._models_py3 import TasksState + from ._models_py3 import TasksStateTasks + from ._models_py3 import TasksStateTasksDetails + from ._models_py3 import TasksStateTasksEntityLinkingTasksItem + from ._models_py3 import TasksStateTasksEntityRecognitionPiiTasksItem + from ._models_py3 import TasksStateTasksEntityRecognitionTasksItem + from ._models_py3 import TasksStateTasksKeyPhraseExtractionTasksItem + from ._models_py3 import TextAnalyticsError + from ._models_py3 import TextAnalyticsWarning +except (SyntaxError, ImportError): + from ._models import AnalyzeBatchInput # type: ignore + from ._models import AnalyzeJobMetadata # type: ignore + from ._models import AnalyzeJobState # type: ignore + from ._models import Components15Gvwi3SchemasTasksstatePropertiesTasksPropertiesEntityrecognitiontasksItemsAllof1 # type: ignore + from ._models import Components15X8E9LSchemasTasksstatePropertiesTasksPropertiesEntityrecognitionpiitasksItemsAllof1 # type: ignore + from ._models import Components1D9IzucSchemasTasksstatePropertiesTasksPropertiesKeyphraseextractiontasksItemsAllof1 # type: ignore + from ._models import ComponentsIfu7BjSchemasTasksstatePropertiesTasksPropertiesEntitylinkingtasksItemsAllof1 # type: ignore + from ._models import DetectedLanguage # type: ignore + from ._models import DocumentEntities # type: ignore + from ._models import DocumentError # type: ignore + from ._models import DocumentHealthcareEntities # type: ignore + from ._models import DocumentKeyPhrases # type: ignore + from ._models import DocumentLanguage # type: ignore + from ._models import DocumentLinkedEntities # type: ignore + from ._models import DocumentSentiment # type: ignore + from ._models import DocumentStatistics # type: ignore + from ._models import EntitiesResult # type: ignore + from ._models import EntitiesTask # type: ignore + from ._models import EntitiesTaskParameters # type: ignore + from ._models import Entity # type: ignore + from ._models import EntityLinkingResult # type: ignore + from ._models import EntityLinkingTask # type: ignore + from ._models import EntityLinkingTaskParameters # type: ignore + from ._models import ErrorResponse # type: ignore + from ._models import HealthcareAssertion # type: ignore + from ._models import HealthcareEntity # type: ignore + from ._models import HealthcareEntityLink # type: ignore + from ._models import HealthcareJobState # type: ignore + from ._models import HealthcareRelation # type: ignore + from ._models import HealthcareRelationEntity # type: ignore + from ._models import HealthcareResult # type: ignore + from ._models import InnerError # type: ignore + from ._models import JobDescriptor # type: ignore + from ._models import JobManifest # type: ignore + from ._models import JobManifestTasks # type: ignore + from ._models import JobMetadata # type: ignore + from ._models import KeyPhraseResult # type: ignore + from ._models import KeyPhrasesTask # 
type: ignore + from ._models import KeyPhrasesTaskParameters # type: ignore + from ._models import LanguageBatchInput # type: ignore + from ._models import LanguageInput # type: ignore + from ._models import LanguageResult # type: ignore + from ._models import LinkedEntity # type: ignore + from ._models import Match # type: ignore + from ._models import MultiLanguageBatchInput # type: ignore + from ._models import MultiLanguageInput # type: ignore + from ._models import Pagination # type: ignore + from ._models import PiiDocumentEntities # type: ignore + from ._models import PiiResult # type: ignore + from ._models import PiiTask # type: ignore + from ._models import PiiTaskParameters # type: ignore + from ._models import RequestStatistics # type: ignore + from ._models import SentenceAssessment # type: ignore + from ._models import SentenceSentiment # type: ignore + from ._models import SentenceTarget # type: ignore + from ._models import SentimentConfidenceScorePerLabel # type: ignore + from ._models import SentimentResponse # type: ignore + from ._models import TargetConfidenceScoreLabel # type: ignore + from ._models import TargetRelation # type: ignore + from ._models import TaskState # type: ignore + from ._models import TasksState # type: ignore + from ._models import TasksStateTasks # type: ignore + from ._models import TasksStateTasksDetails # type: ignore + from ._models import TasksStateTasksEntityLinkingTasksItem # type: ignore + from ._models import TasksStateTasksEntityRecognitionPiiTasksItem # type: ignore + from ._models import TasksStateTasksEntityRecognitionTasksItem # type: ignore + from ._models import TasksStateTasksKeyPhraseExtractionTasksItem # type: ignore + from ._models import TextAnalyticsError # type: ignore + from ._models import TextAnalyticsWarning # type: ignore + +from ._text_analytics_client_enums import ( + Association, + Certainty, + Conditionality, + DocumentSentimentValue, + ErrorCodeValue, + InnerErrorCodeValue, + PiiCategory, + PiiTaskParametersDomain, + RelationType, + SentenceSentimentValue, + State, + StringIndexType, + StringIndexTypeResponse, + TargetRelationType, + TokenSentimentValue, + WarningCodeValue, +) + +__all__ = [ + 'AnalyzeBatchInput', + 'AnalyzeJobMetadata', + 'AnalyzeJobState', + 'Components15Gvwi3SchemasTasksstatePropertiesTasksPropertiesEntityrecognitiontasksItemsAllof1', + 'Components15X8E9LSchemasTasksstatePropertiesTasksPropertiesEntityrecognitionpiitasksItemsAllof1', + 'Components1D9IzucSchemasTasksstatePropertiesTasksPropertiesKeyphraseextractiontasksItemsAllof1', + 'ComponentsIfu7BjSchemasTasksstatePropertiesTasksPropertiesEntitylinkingtasksItemsAllof1', + 'DetectedLanguage', + 'DocumentEntities', + 'DocumentError', + 'DocumentHealthcareEntities', + 'DocumentKeyPhrases', + 'DocumentLanguage', + 'DocumentLinkedEntities', + 'DocumentSentiment', + 'DocumentStatistics', + 'EntitiesResult', + 'EntitiesTask', + 'EntitiesTaskParameters', + 'Entity', + 'EntityLinkingResult', + 'EntityLinkingTask', + 'EntityLinkingTaskParameters', + 'ErrorResponse', + 'HealthcareAssertion', + 'HealthcareEntity', + 'HealthcareEntityLink', + 'HealthcareJobState', + 'HealthcareRelation', + 'HealthcareRelationEntity', + 'HealthcareResult', + 'InnerError', + 'JobDescriptor', + 'JobManifest', + 'JobManifestTasks', + 'JobMetadata', + 'KeyPhraseResult', + 'KeyPhrasesTask', + 'KeyPhrasesTaskParameters', + 'LanguageBatchInput', + 'LanguageInput', + 'LanguageResult', + 'LinkedEntity', + 'Match', + 'MultiLanguageBatchInput', + 'MultiLanguageInput', + 
'Pagination', + 'PiiDocumentEntities', + 'PiiResult', + 'PiiTask', + 'PiiTaskParameters', + 'RequestStatistics', + 'SentenceAssessment', + 'SentenceSentiment', + 'SentenceTarget', + 'SentimentConfidenceScorePerLabel', + 'SentimentResponse', + 'TargetConfidenceScoreLabel', + 'TargetRelation', + 'TaskState', + 'TasksState', + 'TasksStateTasks', + 'TasksStateTasksDetails', + 'TasksStateTasksEntityLinkingTasksItem', + 'TasksStateTasksEntityRecognitionPiiTasksItem', + 'TasksStateTasksEntityRecognitionTasksItem', + 'TasksStateTasksKeyPhraseExtractionTasksItem', + 'TextAnalyticsError', + 'TextAnalyticsWarning', + 'Association', + 'Certainty', + 'Conditionality', + 'DocumentSentimentValue', + 'ErrorCodeValue', + 'InnerErrorCodeValue', + 'PiiCategory', + 'PiiTaskParametersDomain', + 'RelationType', + 'SentenceSentimentValue', + 'State', + 'StringIndexType', + 'StringIndexTypeResponse', + 'TargetRelationType', + 'TokenSentimentValue', + 'WarningCodeValue', +] diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_4/models/_models.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_4/models/_models.py new file mode 100644 index 000000000000..d93b1004c4ec --- /dev/null +++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_4/models/_models.py @@ -0,0 +1,2524 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.core.exceptions import HttpResponseError +import msrest.serialization + + +class JobManifest(msrest.serialization.Model): + """JobManifest. + + All required parameters must be populated in order to send to Azure. + + :param tasks: Required. The set of tasks to execute on the input documents. Cannot specify the + same task more than once. + :type tasks: ~azure.ai.textanalytics.v3_1_preview_4.models.JobManifestTasks + """ + + _validation = { + 'tasks': {'required': True}, + } + + _attribute_map = { + 'tasks': {'key': 'tasks', 'type': 'JobManifestTasks'}, + } + + def __init__( + self, + **kwargs + ): + super(JobManifest, self).__init__(**kwargs) + self.tasks = kwargs['tasks'] + + +class JobDescriptor(msrest.serialization.Model): + """JobDescriptor. + + :param display_name: Optional display name for the analysis job. + :type display_name: str + """ + + _attribute_map = { + 'display_name': {'key': 'displayName', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(JobDescriptor, self).__init__(**kwargs) + self.display_name = kwargs.get('display_name', None) + + +class AnalyzeBatchInput(JobDescriptor, JobManifest): + """AnalyzeBatchInput. + + All required parameters must be populated in order to send to Azure. + + :param tasks: Required. The set of tasks to execute on the input documents. Cannot specify the + same task more than once. + :type tasks: ~azure.ai.textanalytics.v3_1_preview_4.models.JobManifestTasks + :param display_name: Optional display name for the analysis job. + :type display_name: str + :param analysis_input: Required. Contains a set of input documents to be analyzed by the + service. 
+ :type analysis_input: ~azure.ai.textanalytics.v3_1_preview_4.models.MultiLanguageBatchInput + """ + + _validation = { + 'tasks': {'required': True}, + 'analysis_input': {'required': True}, + } + + _attribute_map = { + 'tasks': {'key': 'tasks', 'type': 'JobManifestTasks'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'analysis_input': {'key': 'analysisInput', 'type': 'MultiLanguageBatchInput'}, + } + + def __init__( + self, + **kwargs + ): + super(AnalyzeBatchInput, self).__init__(**kwargs) + self.tasks = kwargs['tasks'] + self.analysis_input = kwargs['analysis_input'] + self.display_name = kwargs.get('display_name', None) + self.analysis_input = kwargs['analysis_input'] + + +class JobMetadata(msrest.serialization.Model): + """JobMetadata. + + All required parameters must be populated in order to send to Azure. + + :param created_date_time: Required. + :type created_date_time: ~datetime.datetime + :param expiration_date_time: + :type expiration_date_time: ~datetime.datetime + :param job_id: Required. + :type job_id: str + :param last_update_date_time: Required. + :type last_update_date_time: ~datetime.datetime + :param status: Required. Possible values include: "notStarted", "running", "succeeded", + "failed", "rejected", "cancelled", "cancelling", "partiallyCompleted". + :type status: str or ~azure.ai.textanalytics.v3_1_preview_4.models.State + """ + + _validation = { + 'created_date_time': {'required': True}, + 'job_id': {'required': True}, + 'last_update_date_time': {'required': True}, + 'status': {'required': True}, + } + + _attribute_map = { + 'created_date_time': {'key': 'createdDateTime', 'type': 'iso-8601'}, + 'expiration_date_time': {'key': 'expirationDateTime', 'type': 'iso-8601'}, + 'job_id': {'key': 'jobId', 'type': 'str'}, + 'last_update_date_time': {'key': 'lastUpdateDateTime', 'type': 'iso-8601'}, + 'status': {'key': 'status', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(JobMetadata, self).__init__(**kwargs) + self.created_date_time = kwargs['created_date_time'] + self.expiration_date_time = kwargs.get('expiration_date_time', None) + self.job_id = kwargs['job_id'] + self.last_update_date_time = kwargs['last_update_date_time'] + self.status = kwargs['status'] + + +class AnalyzeJobMetadata(JobMetadata): + """AnalyzeJobMetadata. + + All required parameters must be populated in order to send to Azure. + + :param created_date_time: Required. + :type created_date_time: ~datetime.datetime + :param expiration_date_time: + :type expiration_date_time: ~datetime.datetime + :param job_id: Required. + :type job_id: str + :param last_update_date_time: Required. + :type last_update_date_time: ~datetime.datetime + :param status: Required. Possible values include: "notStarted", "running", "succeeded", + "failed", "rejected", "cancelled", "cancelling", "partiallyCompleted". 
+ :type status: str or ~azure.ai.textanalytics.v3_1_preview_4.models.State + :param display_name: + :type display_name: str + """ + + _validation = { + 'created_date_time': {'required': True}, + 'job_id': {'required': True}, + 'last_update_date_time': {'required': True}, + 'status': {'required': True}, + } + + _attribute_map = { + 'created_date_time': {'key': 'createdDateTime', 'type': 'iso-8601'}, + 'expiration_date_time': {'key': 'expirationDateTime', 'type': 'iso-8601'}, + 'job_id': {'key': 'jobId', 'type': 'str'}, + 'last_update_date_time': {'key': 'lastUpdateDateTime', 'type': 'iso-8601'}, + 'status': {'key': 'status', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(AnalyzeJobMetadata, self).__init__(**kwargs) + self.display_name = kwargs.get('display_name', None) + + +class Pagination(msrest.serialization.Model): + """Pagination. + + :param next_link: + :type next_link: str + """ + + _attribute_map = { + 'next_link': {'key': '@nextLink', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(Pagination, self).__init__(**kwargs) + self.next_link = kwargs.get('next_link', None) + + +class TasksState(msrest.serialization.Model): + """TasksState. + + All required parameters must be populated in order to send to Azure. + + :param tasks: Required. + :type tasks: ~azure.ai.textanalytics.v3_1_preview_4.models.TasksStateTasks + """ + + _validation = { + 'tasks': {'required': True}, + } + + _attribute_map = { + 'tasks': {'key': 'tasks', 'type': 'TasksStateTasks'}, + } + + def __init__( + self, + **kwargs + ): + super(TasksState, self).__init__(**kwargs) + self.tasks = kwargs['tasks'] + + +class AnalyzeJobState(AnalyzeJobMetadata, TasksState, Pagination): + """AnalyzeJobState. + + All required parameters must be populated in order to send to Azure. + + :param next_link: + :type next_link: str + :param tasks: Required. + :type tasks: ~azure.ai.textanalytics.v3_1_preview_4.models.TasksStateTasks + :param created_date_time: Required. + :type created_date_time: ~datetime.datetime + :param expiration_date_time: + :type expiration_date_time: ~datetime.datetime + :param job_id: Required. + :type job_id: str + :param last_update_date_time: Required. + :type last_update_date_time: ~datetime.datetime + :param status: Required. Possible values include: "notStarted", "running", "succeeded", + "failed", "rejected", "cancelled", "cancelling", "partiallyCompleted". + :type status: str or ~azure.ai.textanalytics.v3_1_preview_4.models.State + :param display_name: + :type display_name: str + :param errors: + :type errors: list[~azure.ai.textanalytics.v3_1_preview_4.models.TextAnalyticsError] + :param statistics: if showStats=true was specified in the request this field will contain + information about the request payload. 
+ :type statistics: ~azure.ai.textanalytics.v3_1_preview_4.models.RequestStatistics + """ + + _validation = { + 'tasks': {'required': True}, + 'created_date_time': {'required': True}, + 'job_id': {'required': True}, + 'last_update_date_time': {'required': True}, + 'status': {'required': True}, + } + + _attribute_map = { + 'next_link': {'key': '@nextLink', 'type': 'str'}, + 'tasks': {'key': 'tasks', 'type': 'TasksStateTasks'}, + 'created_date_time': {'key': 'createdDateTime', 'type': 'iso-8601'}, + 'expiration_date_time': {'key': 'expirationDateTime', 'type': 'iso-8601'}, + 'job_id': {'key': 'jobId', 'type': 'str'}, + 'last_update_date_time': {'key': 'lastUpdateDateTime', 'type': 'iso-8601'}, + 'status': {'key': 'status', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'errors': {'key': 'errors', 'type': '[TextAnalyticsError]'}, + 'statistics': {'key': 'statistics', 'type': 'RequestStatistics'}, + } + + def __init__( + self, + **kwargs + ): + super(AnalyzeJobState, self).__init__(**kwargs) + self.next_link = kwargs.get('next_link', None) + self.tasks = kwargs['tasks'] + self.errors = kwargs.get('errors', None) + self.statistics = kwargs.get('statistics', None) + self.next_link = kwargs.get('next_link', None) + self.created_date_time = kwargs['created_date_time'] + self.expiration_date_time = kwargs.get('expiration_date_time', None) + self.job_id = kwargs['job_id'] + self.last_update_date_time = kwargs['last_update_date_time'] + self.status = kwargs['status'] + self.display_name = kwargs.get('display_name', None) + self.errors = kwargs.get('errors', None) + self.statistics = kwargs.get('statistics', None) + self.tasks = kwargs['tasks'] + self.created_date_time = kwargs['created_date_time'] + self.expiration_date_time = kwargs.get('expiration_date_time', None) + self.job_id = kwargs['job_id'] + self.last_update_date_time = kwargs['last_update_date_time'] + self.status = kwargs['status'] + self.display_name = kwargs.get('display_name', None) + self.errors = kwargs.get('errors', None) + self.statistics = kwargs.get('statistics', None) + + +class Components15Gvwi3SchemasTasksstatePropertiesTasksPropertiesEntityrecognitiontasksItemsAllof1(msrest.serialization.Model): + """Components15Gvwi3SchemasTasksstatePropertiesTasksPropertiesEntityrecognitiontasksItemsAllof1. + + :param results: + :type results: ~azure.ai.textanalytics.v3_1_preview_4.models.EntitiesResult + """ + + _attribute_map = { + 'results': {'key': 'results', 'type': 'EntitiesResult'}, + } + + def __init__( + self, + **kwargs + ): + super(Components15Gvwi3SchemasTasksstatePropertiesTasksPropertiesEntityrecognitiontasksItemsAllof1, self).__init__(**kwargs) + self.results = kwargs.get('results', None) + + +class Components15X8E9LSchemasTasksstatePropertiesTasksPropertiesEntityrecognitionpiitasksItemsAllof1(msrest.serialization.Model): + """Components15X8E9LSchemasTasksstatePropertiesTasksPropertiesEntityrecognitionpiitasksItemsAllof1. 
+ + :param results: + :type results: ~azure.ai.textanalytics.v3_1_preview_4.models.PiiResult + """ + + _attribute_map = { + 'results': {'key': 'results', 'type': 'PiiResult'}, + } + + def __init__( + self, + **kwargs + ): + super(Components15X8E9LSchemasTasksstatePropertiesTasksPropertiesEntityrecognitionpiitasksItemsAllof1, self).__init__(**kwargs) + self.results = kwargs.get('results', None) + + +class Components1D9IzucSchemasTasksstatePropertiesTasksPropertiesKeyphraseextractiontasksItemsAllof1(msrest.serialization.Model): + """Components1D9IzucSchemasTasksstatePropertiesTasksPropertiesKeyphraseextractiontasksItemsAllof1. + + :param results: + :type results: ~azure.ai.textanalytics.v3_1_preview_4.models.KeyPhraseResult + """ + + _attribute_map = { + 'results': {'key': 'results', 'type': 'KeyPhraseResult'}, + } + + def __init__( + self, + **kwargs + ): + super(Components1D9IzucSchemasTasksstatePropertiesTasksPropertiesKeyphraseextractiontasksItemsAllof1, self).__init__(**kwargs) + self.results = kwargs.get('results', None) + + +class ComponentsIfu7BjSchemasTasksstatePropertiesTasksPropertiesEntitylinkingtasksItemsAllof1(msrest.serialization.Model): + """ComponentsIfu7BjSchemasTasksstatePropertiesTasksPropertiesEntitylinkingtasksItemsAllof1. + + :param results: + :type results: ~azure.ai.textanalytics.v3_1_preview_4.models.EntityLinkingResult + """ + + _attribute_map = { + 'results': {'key': 'results', 'type': 'EntityLinkingResult'}, + } + + def __init__( + self, + **kwargs + ): + super(ComponentsIfu7BjSchemasTasksstatePropertiesTasksPropertiesEntitylinkingtasksItemsAllof1, self).__init__(**kwargs) + self.results = kwargs.get('results', None) + + +class DetectedLanguage(msrest.serialization.Model): + """DetectedLanguage. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. Long name of a detected language (e.g. English, French). + :type name: str + :param iso6391_name: Required. A two letter representation of the detected language according + to the ISO 639-1 standard (e.g. en, fr). + :type iso6391_name: str + :param confidence_score: Required. A confidence score between 0 and 1. Scores close to 1 + indicate 100% certainty that the identified language is true. + :type confidence_score: float + """ + + _validation = { + 'name': {'required': True}, + 'iso6391_name': {'required': True}, + 'confidence_score': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'iso6391_name': {'key': 'iso6391Name', 'type': 'str'}, + 'confidence_score': {'key': 'confidenceScore', 'type': 'float'}, + } + + def __init__( + self, + **kwargs + ): + super(DetectedLanguage, self).__init__(**kwargs) + self.name = kwargs['name'] + self.iso6391_name = kwargs['iso6391_name'] + self.confidence_score = kwargs['confidence_score'] + + +class DocumentEntities(msrest.serialization.Model): + """DocumentEntities. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. Unique, non-empty document identifier. + :type id: str + :param entities: Required. Recognized entities in the document. + :type entities: list[~azure.ai.textanalytics.v3_1_preview_4.models.Entity] + :param warnings: Required. Warnings encountered while processing document. + :type warnings: list[~azure.ai.textanalytics.v3_1_preview_4.models.TextAnalyticsWarning] + :param statistics: if showStats=true was specified in the request this field will contain + information about the document payload. 
+ :type statistics: ~azure.ai.textanalytics.v3_1_preview_4.models.DocumentStatistics + """ + + _validation = { + 'id': {'required': True}, + 'entities': {'required': True}, + 'warnings': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'entities': {'key': 'entities', 'type': '[Entity]'}, + 'warnings': {'key': 'warnings', 'type': '[TextAnalyticsWarning]'}, + 'statistics': {'key': 'statistics', 'type': 'DocumentStatistics'}, + } + + def __init__( + self, + **kwargs + ): + super(DocumentEntities, self).__init__(**kwargs) + self.id = kwargs['id'] + self.entities = kwargs['entities'] + self.warnings = kwargs['warnings'] + self.statistics = kwargs.get('statistics', None) + + +class DocumentError(msrest.serialization.Model): + """DocumentError. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. Document Id. + :type id: str + :param error: Required. Document Error. + :type error: ~azure.ai.textanalytics.v3_1_preview_4.models.TextAnalyticsError + """ + + _validation = { + 'id': {'required': True}, + 'error': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'error': {'key': 'error', 'type': 'TextAnalyticsError'}, + } + + def __init__( + self, + **kwargs + ): + super(DocumentError, self).__init__(**kwargs) + self.id = kwargs['id'] + self.error = kwargs['error'] + + +class DocumentHealthcareEntities(msrest.serialization.Model): + """DocumentHealthcareEntities. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. Unique, non-empty document identifier. + :type id: str + :param entities: Required. Healthcare entities. + :type entities: list[~azure.ai.textanalytics.v3_1_preview_4.models.HealthcareEntity] + :param relations: Required. Healthcare entity relations. + :type relations: list[~azure.ai.textanalytics.v3_1_preview_4.models.HealthcareRelation] + :param warnings: Required. Warnings encountered while processing document. + :type warnings: list[~azure.ai.textanalytics.v3_1_preview_4.models.TextAnalyticsWarning] + :param statistics: if showStats=true was specified in the request this field will contain + information about the document payload. + :type statistics: ~azure.ai.textanalytics.v3_1_preview_4.models.DocumentStatistics + """ + + _validation = { + 'id': {'required': True}, + 'entities': {'required': True}, + 'relations': {'required': True}, + 'warnings': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'entities': {'key': 'entities', 'type': '[HealthcareEntity]'}, + 'relations': {'key': 'relations', 'type': '[HealthcareRelation]'}, + 'warnings': {'key': 'warnings', 'type': '[TextAnalyticsWarning]'}, + 'statistics': {'key': 'statistics', 'type': 'DocumentStatistics'}, + } + + def __init__( + self, + **kwargs + ): + super(DocumentHealthcareEntities, self).__init__(**kwargs) + self.id = kwargs['id'] + self.entities = kwargs['entities'] + self.relations = kwargs['relations'] + self.warnings = kwargs['warnings'] + self.statistics = kwargs.get('statistics', None) + + +class DocumentKeyPhrases(msrest.serialization.Model): + """DocumentKeyPhrases. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. Unique, non-empty document identifier. + :type id: str + :param key_phrases: Required. A list of representative words or phrases. The number of key + phrases returned is proportional to the number of words in the input document. 
+ :type key_phrases: list[str] + :param warnings: Required. Warnings encountered while processing document. + :type warnings: list[~azure.ai.textanalytics.v3_1_preview_4.models.TextAnalyticsWarning] + :param statistics: if showStats=true was specified in the request this field will contain + information about the document payload. + :type statistics: ~azure.ai.textanalytics.v3_1_preview_4.models.DocumentStatistics + """ + + _validation = { + 'id': {'required': True}, + 'key_phrases': {'required': True}, + 'warnings': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'key_phrases': {'key': 'keyPhrases', 'type': '[str]'}, + 'warnings': {'key': 'warnings', 'type': '[TextAnalyticsWarning]'}, + 'statistics': {'key': 'statistics', 'type': 'DocumentStatistics'}, + } + + def __init__( + self, + **kwargs + ): + super(DocumentKeyPhrases, self).__init__(**kwargs) + self.id = kwargs['id'] + self.key_phrases = kwargs['key_phrases'] + self.warnings = kwargs['warnings'] + self.statistics = kwargs.get('statistics', None) + + +class DocumentLanguage(msrest.serialization.Model): + """DocumentLanguage. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. Unique, non-empty document identifier. + :type id: str + :param detected_language: Required. Detected Language. + :type detected_language: ~azure.ai.textanalytics.v3_1_preview_4.models.DetectedLanguage + :param warnings: Required. Warnings encountered while processing document. + :type warnings: list[~azure.ai.textanalytics.v3_1_preview_4.models.TextAnalyticsWarning] + :param statistics: if showStats=true was specified in the request this field will contain + information about the document payload. + :type statistics: ~azure.ai.textanalytics.v3_1_preview_4.models.DocumentStatistics + """ + + _validation = { + 'id': {'required': True}, + 'detected_language': {'required': True}, + 'warnings': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'detected_language': {'key': 'detectedLanguage', 'type': 'DetectedLanguage'}, + 'warnings': {'key': 'warnings', 'type': '[TextAnalyticsWarning]'}, + 'statistics': {'key': 'statistics', 'type': 'DocumentStatistics'}, + } + + def __init__( + self, + **kwargs + ): + super(DocumentLanguage, self).__init__(**kwargs) + self.id = kwargs['id'] + self.detected_language = kwargs['detected_language'] + self.warnings = kwargs['warnings'] + self.statistics = kwargs.get('statistics', None) + + +class DocumentLinkedEntities(msrest.serialization.Model): + """DocumentLinkedEntities. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. Unique, non-empty document identifier. + :type id: str + :param entities: Required. Recognized well known entities in the document. + :type entities: list[~azure.ai.textanalytics.v3_1_preview_4.models.LinkedEntity] + :param warnings: Required. Warnings encountered while processing document. + :type warnings: list[~azure.ai.textanalytics.v3_1_preview_4.models.TextAnalyticsWarning] + :param statistics: if showStats=true was specified in the request this field will contain + information about the document payload. 
+ :type statistics: ~azure.ai.textanalytics.v3_1_preview_4.models.DocumentStatistics + """ + + _validation = { + 'id': {'required': True}, + 'entities': {'required': True}, + 'warnings': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'entities': {'key': 'entities', 'type': '[LinkedEntity]'}, + 'warnings': {'key': 'warnings', 'type': '[TextAnalyticsWarning]'}, + 'statistics': {'key': 'statistics', 'type': 'DocumentStatistics'}, + } + + def __init__( + self, + **kwargs + ): + super(DocumentLinkedEntities, self).__init__(**kwargs) + self.id = kwargs['id'] + self.entities = kwargs['entities'] + self.warnings = kwargs['warnings'] + self.statistics = kwargs.get('statistics', None) + + +class DocumentSentiment(msrest.serialization.Model): + """DocumentSentiment. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. Unique, non-empty document identifier. + :type id: str + :param sentiment: Required. Predicted sentiment for document (Negative, Neutral, Positive, or + Mixed). Possible values include: "positive", "neutral", "negative", "mixed". + :type sentiment: str or ~azure.ai.textanalytics.v3_1_preview_4.models.DocumentSentimentValue + :param statistics: if showStats=true was specified in the request this field will contain + information about the document payload. + :type statistics: ~azure.ai.textanalytics.v3_1_preview_4.models.DocumentStatistics + :param confidence_scores: Required. Document level sentiment confidence scores between 0 and 1 + for each sentiment class. + :type confidence_scores: + ~azure.ai.textanalytics.v3_1_preview_4.models.SentimentConfidenceScorePerLabel + :param sentences: Required. Sentence level sentiment analysis. + :type sentences: list[~azure.ai.textanalytics.v3_1_preview_4.models.SentenceSentiment] + :param warnings: Required. Warnings encountered while processing document. + :type warnings: list[~azure.ai.textanalytics.v3_1_preview_4.models.TextAnalyticsWarning] + """ + + _validation = { + 'id': {'required': True}, + 'sentiment': {'required': True}, + 'confidence_scores': {'required': True}, + 'sentences': {'required': True}, + 'warnings': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'sentiment': {'key': 'sentiment', 'type': 'str'}, + 'statistics': {'key': 'statistics', 'type': 'DocumentStatistics'}, + 'confidence_scores': {'key': 'confidenceScores', 'type': 'SentimentConfidenceScorePerLabel'}, + 'sentences': {'key': 'sentences', 'type': '[SentenceSentiment]'}, + 'warnings': {'key': 'warnings', 'type': '[TextAnalyticsWarning]'}, + } + + def __init__( + self, + **kwargs + ): + super(DocumentSentiment, self).__init__(**kwargs) + self.id = kwargs['id'] + self.sentiment = kwargs['sentiment'] + self.statistics = kwargs.get('statistics', None) + self.confidence_scores = kwargs['confidence_scores'] + self.sentences = kwargs['sentences'] + self.warnings = kwargs['warnings'] + + +class DocumentStatistics(msrest.serialization.Model): + """if showStats=true was specified in the request this field will contain information about the document payload. + + All required parameters must be populated in order to send to Azure. + + :param characters_count: Required. Number of text elements recognized in the document. + :type characters_count: int + :param transactions_count: Required. Number of transactions for the document. 
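In practice these response models are produced by deserializing the service payload rather than constructed by hand; a hedged sketch using msrest's `Model.deserialize` with a hand-written payload shaped like the `DocumentSentiment` schema above (sample data only, not real service output):

```python
from azure.ai.textanalytics._generated.v3_1_preview_4 import models as _models

# Hand-written sample payload using the wire keys from _attribute_map.
payload = {
    "id": "1",
    "sentiment": "positive",
    "confidenceScores": {"positive": 0.90, "neutral": 0.08, "negative": 0.02},
    "sentences": [],
    "warnings": [],
}

doc = _models.DocumentSentiment.deserialize(payload)
print(doc.sentiment, doc.confidence_scores.positive)  # positive 0.9
```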
+ :type transactions_count: int + """ + + _validation = { + 'characters_count': {'required': True}, + 'transactions_count': {'required': True}, + } + + _attribute_map = { + 'characters_count': {'key': 'charactersCount', 'type': 'int'}, + 'transactions_count': {'key': 'transactionsCount', 'type': 'int'}, + } + + def __init__( + self, + **kwargs + ): + super(DocumentStatistics, self).__init__(**kwargs) + self.characters_count = kwargs['characters_count'] + self.transactions_count = kwargs['transactions_count'] + + +class EntitiesResult(msrest.serialization.Model): + """EntitiesResult. + + All required parameters must be populated in order to send to Azure. + + :param documents: Required. Response by document. + :type documents: list[~azure.ai.textanalytics.v3_1_preview_4.models.DocumentEntities] + :param errors: Required. Errors by document id. + :type errors: list[~azure.ai.textanalytics.v3_1_preview_4.models.DocumentError] + :param statistics: if showStats=true was specified in the request this field will contain + information about the request payload. + :type statistics: ~azure.ai.textanalytics.v3_1_preview_4.models.RequestStatistics + :param model_version: Required. This field indicates which model is used for scoring. + :type model_version: str + """ + + _validation = { + 'documents': {'required': True}, + 'errors': {'required': True}, + 'model_version': {'required': True}, + } + + _attribute_map = { + 'documents': {'key': 'documents', 'type': '[DocumentEntities]'}, + 'errors': {'key': 'errors', 'type': '[DocumentError]'}, + 'statistics': {'key': 'statistics', 'type': 'RequestStatistics'}, + 'model_version': {'key': 'modelVersion', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(EntitiesResult, self).__init__(**kwargs) + self.documents = kwargs['documents'] + self.errors = kwargs['errors'] + self.statistics = kwargs.get('statistics', None) + self.model_version = kwargs['model_version'] + + +class EntitiesTask(msrest.serialization.Model): + """EntitiesTask. + + :param parameters: + :type parameters: ~azure.ai.textanalytics.v3_1_preview_4.models.EntitiesTaskParameters + """ + + _attribute_map = { + 'parameters': {'key': 'parameters', 'type': 'EntitiesTaskParameters'}, + } + + def __init__( + self, + **kwargs + ): + super(EntitiesTask, self).__init__(**kwargs) + self.parameters = kwargs.get('parameters', None) + + +class EntitiesTaskParameters(msrest.serialization.Model): + """EntitiesTaskParameters. + + :param model_version: + :type model_version: str + :param string_index_type: Possible values include: "TextElements_v8", "UnicodeCodePoint", + "Utf16CodeUnit". Default value: "TextElements_v8". + :type string_index_type: str or + ~azure.ai.textanalytics.v3_1_preview_4.models.StringIndexTypeResponse + """ + + _attribute_map = { + 'model_version': {'key': 'model-version', 'type': 'str'}, + 'string_index_type': {'key': 'stringIndexType', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(EntitiesTaskParameters, self).__init__(**kwargs) + self.model_version = kwargs.get('model_version', "latest") + self.string_index_type = kwargs.get('string_index_type', "TextElements_v8") + + +class Entity(msrest.serialization.Model): + """Entity. + + All required parameters must be populated in order to send to Azure. + + :param text: Required. Entity text as appears in the request. + :type text: str + :param category: Required. Entity type. + :type category: str + :param subcategory: (Optional) Entity sub type. + :type subcategory: str + :param offset: Required. 
Start position for the entity text. Use of different 'stringIndexType' + values can affect the offset returned. + :type offset: int + :param length: Required. Length for the entity text. Use of different 'stringIndexType' values + can affect the length returned. + :type length: int + :param confidence_score: Required. Confidence score between 0 and 1 of the extracted entity. + :type confidence_score: float + """ + + _validation = { + 'text': {'required': True}, + 'category': {'required': True}, + 'offset': {'required': True}, + 'length': {'required': True}, + 'confidence_score': {'required': True}, + } + + _attribute_map = { + 'text': {'key': 'text', 'type': 'str'}, + 'category': {'key': 'category', 'type': 'str'}, + 'subcategory': {'key': 'subcategory', 'type': 'str'}, + 'offset': {'key': 'offset', 'type': 'int'}, + 'length': {'key': 'length', 'type': 'int'}, + 'confidence_score': {'key': 'confidenceScore', 'type': 'float'}, + } + + def __init__( + self, + **kwargs + ): + super(Entity, self).__init__(**kwargs) + self.text = kwargs['text'] + self.category = kwargs['category'] + self.subcategory = kwargs.get('subcategory', None) + self.offset = kwargs['offset'] + self.length = kwargs['length'] + self.confidence_score = kwargs['confidence_score'] + + +class EntityLinkingResult(msrest.serialization.Model): + """EntityLinkingResult. + + All required parameters must be populated in order to send to Azure. + + :param documents: Required. Response by document. + :type documents: list[~azure.ai.textanalytics.v3_1_preview_4.models.DocumentLinkedEntities] + :param errors: Required. Errors by document id. + :type errors: list[~azure.ai.textanalytics.v3_1_preview_4.models.DocumentError] + :param statistics: if showStats=true was specified in the request this field will contain + information about the request payload. + :type statistics: ~azure.ai.textanalytics.v3_1_preview_4.models.RequestStatistics + :param model_version: Required. This field indicates which model is used for scoring. + :type model_version: str + """ + + _validation = { + 'documents': {'required': True}, + 'errors': {'required': True}, + 'model_version': {'required': True}, + } + + _attribute_map = { + 'documents': {'key': 'documents', 'type': '[DocumentLinkedEntities]'}, + 'errors': {'key': 'errors', 'type': '[DocumentError]'}, + 'statistics': {'key': 'statistics', 'type': 'RequestStatistics'}, + 'model_version': {'key': 'modelVersion', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(EntityLinkingResult, self).__init__(**kwargs) + self.documents = kwargs['documents'] + self.errors = kwargs['errors'] + self.statistics = kwargs.get('statistics', None) + self.model_version = kwargs['model_version'] + + +class EntityLinkingTask(msrest.serialization.Model): + """EntityLinkingTask. + + :param parameters: + :type parameters: ~azure.ai.textanalytics.v3_1_preview_4.models.EntityLinkingTaskParameters + """ + + _attribute_map = { + 'parameters': {'key': 'parameters', 'type': 'EntityLinkingTaskParameters'}, + } + + def __init__( + self, + **kwargs + ): + super(EntityLinkingTask, self).__init__(**kwargs) + self.parameters = kwargs.get('parameters', None) + + +class EntityLinkingTaskParameters(msrest.serialization.Model): + """EntityLinkingTaskParameters. + + :param model_version: + :type model_version: str + :param string_index_type: Possible values include: "TextElements_v8", "UnicodeCodePoint", + "Utf16CodeUnit". Default value: "TextElements_v8". 
+ :type string_index_type: str or + ~azure.ai.textanalytics.v3_1_preview_4.models.StringIndexTypeResponse + """ + + _attribute_map = { + 'model_version': {'key': 'model-version', 'type': 'str'}, + 'string_index_type': {'key': 'stringIndexType', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(EntityLinkingTaskParameters, self).__init__(**kwargs) + self.model_version = kwargs.get('model_version', "latest") + self.string_index_type = kwargs.get('string_index_type', "TextElements_v8") + + +class ErrorResponse(msrest.serialization.Model): + """ErrorResponse. + + All required parameters must be populated in order to send to Azure. + + :param error: Required. Document Error. + :type error: ~azure.ai.textanalytics.v3_1_preview_4.models.TextAnalyticsError + """ + + _validation = { + 'error': {'required': True}, + } + + _attribute_map = { + 'error': {'key': 'error', 'type': 'TextAnalyticsError'}, + } + + def __init__( + self, + **kwargs + ): + super(ErrorResponse, self).__init__(**kwargs) + self.error = kwargs['error'] + + +class HealthcareAssertion(msrest.serialization.Model): + """HealthcareAssertion. + + :param conditionality: Describes any conditionality on the entity. Possible values include: + "Hypothetical", "Conditional". + :type conditionality: str or ~azure.ai.textanalytics.v3_1_preview_4.models.Conditionality + :param certainty: Describes the entities certainty and polarity. Possible values include: + "Positive", "Positive Possible", "Neutral Possible", "Negative Possible", "Negative". + :type certainty: str or ~azure.ai.textanalytics.v3_1_preview_4.models.Certainty + :param association: Describes if the entity is the subject of the text or if it describes + someone else. Possible values include: "subject", "other". + :type association: str or ~azure.ai.textanalytics.v3_1_preview_4.models.Association + """ + + _attribute_map = { + 'conditionality': {'key': 'conditionality', 'type': 'str'}, + 'certainty': {'key': 'certainty', 'type': 'str'}, + 'association': {'key': 'association', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(HealthcareAssertion, self).__init__(**kwargs) + self.conditionality = kwargs.get('conditionality', None) + self.certainty = kwargs.get('certainty', None) + self.association = kwargs.get('association', None) + + +class HealthcareEntity(Entity): + """HealthcareEntity. + + All required parameters must be populated in order to send to Azure. + + :param text: Required. Entity text as appears in the request. + :type text: str + :param category: Required. Entity type. + :type category: str + :param subcategory: (Optional) Entity sub type. + :type subcategory: str + :param offset: Required. Start position for the entity text. Use of different 'stringIndexType' + values can affect the offset returned. + :type offset: int + :param length: Required. Length for the entity text. Use of different 'stringIndexType' values + can affect the length returned. + :type length: int + :param confidence_score: Required. Confidence score between 0 and 1 of the extracted entity. + :type confidence_score: float + :param assertion: + :type assertion: ~azure.ai.textanalytics.v3_1_preview_4.models.HealthcareAssertion + :param name: Preferred name for the entity. Example: 'histologically' would have a 'name' of + 'histologic'. + :type name: str + :param links: Entity references in known data sources. 
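The task-parameter models above bake in the service defaults ("latest" model version, "TextElements_v8" string index type) when the caller passes nothing; a small sketch, again using the internal generated module inferred from this diff:

```python
from azure.ai.textanalytics._generated.v3_1_preview_4 import models as _models

# Defaults come straight from the generated __init__:
# model_version="latest", string_index_type="TextElements_v8".
entities_task = _models.EntitiesTask(parameters=_models.EntitiesTaskParameters())
print(entities_task.parameters.model_version)       # latest
print(entities_task.parameters.string_index_type)   # TextElements_v8

# Overriding the string index type for an entity-linking task.
linking_task = _models.EntityLinkingTask(
    parameters=_models.EntityLinkingTaskParameters(string_index_type="UnicodeCodePoint")
)
print(linking_task.parameters.string_index_type)     # UnicodeCodePoint
```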
+ :type links: list[~azure.ai.textanalytics.v3_1_preview_4.models.HealthcareEntityLink] + """ + + _validation = { + 'text': {'required': True}, + 'category': {'required': True}, + 'offset': {'required': True}, + 'length': {'required': True}, + 'confidence_score': {'required': True}, + } + + _attribute_map = { + 'text': {'key': 'text', 'type': 'str'}, + 'category': {'key': 'category', 'type': 'str'}, + 'subcategory': {'key': 'subcategory', 'type': 'str'}, + 'offset': {'key': 'offset', 'type': 'int'}, + 'length': {'key': 'length', 'type': 'int'}, + 'confidence_score': {'key': 'confidenceScore', 'type': 'float'}, + 'assertion': {'key': 'assertion', 'type': 'HealthcareAssertion'}, + 'name': {'key': 'name', 'type': 'str'}, + 'links': {'key': 'links', 'type': '[HealthcareEntityLink]'}, + } + + def __init__( + self, + **kwargs + ): + super(HealthcareEntity, self).__init__(**kwargs) + self.assertion = kwargs.get('assertion', None) + self.name = kwargs.get('name', None) + self.links = kwargs.get('links', None) + + +class HealthcareEntityLink(msrest.serialization.Model): + """HealthcareEntityLink. + + All required parameters must be populated in order to send to Azure. + + :param data_source: Required. Entity Catalog. Examples include: UMLS, CHV, MSH, etc. + :type data_source: str + :param id: Required. Entity id in the given source catalog. + :type id: str + """ + + _validation = { + 'data_source': {'required': True}, + 'id': {'required': True}, + } + + _attribute_map = { + 'data_source': {'key': 'dataSource', 'type': 'str'}, + 'id': {'key': 'id', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(HealthcareEntityLink, self).__init__(**kwargs) + self.data_source = kwargs['data_source'] + self.id = kwargs['id'] + + +class HealthcareJobState(JobMetadata, Pagination): + """HealthcareJobState. + + All required parameters must be populated in order to send to Azure. + + :param next_link: + :type next_link: str + :param created_date_time: Required. + :type created_date_time: ~datetime.datetime + :param expiration_date_time: + :type expiration_date_time: ~datetime.datetime + :param job_id: Required. + :type job_id: str + :param last_update_date_time: Required. + :type last_update_date_time: ~datetime.datetime + :param status: Required. Possible values include: "notStarted", "running", "succeeded", + "failed", "rejected", "cancelled", "cancelling", "partiallyCompleted". 
+ :type status: str or ~azure.ai.textanalytics.v3_1_preview_4.models.State + :param results: + :type results: ~azure.ai.textanalytics.v3_1_preview_4.models.HealthcareResult + :param errors: + :type errors: list[~azure.ai.textanalytics.v3_1_preview_4.models.TextAnalyticsError] + """ + + _validation = { + 'created_date_time': {'required': True}, + 'job_id': {'required': True}, + 'last_update_date_time': {'required': True}, + 'status': {'required': True}, + } + + _attribute_map = { + 'next_link': {'key': '@nextLink', 'type': 'str'}, + 'created_date_time': {'key': 'createdDateTime', 'type': 'iso-8601'}, + 'expiration_date_time': {'key': 'expirationDateTime', 'type': 'iso-8601'}, + 'job_id': {'key': 'jobId', 'type': 'str'}, + 'last_update_date_time': {'key': 'lastUpdateDateTime', 'type': 'iso-8601'}, + 'status': {'key': 'status', 'type': 'str'}, + 'results': {'key': 'results', 'type': 'HealthcareResult'}, + 'errors': {'key': 'errors', 'type': '[TextAnalyticsError]'}, + } + + def __init__( + self, + **kwargs + ): + super(HealthcareJobState, self).__init__(**kwargs) + self.next_link = kwargs.get('next_link', None) + self.results = kwargs.get('results', None) + self.errors = kwargs.get('errors', None) + self.created_date_time = kwargs['created_date_time'] + self.expiration_date_time = kwargs.get('expiration_date_time', None) + self.job_id = kwargs['job_id'] + self.last_update_date_time = kwargs['last_update_date_time'] + self.status = kwargs['status'] + self.results = kwargs.get('results', None) + self.errors = kwargs.get('errors', None) + + +class HealthcareRelation(msrest.serialization.Model): + """Every relation is an entity graph of a certain relationType, where all entities are connected and have specific roles within the relation context. + + All required parameters must be populated in order to send to Azure. + + :param relation_type: Required. Type of relation. Examples include: ``DosageOfMedication`` or + 'FrequencyOfMedication', etc. Possible values include: "Abbreviation", + "DirectionOfBodyStructure", "DirectionOfCondition", "DirectionOfExamination", + "DirectionOfTreatment", "DosageOfMedication", "FormOfMedication", "FrequencyOfMedication", + "FrequencyOfTreatment", "QualifierOfCondition", "RelationOfExamination", "RouteOfMedication", + "TimeOfCondition", "TimeOfEvent", "TimeOfExamination", "TimeOfMedication", "TimeOfTreatment", + "UnitOfCondition", "UnitOfExamination", "ValueOfCondition", "ValueOfExamination". + :type relation_type: str or ~azure.ai.textanalytics.v3_1_preview_4.models.RelationType + :param entities: Required. The entities in the relation. + :type entities: list[~azure.ai.textanalytics.v3_1_preview_4.models.HealthcareRelationEntity] + """ + + _validation = { + 'relation_type': {'required': True}, + 'entities': {'required': True}, + } + + _attribute_map = { + 'relation_type': {'key': 'relationType', 'type': 'str'}, + 'entities': {'key': 'entities', 'type': '[HealthcareRelationEntity]'}, + } + + def __init__( + self, + **kwargs + ): + super(HealthcareRelation, self).__init__(**kwargs) + self.relation_type = kwargs['relation_type'] + self.entities = kwargs['entities'] + + +class HealthcareRelationEntity(msrest.serialization.Model): + """HealthcareRelationEntity. + + All required parameters must be populated in order to send to Azure. + + :param ref: Required. Reference link object, using a JSON pointer RFC 6901 (URI Fragment + Identifier Representation), pointing to the entity . + :type ref: str + :param role: Required. Role of entity in the relationship. 
For example: 'CD20-positive diffuse + large B-cell lymphoma' has the following entities with their roles in parenthesis: CD20 + (GeneOrProtein), Positive (Expression), diffuse large B-cell lymphoma (Diagnosis). + :type role: str + """ + + _validation = { + 'ref': {'required': True}, + 'role': {'required': True}, + } + + _attribute_map = { + 'ref': {'key': 'ref', 'type': 'str'}, + 'role': {'key': 'role', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(HealthcareRelationEntity, self).__init__(**kwargs) + self.ref = kwargs['ref'] + self.role = kwargs['role'] + + +class HealthcareResult(msrest.serialization.Model): + """HealthcareResult. + + All required parameters must be populated in order to send to Azure. + + :param documents: Required. Response by document. + :type documents: list[~azure.ai.textanalytics.v3_1_preview_4.models.DocumentHealthcareEntities] + :param errors: Required. Errors by document id. + :type errors: list[~azure.ai.textanalytics.v3_1_preview_4.models.DocumentError] + :param statistics: if showStats=true was specified in the request this field will contain + information about the request payload. + :type statistics: ~azure.ai.textanalytics.v3_1_preview_4.models.RequestStatistics + :param model_version: Required. This field indicates which model is used for scoring. + :type model_version: str + """ + + _validation = { + 'documents': {'required': True}, + 'errors': {'required': True}, + 'model_version': {'required': True}, + } + + _attribute_map = { + 'documents': {'key': 'documents', 'type': '[DocumentHealthcareEntities]'}, + 'errors': {'key': 'errors', 'type': '[DocumentError]'}, + 'statistics': {'key': 'statistics', 'type': 'RequestStatistics'}, + 'model_version': {'key': 'modelVersion', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(HealthcareResult, self).__init__(**kwargs) + self.documents = kwargs['documents'] + self.errors = kwargs['errors'] + self.statistics = kwargs.get('statistics', None) + self.model_version = kwargs['model_version'] + + +class InnerError(msrest.serialization.Model): + """InnerError. + + All required parameters must be populated in order to send to Azure. + + :param code: Required. Error code. Possible values include: "InvalidParameterValue", + "InvalidRequestBodyFormat", "EmptyRequest", "MissingInputRecords", "InvalidDocument", + "ModelVersionIncorrect", "InvalidDocumentBatch", "UnsupportedLanguageCode", + "InvalidCountryHint". + :type code: str or ~azure.ai.textanalytics.v3_1_preview_4.models.InnerErrorCodeValue + :param message: Required. Error message. + :type message: str + :param details: Error details. + :type details: dict[str, str] + :param target: Error target. + :type target: str + :param innererror: Inner error contains more specific information. 
+ :type innererror: ~azure.ai.textanalytics.v3_1_preview_4.models.InnerError + """ + + _validation = { + 'code': {'required': True}, + 'message': {'required': True}, + } + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'details': {'key': 'details', 'type': '{str}'}, + 'target': {'key': 'target', 'type': 'str'}, + 'innererror': {'key': 'innererror', 'type': 'InnerError'}, + } + + def __init__( + self, + **kwargs + ): + super(InnerError, self).__init__(**kwargs) + self.code = kwargs['code'] + self.message = kwargs['message'] + self.details = kwargs.get('details', None) + self.target = kwargs.get('target', None) + self.innererror = kwargs.get('innererror', None) + + +class JobManifestTasks(msrest.serialization.Model): + """The set of tasks to execute on the input documents. Cannot specify the same task more than once. + + :param entity_recognition_tasks: + :type entity_recognition_tasks: + list[~azure.ai.textanalytics.v3_1_preview_4.models.EntitiesTask] + :param entity_recognition_pii_tasks: + :type entity_recognition_pii_tasks: list[~azure.ai.textanalytics.v3_1_preview_4.models.PiiTask] + :param key_phrase_extraction_tasks: + :type key_phrase_extraction_tasks: + list[~azure.ai.textanalytics.v3_1_preview_4.models.KeyPhrasesTask] + :param entity_linking_tasks: + :type entity_linking_tasks: + list[~azure.ai.textanalytics.v3_1_preview_4.models.EntityLinkingTask] + """ + + _attribute_map = { + 'entity_recognition_tasks': {'key': 'entityRecognitionTasks', 'type': '[EntitiesTask]'}, + 'entity_recognition_pii_tasks': {'key': 'entityRecognitionPiiTasks', 'type': '[PiiTask]'}, + 'key_phrase_extraction_tasks': {'key': 'keyPhraseExtractionTasks', 'type': '[KeyPhrasesTask]'}, + 'entity_linking_tasks': {'key': 'entityLinkingTasks', 'type': '[EntityLinkingTask]'}, + } + + def __init__( + self, + **kwargs + ): + super(JobManifestTasks, self).__init__(**kwargs) + self.entity_recognition_tasks = kwargs.get('entity_recognition_tasks', None) + self.entity_recognition_pii_tasks = kwargs.get('entity_recognition_pii_tasks', None) + self.key_phrase_extraction_tasks = kwargs.get('key_phrase_extraction_tasks', None) + self.entity_linking_tasks = kwargs.get('entity_linking_tasks', None) + + +class KeyPhraseResult(msrest.serialization.Model): + """KeyPhraseResult. + + All required parameters must be populated in order to send to Azure. + + :param documents: Required. Response by document. + :type documents: list[~azure.ai.textanalytics.v3_1_preview_4.models.DocumentKeyPhrases] + :param errors: Required. Errors by document id. + :type errors: list[~azure.ai.textanalytics.v3_1_preview_4.models.DocumentError] + :param statistics: if showStats=true was specified in the request this field will contain + information about the request payload. + :type statistics: ~azure.ai.textanalytics.v3_1_preview_4.models.RequestStatistics + :param model_version: Required. This field indicates which model is used for scoring. 
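`JobManifestTasks` is the container the analyze job consumes; a hedged sketch of assembling it from the task models defined earlier in this file (per the docstring, each task type may appear at most once):

```python
from azure.ai.textanalytics._generated.v3_1_preview_4 import models as _models

# Each task list is optional; the docstring forbids repeating a task type.
tasks = _models.JobManifestTasks(
    entity_recognition_tasks=[_models.EntitiesTask()],
    entity_linking_tasks=[_models.EntityLinkingTask()],
)

# None-valued attributes are skipped during serialization, so a task with
# no explicit parameters serializes as an empty object.
print(tasks.serialize())
# {'entityRecognitionTasks': [{}], 'entityLinkingTasks': [{}]}
```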
+ :type model_version: str + """ + + _validation = { + 'documents': {'required': True}, + 'errors': {'required': True}, + 'model_version': {'required': True}, + } + + _attribute_map = { + 'documents': {'key': 'documents', 'type': '[DocumentKeyPhrases]'}, + 'errors': {'key': 'errors', 'type': '[DocumentError]'}, + 'statistics': {'key': 'statistics', 'type': 'RequestStatistics'}, + 'model_version': {'key': 'modelVersion', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(KeyPhraseResult, self).__init__(**kwargs) + self.documents = kwargs['documents'] + self.errors = kwargs['errors'] + self.statistics = kwargs.get('statistics', None) + self.model_version = kwargs['model_version'] + + +class KeyPhrasesTask(msrest.serialization.Model): + """KeyPhrasesTask. + + :param parameters: + :type parameters: ~azure.ai.textanalytics.v3_1_preview_4.models.KeyPhrasesTaskParameters + """ + + _attribute_map = { + 'parameters': {'key': 'parameters', 'type': 'KeyPhrasesTaskParameters'}, + } + + def __init__( + self, + **kwargs + ): + super(KeyPhrasesTask, self).__init__(**kwargs) + self.parameters = kwargs.get('parameters', None) + + +class KeyPhrasesTaskParameters(msrest.serialization.Model): + """KeyPhrasesTaskParameters. + + :param model_version: + :type model_version: str + """ + + _attribute_map = { + 'model_version': {'key': 'model-version', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(KeyPhrasesTaskParameters, self).__init__(**kwargs) + self.model_version = kwargs.get('model_version', "latest") + + +class LanguageBatchInput(msrest.serialization.Model): + """LanguageBatchInput. + + All required parameters must be populated in order to send to Azure. + + :param documents: Required. + :type documents: list[~azure.ai.textanalytics.v3_1_preview_4.models.LanguageInput] + """ + + _validation = { + 'documents': {'required': True}, + } + + _attribute_map = { + 'documents': {'key': 'documents', 'type': '[LanguageInput]'}, + } + + def __init__( + self, + **kwargs + ): + super(LanguageBatchInput, self).__init__(**kwargs) + self.documents = kwargs['documents'] + + +class LanguageInput(msrest.serialization.Model): + """LanguageInput. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. Unique, non-empty document identifier. + :type id: str + :param text: Required. + :type text: str + :param country_hint: + :type country_hint: str + """ + + _validation = { + 'id': {'required': True}, + 'text': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'text': {'key': 'text', 'type': 'str'}, + 'country_hint': {'key': 'countryHint', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(LanguageInput, self).__init__(**kwargs) + self.id = kwargs['id'] + self.text = kwargs['text'] + self.country_hint = kwargs.get('country_hint', None) + + +class LanguageResult(msrest.serialization.Model): + """LanguageResult. + + All required parameters must be populated in order to send to Azure. + + :param documents: Required. Response by document. + :type documents: list[~azure.ai.textanalytics.v3_1_preview_4.models.DocumentLanguage] + :param errors: Required. Errors by document id. + :type errors: list[~azure.ai.textanalytics.v3_1_preview_4.models.DocumentError] + :param statistics: if showStats=true was specified in the request this field will contain + information about the request payload. 
+ :type statistics: ~azure.ai.textanalytics.v3_1_preview_4.models.RequestStatistics + :param model_version: Required. This field indicates which model is used for scoring. + :type model_version: str + """ + + _validation = { + 'documents': {'required': True}, + 'errors': {'required': True}, + 'model_version': {'required': True}, + } + + _attribute_map = { + 'documents': {'key': 'documents', 'type': '[DocumentLanguage]'}, + 'errors': {'key': 'errors', 'type': '[DocumentError]'}, + 'statistics': {'key': 'statistics', 'type': 'RequestStatistics'}, + 'model_version': {'key': 'modelVersion', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(LanguageResult, self).__init__(**kwargs) + self.documents = kwargs['documents'] + self.errors = kwargs['errors'] + self.statistics = kwargs.get('statistics', None) + self.model_version = kwargs['model_version'] + + +class LinkedEntity(msrest.serialization.Model): + """LinkedEntity. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. Entity Linking formal name. + :type name: str + :param matches: Required. List of instances this entity appears in the text. + :type matches: list[~azure.ai.textanalytics.v3_1_preview_4.models.Match] + :param language: Required. Language used in the data source. + :type language: str + :param id: Unique identifier of the recognized entity from the data source. + :type id: str + :param url: Required. URL for the entity's page from the data source. + :type url: str + :param data_source: Required. Data source used to extract entity linking, such as Wiki/Bing + etc. + :type data_source: str + :param bing_id: Bing Entity Search API unique identifier of the recognized entity. + :type bing_id: str + """ + + _validation = { + 'name': {'required': True}, + 'matches': {'required': True}, + 'language': {'required': True}, + 'url': {'required': True}, + 'data_source': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'matches': {'key': 'matches', 'type': '[Match]'}, + 'language': {'key': 'language', 'type': 'str'}, + 'id': {'key': 'id', 'type': 'str'}, + 'url': {'key': 'url', 'type': 'str'}, + 'data_source': {'key': 'dataSource', 'type': 'str'}, + 'bing_id': {'key': 'bingId', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(LinkedEntity, self).__init__(**kwargs) + self.name = kwargs['name'] + self.matches = kwargs['matches'] + self.language = kwargs['language'] + self.id = kwargs.get('id', None) + self.url = kwargs['url'] + self.data_source = kwargs['data_source'] + self.bing_id = kwargs.get('bing_id', None) + + +class Match(msrest.serialization.Model): + """Match. + + All required parameters must be populated in order to send to Azure. + + :param confidence_score: Required. If a well known item is recognized, a decimal number + denoting the confidence level between 0 and 1 will be returned. + :type confidence_score: float + :param text: Required. Entity text as appears in the request. + :type text: str + :param offset: Required. Start position for the entity match text. + :type offset: int + :param length: Required. Length for the entity match text. 
+ :type length: int + """ + + _validation = { + 'confidence_score': {'required': True}, + 'text': {'required': True}, + 'offset': {'required': True}, + 'length': {'required': True}, + } + + _attribute_map = { + 'confidence_score': {'key': 'confidenceScore', 'type': 'float'}, + 'text': {'key': 'text', 'type': 'str'}, + 'offset': {'key': 'offset', 'type': 'int'}, + 'length': {'key': 'length', 'type': 'int'}, + } + + def __init__( + self, + **kwargs + ): + super(Match, self).__init__(**kwargs) + self.confidence_score = kwargs['confidence_score'] + self.text = kwargs['text'] + self.offset = kwargs['offset'] + self.length = kwargs['length'] + + +class MultiLanguageBatchInput(msrest.serialization.Model): + """Contains a set of input documents to be analyzed by the service. + + All required parameters must be populated in order to send to Azure. + + :param documents: Required. The set of documents to process as part of this batch. + :type documents: list[~azure.ai.textanalytics.v3_1_preview_4.models.MultiLanguageInput] + """ + + _validation = { + 'documents': {'required': True}, + } + + _attribute_map = { + 'documents': {'key': 'documents', 'type': '[MultiLanguageInput]'}, + } + + def __init__( + self, + **kwargs + ): + super(MultiLanguageBatchInput, self).__init__(**kwargs) + self.documents = kwargs['documents'] + + +class MultiLanguageInput(msrest.serialization.Model): + """Contains an input document to be analyzed by the service. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. A unique, non-empty document identifier. + :type id: str + :param text: Required. The input text to process. + :type text: str + :param language: (Optional) This is the 2 letter ISO 639-1 representation of a language. For + example, use "en" for English; "es" for Spanish etc. If not set, use "en" for English as + default. + :type language: str + """ + + _validation = { + 'id': {'required': True}, + 'text': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'text': {'key': 'text', 'type': 'str'}, + 'language': {'key': 'language', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MultiLanguageInput, self).__init__(**kwargs) + self.id = kwargs['id'] + self.text = kwargs['text'] + self.language = kwargs.get('language', None) + + +class PiiDocumentEntities(msrest.serialization.Model): + """PiiDocumentEntities. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. Unique, non-empty document identifier. + :type id: str + :param redacted_text: Required. Returns redacted text. + :type redacted_text: str + :param entities: Required. Recognized entities in the document. + :type entities: list[~azure.ai.textanalytics.v3_1_preview_4.models.Entity] + :param warnings: Required. Warnings encountered while processing document. + :type warnings: list[~azure.ai.textanalytics.v3_1_preview_4.models.TextAnalyticsWarning] + :param statistics: if showStats=true was specified in the request this field will contain + information about the document payload. 
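On the request side, `MultiLanguageBatchInput` wraps the documents most endpoints accept; a short sketch (per the docstring, an omitted `language` falls back to "en" on the service side):

```python
from azure.ai.textanalytics._generated.v3_1_preview_4 import models as _models

batch = _models.MultiLanguageBatchInput(
    documents=[
        _models.MultiLanguageInput(id="1", text="The rooms were beautiful."),
        _models.MultiLanguageInput(id="2", text="El servicio fue excelente.", language="es"),
    ]
)
print([d.id for d in batch.documents])  # ['1', '2']
```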
+ :type statistics: ~azure.ai.textanalytics.v3_1_preview_4.models.DocumentStatistics + """ + + _validation = { + 'id': {'required': True}, + 'redacted_text': {'required': True}, + 'entities': {'required': True}, + 'warnings': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'redacted_text': {'key': 'redactedText', 'type': 'str'}, + 'entities': {'key': 'entities', 'type': '[Entity]'}, + 'warnings': {'key': 'warnings', 'type': '[TextAnalyticsWarning]'}, + 'statistics': {'key': 'statistics', 'type': 'DocumentStatistics'}, + } + + def __init__( + self, + **kwargs + ): + super(PiiDocumentEntities, self).__init__(**kwargs) + self.id = kwargs['id'] + self.redacted_text = kwargs['redacted_text'] + self.entities = kwargs['entities'] + self.warnings = kwargs['warnings'] + self.statistics = kwargs.get('statistics', None) + + +class PiiResult(msrest.serialization.Model): + """PiiResult. + + All required parameters must be populated in order to send to Azure. + + :param documents: Required. Response by document. + :type documents: list[~azure.ai.textanalytics.v3_1_preview_4.models.PiiDocumentEntities] + :param errors: Required. Errors by document id. + :type errors: list[~azure.ai.textanalytics.v3_1_preview_4.models.DocumentError] + :param statistics: if showStats=true was specified in the request this field will contain + information about the request payload. + :type statistics: ~azure.ai.textanalytics.v3_1_preview_4.models.RequestStatistics + :param model_version: Required. This field indicates which model is used for scoring. + :type model_version: str + """ + + _validation = { + 'documents': {'required': True}, + 'errors': {'required': True}, + 'model_version': {'required': True}, + } + + _attribute_map = { + 'documents': {'key': 'documents', 'type': '[PiiDocumentEntities]'}, + 'errors': {'key': 'errors', 'type': '[DocumentError]'}, + 'statistics': {'key': 'statistics', 'type': 'RequestStatistics'}, + 'model_version': {'key': 'modelVersion', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(PiiResult, self).__init__(**kwargs) + self.documents = kwargs['documents'] + self.errors = kwargs['errors'] + self.statistics = kwargs.get('statistics', None) + self.model_version = kwargs['model_version'] + + +class PiiTask(msrest.serialization.Model): + """PiiTask. + + :param parameters: + :type parameters: ~azure.ai.textanalytics.v3_1_preview_4.models.PiiTaskParameters + """ + + _attribute_map = { + 'parameters': {'key': 'parameters', 'type': 'PiiTaskParameters'}, + } + + def __init__( + self, + **kwargs + ): + super(PiiTask, self).__init__(**kwargs) + self.parameters = kwargs.get('parameters', None) + + +class PiiTaskParameters(msrest.serialization.Model): + """PiiTaskParameters. + + :param domain: Possible values include: "phi", "none". Default value: "none". + :type domain: str or ~azure.ai.textanalytics.v3_1_preview_4.models.PiiTaskParametersDomain + :param model_version: + :type model_version: str + :param pii_categories: (Optional) describes the PII categories to return. + :type pii_categories: list[str or ~azure.ai.textanalytics.v3_1_preview_4.models.PiiCategory] + :param string_index_type: Possible values include: "TextElements_v8", "UnicodeCodePoint", + "Utf16CodeUnit". Default value: "TextElements_v8". 
+ :type string_index_type: str or + ~azure.ai.textanalytics.v3_1_preview_4.models.StringIndexTypeResponse + """ + + _validation = { + 'pii_categories': {'unique': True}, + } + + _attribute_map = { + 'domain': {'key': 'domain', 'type': 'str'}, + 'model_version': {'key': 'model-version', 'type': 'str'}, + 'pii_categories': {'key': 'piiCategories', 'type': '[str]'}, + 'string_index_type': {'key': 'stringIndexType', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(PiiTaskParameters, self).__init__(**kwargs) + self.domain = kwargs.get('domain', "none") + self.model_version = kwargs.get('model_version', "latest") + self.pii_categories = kwargs.get('pii_categories', None) + self.string_index_type = kwargs.get('string_index_type', "TextElements_v8") + + +class RequestStatistics(msrest.serialization.Model): + """if showStats=true was specified in the request this field will contain information about the request payload. + + All required parameters must be populated in order to send to Azure. + + :param documents_count: Required. Number of documents submitted in the request. + :type documents_count: int + :param valid_documents_count: Required. Number of valid documents. This excludes empty, over- + size limit or non-supported languages documents. + :type valid_documents_count: int + :param erroneous_documents_count: Required. Number of invalid documents. This includes empty, + over-size limit or non-supported languages documents. + :type erroneous_documents_count: int + :param transactions_count: Required. Number of transactions for the request. + :type transactions_count: long + """ + + _validation = { + 'documents_count': {'required': True}, + 'valid_documents_count': {'required': True}, + 'erroneous_documents_count': {'required': True}, + 'transactions_count': {'required': True}, + } + + _attribute_map = { + 'documents_count': {'key': 'documentsCount', 'type': 'int'}, + 'valid_documents_count': {'key': 'validDocumentsCount', 'type': 'int'}, + 'erroneous_documents_count': {'key': 'erroneousDocumentsCount', 'type': 'int'}, + 'transactions_count': {'key': 'transactionsCount', 'type': 'long'}, + } + + def __init__( + self, + **kwargs + ): + super(RequestStatistics, self).__init__(**kwargs) + self.documents_count = kwargs['documents_count'] + self.valid_documents_count = kwargs['valid_documents_count'] + self.erroneous_documents_count = kwargs['erroneous_documents_count'] + self.transactions_count = kwargs['transactions_count'] + + +class SentenceAssessment(msrest.serialization.Model): + """SentenceAssessment. + + All required parameters must be populated in order to send to Azure. + + :param sentiment: Required. Assessment sentiment in the sentence. Possible values include: + "positive", "mixed", "negative". + :type sentiment: str or ~azure.ai.textanalytics.v3_1_preview_4.models.TokenSentimentValue + :param confidence_scores: Required. Assessment sentiment confidence scores in the sentence. + :type confidence_scores: + ~azure.ai.textanalytics.v3_1_preview_4.models.TargetConfidenceScoreLabel + :param offset: Required. The assessment offset from the start of the sentence. + :type offset: int + :param length: Required. The length of the assessment. + :type length: int + :param text: Required. The assessment text detected. + :type text: str + :param is_negated: Required. The indicator representing if the assessment is negated. 
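`PiiTaskParameters` layers a domain filter and an optional category allow-list on top of the shared defaults, and the generated validation marks `piiCategories` as unique. A sketch; the category value below is a hypothetical placeholder, so check the `PiiCategory` enum for the real names:

```python
from azure.ai.textanalytics._generated.v3_1_preview_4 import models as _models

pii_params = _models.PiiTaskParameters(
    domain="phi",                                  # default is "none"
    pii_categories=["USSocialSecurityNumber"],     # hypothetical value; see the PiiCategory enum
)
print(pii_params.model_version, pii_params.string_index_type)
# latest TextElements_v8
```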
+ :type is_negated: bool + """ + + _validation = { + 'sentiment': {'required': True}, + 'confidence_scores': {'required': True}, + 'offset': {'required': True}, + 'length': {'required': True}, + 'text': {'required': True}, + 'is_negated': {'required': True}, + } + + _attribute_map = { + 'sentiment': {'key': 'sentiment', 'type': 'str'}, + 'confidence_scores': {'key': 'confidenceScores', 'type': 'TargetConfidenceScoreLabel'}, + 'offset': {'key': 'offset', 'type': 'int'}, + 'length': {'key': 'length', 'type': 'int'}, + 'text': {'key': 'text', 'type': 'str'}, + 'is_negated': {'key': 'isNegated', 'type': 'bool'}, + } + + def __init__( + self, + **kwargs + ): + super(SentenceAssessment, self).__init__(**kwargs) + self.sentiment = kwargs['sentiment'] + self.confidence_scores = kwargs['confidence_scores'] + self.offset = kwargs['offset'] + self.length = kwargs['length'] + self.text = kwargs['text'] + self.is_negated = kwargs['is_negated'] + + +class SentenceSentiment(msrest.serialization.Model): + """SentenceSentiment. + + All required parameters must be populated in order to send to Azure. + + :param text: Required. The sentence text. + :type text: str + :param sentiment: Required. The predicted Sentiment for the sentence. Possible values include: + "positive", "neutral", "negative". + :type sentiment: str or ~azure.ai.textanalytics.v3_1_preview_4.models.SentenceSentimentValue + :param confidence_scores: Required. The sentiment confidence score between 0 and 1 for the + sentence for all classes. + :type confidence_scores: + ~azure.ai.textanalytics.v3_1_preview_4.models.SentimentConfidenceScorePerLabel + :param offset: Required. The sentence offset from the start of the document. + :type offset: int + :param length: Required. The length of the sentence. + :type length: int + :param targets: The array of sentence targets for the sentence. + :type targets: list[~azure.ai.textanalytics.v3_1_preview_4.models.SentenceTarget] + :param assessments: The array of assessments for the sentence. + :type assessments: list[~azure.ai.textanalytics.v3_1_preview_4.models.SentenceAssessment] + """ + + _validation = { + 'text': {'required': True}, + 'sentiment': {'required': True}, + 'confidence_scores': {'required': True}, + 'offset': {'required': True}, + 'length': {'required': True}, + } + + _attribute_map = { + 'text': {'key': 'text', 'type': 'str'}, + 'sentiment': {'key': 'sentiment', 'type': 'str'}, + 'confidence_scores': {'key': 'confidenceScores', 'type': 'SentimentConfidenceScorePerLabel'}, + 'offset': {'key': 'offset', 'type': 'int'}, + 'length': {'key': 'length', 'type': 'int'}, + 'targets': {'key': 'targets', 'type': '[SentenceTarget]'}, + 'assessments': {'key': 'assessments', 'type': '[SentenceAssessment]'}, + } + + def __init__( + self, + **kwargs + ): + super(SentenceSentiment, self).__init__(**kwargs) + self.text = kwargs['text'] + self.sentiment = kwargs['sentiment'] + self.confidence_scores = kwargs['confidence_scores'] + self.offset = kwargs['offset'] + self.length = kwargs['length'] + self.targets = kwargs.get('targets', None) + self.assessments = kwargs.get('assessments', None) + + +class SentenceTarget(msrest.serialization.Model): + """SentenceTarget. + + All required parameters must be populated in order to send to Azure. + + :param sentiment: Required. Targeted sentiment in the sentence. Possible values include: + "positive", "mixed", "negative". + :type sentiment: str or ~azure.ai.textanalytics.v3_1_preview_4.models.TokenSentimentValue + :param confidence_scores: Required. 
Target sentiment confidence scores for the target in the + sentence. + :type confidence_scores: + ~azure.ai.textanalytics.v3_1_preview_4.models.TargetConfidenceScoreLabel + :param offset: Required. The target offset from the start of the sentence. + :type offset: int + :param length: Required. The length of the target. + :type length: int + :param text: Required. The target text detected. + :type text: str + :param relations: Required. The array of either assessment or target objects which is related + to the target. + :type relations: list[~azure.ai.textanalytics.v3_1_preview_4.models.TargetRelation] + """ + + _validation = { + 'sentiment': {'required': True}, + 'confidence_scores': {'required': True}, + 'offset': {'required': True}, + 'length': {'required': True}, + 'text': {'required': True}, + 'relations': {'required': True}, + } + + _attribute_map = { + 'sentiment': {'key': 'sentiment', 'type': 'str'}, + 'confidence_scores': {'key': 'confidenceScores', 'type': 'TargetConfidenceScoreLabel'}, + 'offset': {'key': 'offset', 'type': 'int'}, + 'length': {'key': 'length', 'type': 'int'}, + 'text': {'key': 'text', 'type': 'str'}, + 'relations': {'key': 'relations', 'type': '[TargetRelation]'}, + } + + def __init__( + self, + **kwargs + ): + super(SentenceTarget, self).__init__(**kwargs) + self.sentiment = kwargs['sentiment'] + self.confidence_scores = kwargs['confidence_scores'] + self.offset = kwargs['offset'] + self.length = kwargs['length'] + self.text = kwargs['text'] + self.relations = kwargs['relations'] + + +class SentimentConfidenceScorePerLabel(msrest.serialization.Model): + """Represents the confidence scores between 0 and 1 across all sentiment classes: positive, neutral, negative. + + All required parameters must be populated in order to send to Azure. + + :param positive: Required. + :type positive: float + :param neutral: Required. + :type neutral: float + :param negative: Required. + :type negative: float + """ + + _validation = { + 'positive': {'required': True}, + 'neutral': {'required': True}, + 'negative': {'required': True}, + } + + _attribute_map = { + 'positive': {'key': 'positive', 'type': 'float'}, + 'neutral': {'key': 'neutral', 'type': 'float'}, + 'negative': {'key': 'negative', 'type': 'float'}, + } + + def __init__( + self, + **kwargs + ): + super(SentimentConfidenceScorePerLabel, self).__init__(**kwargs) + self.positive = kwargs['positive'] + self.neutral = kwargs['neutral'] + self.negative = kwargs['negative'] + + +class SentimentResponse(msrest.serialization.Model): + """SentimentResponse. + + All required parameters must be populated in order to send to Azure. + + :param documents: Required. Sentiment analysis per document. + :type documents: list[~azure.ai.textanalytics.v3_1_preview_4.models.DocumentSentiment] + :param errors: Required. Errors by document id. + :type errors: list[~azure.ai.textanalytics.v3_1_preview_4.models.DocumentError] + :param statistics: if showStats=true was specified in the request this field will contain + information about the request payload. + :type statistics: ~azure.ai.textanalytics.v3_1_preview_4.models.RequestStatistics + :param model_version: Required. This field indicates which model is used for scoring. 
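Reading opinion-mining output off a deserialized `SentenceSentiment` might look like the sketch below; `targets` and `assessments` are optional and may be `None` when opinion mining was not requested, and `TargetConfidenceScoreLabel` only carries positive and negative scores:

```python
from azure.ai.textanalytics._generated.v3_1_preview_4 import models as _models

def describe_targets(sentence: _models.SentenceSentiment) -> None:
    # targets/assessments are optional and may be None.
    for target in sentence.targets or []:
        scores = target.confidence_scores  # TargetConfidenceScoreLabel: positive/negative only
        print(f"{target.text}: {target.sentiment} "
              f"(+{scores.positive:.2f} / -{scores.negative:.2f})")
        for relation in target.relations:
            print(f"  {relation.relation_type} -> {relation.ref}")
```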
+ :type model_version: str + """ + + _validation = { + 'documents': {'required': True}, + 'errors': {'required': True}, + 'model_version': {'required': True}, + } + + _attribute_map = { + 'documents': {'key': 'documents', 'type': '[DocumentSentiment]'}, + 'errors': {'key': 'errors', 'type': '[DocumentError]'}, + 'statistics': {'key': 'statistics', 'type': 'RequestStatistics'}, + 'model_version': {'key': 'modelVersion', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(SentimentResponse, self).__init__(**kwargs) + self.documents = kwargs['documents'] + self.errors = kwargs['errors'] + self.statistics = kwargs.get('statistics', None) + self.model_version = kwargs['model_version'] + + +class TargetConfidenceScoreLabel(msrest.serialization.Model): + """Represents the confidence scores across all sentiment classes: positive, neutral, negative. + + All required parameters must be populated in order to send to Azure. + + :param positive: Required. + :type positive: float + :param negative: Required. + :type negative: float + """ + + _validation = { + 'positive': {'required': True}, + 'negative': {'required': True}, + } + + _attribute_map = { + 'positive': {'key': 'positive', 'type': 'float'}, + 'negative': {'key': 'negative', 'type': 'float'}, + } + + def __init__( + self, + **kwargs + ): + super(TargetConfidenceScoreLabel, self).__init__(**kwargs) + self.positive = kwargs['positive'] + self.negative = kwargs['negative'] + + +class TargetRelation(msrest.serialization.Model): + """TargetRelation. + + All required parameters must be populated in order to send to Azure. + + :param relation_type: Required. The type related to the target. Possible values include: + "assessment", "target". + :type relation_type: str or ~azure.ai.textanalytics.v3_1_preview_4.models.TargetRelationType + :param ref: Required. The JSON pointer indicating the linked object. + :type ref: str + """ + + _validation = { + 'relation_type': {'required': True}, + 'ref': {'required': True}, + } + + _attribute_map = { + 'relation_type': {'key': 'relationType', 'type': 'str'}, + 'ref': {'key': 'ref', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(TargetRelation, self).__init__(**kwargs) + self.relation_type = kwargs['relation_type'] + self.ref = kwargs['ref'] + + +class TasksStateTasks(msrest.serialization.Model): + """TasksStateTasks. + + All required parameters must be populated in order to send to Azure. + + :param details: + :type details: ~azure.ai.textanalytics.v3_1_preview_4.models.TasksStateTasksDetails + :param completed: Required. + :type completed: int + :param failed: Required. + :type failed: int + :param in_progress: Required. + :type in_progress: int + :param total: Required. 
+ :type total: int + :param entity_recognition_tasks: + :type entity_recognition_tasks: + list[~azure.ai.textanalytics.v3_1_preview_4.models.TasksStateTasksEntityRecognitionTasksItem] + :param entity_recognition_pii_tasks: + :type entity_recognition_pii_tasks: + list[~azure.ai.textanalytics.v3_1_preview_4.models.TasksStateTasksEntityRecognitionPiiTasksItem] + :param key_phrase_extraction_tasks: + :type key_phrase_extraction_tasks: + list[~azure.ai.textanalytics.v3_1_preview_4.models.TasksStateTasksKeyPhraseExtractionTasksItem] + :param entity_linking_tasks: + :type entity_linking_tasks: + list[~azure.ai.textanalytics.v3_1_preview_4.models.TasksStateTasksEntityLinkingTasksItem] + """ + + _validation = { + 'completed': {'required': True}, + 'failed': {'required': True}, + 'in_progress': {'required': True}, + 'total': {'required': True}, + } + + _attribute_map = { + 'details': {'key': 'details', 'type': 'TasksStateTasksDetails'}, + 'completed': {'key': 'completed', 'type': 'int'}, + 'failed': {'key': 'failed', 'type': 'int'}, + 'in_progress': {'key': 'inProgress', 'type': 'int'}, + 'total': {'key': 'total', 'type': 'int'}, + 'entity_recognition_tasks': {'key': 'entityRecognitionTasks', 'type': '[TasksStateTasksEntityRecognitionTasksItem]'}, + 'entity_recognition_pii_tasks': {'key': 'entityRecognitionPiiTasks', 'type': '[TasksStateTasksEntityRecognitionPiiTasksItem]'}, + 'key_phrase_extraction_tasks': {'key': 'keyPhraseExtractionTasks', 'type': '[TasksStateTasksKeyPhraseExtractionTasksItem]'}, + 'entity_linking_tasks': {'key': 'entityLinkingTasks', 'type': '[TasksStateTasksEntityLinkingTasksItem]'}, + } + + def __init__( + self, + **kwargs + ): + super(TasksStateTasks, self).__init__(**kwargs) + self.details = kwargs.get('details', None) + self.completed = kwargs['completed'] + self.failed = kwargs['failed'] + self.in_progress = kwargs['in_progress'] + self.total = kwargs['total'] + self.entity_recognition_tasks = kwargs.get('entity_recognition_tasks', None) + self.entity_recognition_pii_tasks = kwargs.get('entity_recognition_pii_tasks', None) + self.key_phrase_extraction_tasks = kwargs.get('key_phrase_extraction_tasks', None) + self.entity_linking_tasks = kwargs.get('entity_linking_tasks', None) + + +class TaskState(msrest.serialization.Model): + """TaskState. + + All required parameters must be populated in order to send to Azure. + + :param last_update_date_time: Required. + :type last_update_date_time: ~datetime.datetime + :param name: + :type name: str + :param status: Required. Possible values include: "notStarted", "running", "succeeded", + "failed", "rejected", "cancelled", "cancelling", "partiallyCompleted". + :type status: str or ~azure.ai.textanalytics.v3_1_preview_4.models.State + """ + + _validation = { + 'last_update_date_time': {'required': True}, + 'status': {'required': True}, + } + + _attribute_map = { + 'last_update_date_time': {'key': 'lastUpdateDateTime', 'type': 'iso-8601'}, + 'name': {'key': 'name', 'type': 'str'}, + 'status': {'key': 'status', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(TaskState, self).__init__(**kwargs) + self.last_update_date_time = kwargs['last_update_date_time'] + self.name = kwargs.get('name', None) + self.status = kwargs['status'] + + +class TasksStateTasksDetails(TaskState): + """TasksStateTasksDetails. + + All required parameters must be populated in order to send to Azure. + + :param last_update_date_time: Required. 
+ :type last_update_date_time: ~datetime.datetime + :param name: + :type name: str + :param status: Required. Possible values include: "notStarted", "running", "succeeded", + "failed", "rejected", "cancelled", "cancelling", "partiallyCompleted". + :type status: str or ~azure.ai.textanalytics.v3_1_preview_4.models.State + """ + + _validation = { + 'last_update_date_time': {'required': True}, + 'status': {'required': True}, + } + + _attribute_map = { + 'last_update_date_time': {'key': 'lastUpdateDateTime', 'type': 'iso-8601'}, + 'name': {'key': 'name', 'type': 'str'}, + 'status': {'key': 'status', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(TasksStateTasksDetails, self).__init__(**kwargs) + + +class TasksStateTasksEntityLinkingTasksItem(TaskState, ComponentsIfu7BjSchemasTasksstatePropertiesTasksPropertiesEntitylinkingtasksItemsAllof1): + """TasksStateTasksEntityLinkingTasksItem. + + All required parameters must be populated in order to send to Azure. + + :param results: + :type results: ~azure.ai.textanalytics.v3_1_preview_4.models.EntityLinkingResult + :param last_update_date_time: Required. + :type last_update_date_time: ~datetime.datetime + :param name: + :type name: str + :param status: Required. Possible values include: "notStarted", "running", "succeeded", + "failed", "rejected", "cancelled", "cancelling", "partiallyCompleted". + :type status: str or ~azure.ai.textanalytics.v3_1_preview_4.models.State + """ + + _validation = { + 'last_update_date_time': {'required': True}, + 'status': {'required': True}, + } + + _attribute_map = { + 'results': {'key': 'results', 'type': 'EntityLinkingResult'}, + 'last_update_date_time': {'key': 'lastUpdateDateTime', 'type': 'iso-8601'}, + 'name': {'key': 'name', 'type': 'str'}, + 'status': {'key': 'status', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(TasksStateTasksEntityLinkingTasksItem, self).__init__(**kwargs) + self.results = kwargs.get('results', None) + self.last_update_date_time = kwargs['last_update_date_time'] + self.name = kwargs.get('name', None) + self.status = kwargs['status'] + + +class TasksStateTasksEntityRecognitionPiiTasksItem(TaskState, Components15X8E9LSchemasTasksstatePropertiesTasksPropertiesEntityrecognitionpiitasksItemsAllof1): + """TasksStateTasksEntityRecognitionPiiTasksItem. + + All required parameters must be populated in order to send to Azure. + + :param results: + :type results: ~azure.ai.textanalytics.v3_1_preview_4.models.PiiResult + :param last_update_date_time: Required. + :type last_update_date_time: ~datetime.datetime + :param name: + :type name: str + :param status: Required. Possible values include: "notStarted", "running", "succeeded", + "failed", "rejected", "cancelled", "cancelling", "partiallyCompleted". 
+ :type status: str or ~azure.ai.textanalytics.v3_1_preview_4.models.State + """ + + _validation = { + 'last_update_date_time': {'required': True}, + 'status': {'required': True}, + } + + _attribute_map = { + 'results': {'key': 'results', 'type': 'PiiResult'}, + 'last_update_date_time': {'key': 'lastUpdateDateTime', 'type': 'iso-8601'}, + 'name': {'key': 'name', 'type': 'str'}, + 'status': {'key': 'status', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(TasksStateTasksEntityRecognitionPiiTasksItem, self).__init__(**kwargs) + self.results = kwargs.get('results', None) + self.last_update_date_time = kwargs['last_update_date_time'] + self.name = kwargs.get('name', None) + self.status = kwargs['status'] + + +class TasksStateTasksEntityRecognitionTasksItem(TaskState, Components15Gvwi3SchemasTasksstatePropertiesTasksPropertiesEntityrecognitiontasksItemsAllof1): + """TasksStateTasksEntityRecognitionTasksItem. + + All required parameters must be populated in order to send to Azure. + + :param results: + :type results: ~azure.ai.textanalytics.v3_1_preview_4.models.EntitiesResult + :param last_update_date_time: Required. + :type last_update_date_time: ~datetime.datetime + :param name: + :type name: str + :param status: Required. Possible values include: "notStarted", "running", "succeeded", + "failed", "rejected", "cancelled", "cancelling", "partiallyCompleted". + :type status: str or ~azure.ai.textanalytics.v3_1_preview_4.models.State + """ + + _validation = { + 'last_update_date_time': {'required': True}, + 'status': {'required': True}, + } + + _attribute_map = { + 'results': {'key': 'results', 'type': 'EntitiesResult'}, + 'last_update_date_time': {'key': 'lastUpdateDateTime', 'type': 'iso-8601'}, + 'name': {'key': 'name', 'type': 'str'}, + 'status': {'key': 'status', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(TasksStateTasksEntityRecognitionTasksItem, self).__init__(**kwargs) + self.results = kwargs.get('results', None) + self.last_update_date_time = kwargs['last_update_date_time'] + self.name = kwargs.get('name', None) + self.status = kwargs['status'] + + +class TasksStateTasksKeyPhraseExtractionTasksItem(TaskState, Components1D9IzucSchemasTasksstatePropertiesTasksPropertiesKeyphraseextractiontasksItemsAllof1): + """TasksStateTasksKeyPhraseExtractionTasksItem. + + All required parameters must be populated in order to send to Azure. + + :param results: + :type results: ~azure.ai.textanalytics.v3_1_preview_4.models.KeyPhraseResult + :param last_update_date_time: Required. + :type last_update_date_time: ~datetime.datetime + :param name: + :type name: str + :param status: Required. Possible values include: "notStarted", "running", "succeeded", + "failed", "rejected", "cancelled", "cancelling", "partiallyCompleted". 
+ :type status: str or ~azure.ai.textanalytics.v3_1_preview_4.models.State + """ + + _validation = { + 'last_update_date_time': {'required': True}, + 'status': {'required': True}, + } + + _attribute_map = { + 'results': {'key': 'results', 'type': 'KeyPhraseResult'}, + 'last_update_date_time': {'key': 'lastUpdateDateTime', 'type': 'iso-8601'}, + 'name': {'key': 'name', 'type': 'str'}, + 'status': {'key': 'status', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(TasksStateTasksKeyPhraseExtractionTasksItem, self).__init__(**kwargs) + self.results = kwargs.get('results', None) + self.last_update_date_time = kwargs['last_update_date_time'] + self.name = kwargs.get('name', None) + self.status = kwargs['status'] + + +class TextAnalyticsError(msrest.serialization.Model): + """TextAnalyticsError. + + All required parameters must be populated in order to send to Azure. + + :param code: Required. Error code. Possible values include: "InvalidRequest", + "InvalidArgument", "InternalServerError", "ServiceUnavailable", "NotFound". + :type code: str or ~azure.ai.textanalytics.v3_1_preview_4.models.ErrorCodeValue + :param message: Required. Error message. + :type message: str + :param target: Error target. + :type target: str + :param innererror: Inner error contains more specific information. + :type innererror: ~azure.ai.textanalytics.v3_1_preview_4.models.InnerError + :param details: Details about specific errors that led to this reported error. + :type details: list[~azure.ai.textanalytics.v3_1_preview_4.models.TextAnalyticsError] + """ + + _validation = { + 'code': {'required': True}, + 'message': {'required': True}, + } + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'target': {'key': 'target', 'type': 'str'}, + 'innererror': {'key': 'innererror', 'type': 'InnerError'}, + 'details': {'key': 'details', 'type': '[TextAnalyticsError]'}, + } + + def __init__( + self, + **kwargs + ): + super(TextAnalyticsError, self).__init__(**kwargs) + self.code = kwargs['code'] + self.message = kwargs['message'] + self.target = kwargs.get('target', None) + self.innererror = kwargs.get('innererror', None) + self.details = kwargs.get('details', None) + + +class TextAnalyticsWarning(msrest.serialization.Model): + """TextAnalyticsWarning. + + All required parameters must be populated in order to send to Azure. + + :param code: Required. Error code. Possible values include: "LongWordsInDocument", + "DocumentTruncated". + :type code: str or ~azure.ai.textanalytics.v3_1_preview_4.models.WarningCodeValue + :param message: Required. Warning message. + :type message: str + :param target_ref: A JSON pointer reference indicating the target object. 
+ :type target_ref: str + """ + + _validation = { + 'code': {'required': True}, + 'message': {'required': True}, + } + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'target_ref': {'key': 'targetRef', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(TextAnalyticsWarning, self).__init__(**kwargs) + self.code = kwargs['code'] + self.message = kwargs['message'] + self.target_ref = kwargs.get('target_ref', None) diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_4/models/_models_py3.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_4/models/_models_py3.py new file mode 100644 index 000000000000..f3074ced5e21 --- /dev/null +++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_4/models/_models_py3.py @@ -0,0 +1,2840 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +import datetime +from typing import Dict, List, Optional, Union + +from azure.core.exceptions import HttpResponseError +import msrest.serialization + +from ._text_analytics_client_enums import * + + +class JobManifest(msrest.serialization.Model): + """JobManifest. + + All required parameters must be populated in order to send to Azure. + + :param tasks: Required. The set of tasks to execute on the input documents. Cannot specify the + same task more than once. + :type tasks: ~azure.ai.textanalytics.v3_1_preview_4.models.JobManifestTasks + """ + + _validation = { + 'tasks': {'required': True}, + } + + _attribute_map = { + 'tasks': {'key': 'tasks', 'type': 'JobManifestTasks'}, + } + + def __init__( + self, + *, + tasks: "JobManifestTasks", + **kwargs + ): + super(JobManifest, self).__init__(**kwargs) + self.tasks = tasks + + +class JobDescriptor(msrest.serialization.Model): + """JobDescriptor. + + :param display_name: Optional display name for the analysis job. + :type display_name: str + """ + + _attribute_map = { + 'display_name': {'key': 'displayName', 'type': 'str'}, + } + + def __init__( + self, + *, + display_name: Optional[str] = None, + **kwargs + ): + super(JobDescriptor, self).__init__(**kwargs) + self.display_name = display_name + + +class AnalyzeBatchInput(JobDescriptor, JobManifest): + """AnalyzeBatchInput. + + All required parameters must be populated in order to send to Azure. + + :param tasks: Required. The set of tasks to execute on the input documents. Cannot specify the + same task more than once. + :type tasks: ~azure.ai.textanalytics.v3_1_preview_4.models.JobManifestTasks + :param display_name: Optional display name for the analysis job. + :type display_name: str + :param analysis_input: Required. Contains a set of input documents to be analyzed by the + service. 
+ :type analysis_input: ~azure.ai.textanalytics.v3_1_preview_4.models.MultiLanguageBatchInput + """ + + _validation = { + 'tasks': {'required': True}, + 'analysis_input': {'required': True}, + } + + _attribute_map = { + 'tasks': {'key': 'tasks', 'type': 'JobManifestTasks'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'analysis_input': {'key': 'analysisInput', 'type': 'MultiLanguageBatchInput'}, + } + + def __init__( + self, + *, + tasks: "JobManifestTasks", + analysis_input: "MultiLanguageBatchInput", + display_name: Optional[str] = None, + **kwargs + ): + super(AnalyzeBatchInput, self).__init__(display_name=display_name, tasks=tasks, **kwargs) + self.tasks = tasks + self.analysis_input = analysis_input + self.display_name = display_name + self.analysis_input = analysis_input + + +class JobMetadata(msrest.serialization.Model): + """JobMetadata. + + All required parameters must be populated in order to send to Azure. + + :param created_date_time: Required. + :type created_date_time: ~datetime.datetime + :param expiration_date_time: + :type expiration_date_time: ~datetime.datetime + :param job_id: Required. + :type job_id: str + :param last_update_date_time: Required. + :type last_update_date_time: ~datetime.datetime + :param status: Required. Possible values include: "notStarted", "running", "succeeded", + "failed", "rejected", "cancelled", "cancelling", "partiallyCompleted". + :type status: str or ~azure.ai.textanalytics.v3_1_preview_4.models.State + """ + + _validation = { + 'created_date_time': {'required': True}, + 'job_id': {'required': True}, + 'last_update_date_time': {'required': True}, + 'status': {'required': True}, + } + + _attribute_map = { + 'created_date_time': {'key': 'createdDateTime', 'type': 'iso-8601'}, + 'expiration_date_time': {'key': 'expirationDateTime', 'type': 'iso-8601'}, + 'job_id': {'key': 'jobId', 'type': 'str'}, + 'last_update_date_time': {'key': 'lastUpdateDateTime', 'type': 'iso-8601'}, + 'status': {'key': 'status', 'type': 'str'}, + } + + def __init__( + self, + *, + created_date_time: datetime.datetime, + job_id: str, + last_update_date_time: datetime.datetime, + status: Union[str, "State"], + expiration_date_time: Optional[datetime.datetime] = None, + **kwargs + ): + super(JobMetadata, self).__init__(**kwargs) + self.created_date_time = created_date_time + self.expiration_date_time = expiration_date_time + self.job_id = job_id + self.last_update_date_time = last_update_date_time + self.status = status + + +class AnalyzeJobMetadata(JobMetadata): + """AnalyzeJobMetadata. + + All required parameters must be populated in order to send to Azure. + + :param created_date_time: Required. + :type created_date_time: ~datetime.datetime + :param expiration_date_time: + :type expiration_date_time: ~datetime.datetime + :param job_id: Required. + :type job_id: str + :param last_update_date_time: Required. + :type last_update_date_time: ~datetime.datetime + :param status: Required. Possible values include: "notStarted", "running", "succeeded", + "failed", "rejected", "cancelled", "cancelling", "partiallyCompleted". 
+ :type status: str or ~azure.ai.textanalytics.v3_1_preview_4.models.State + :param display_name: + :type display_name: str + """ + + _validation = { + 'created_date_time': {'required': True}, + 'job_id': {'required': True}, + 'last_update_date_time': {'required': True}, + 'status': {'required': True}, + } + + _attribute_map = { + 'created_date_time': {'key': 'createdDateTime', 'type': 'iso-8601'}, + 'expiration_date_time': {'key': 'expirationDateTime', 'type': 'iso-8601'}, + 'job_id': {'key': 'jobId', 'type': 'str'}, + 'last_update_date_time': {'key': 'lastUpdateDateTime', 'type': 'iso-8601'}, + 'status': {'key': 'status', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + } + + def __init__( + self, + *, + created_date_time: datetime.datetime, + job_id: str, + last_update_date_time: datetime.datetime, + status: Union[str, "State"], + expiration_date_time: Optional[datetime.datetime] = None, + display_name: Optional[str] = None, + **kwargs + ): + super(AnalyzeJobMetadata, self).__init__(created_date_time=created_date_time, expiration_date_time=expiration_date_time, job_id=job_id, last_update_date_time=last_update_date_time, status=status, **kwargs) + self.display_name = display_name + + +class Pagination(msrest.serialization.Model): + """Pagination. + + :param next_link: + :type next_link: str + """ + + _attribute_map = { + 'next_link': {'key': '@nextLink', 'type': 'str'}, + } + + def __init__( + self, + *, + next_link: Optional[str] = None, + **kwargs + ): + super(Pagination, self).__init__(**kwargs) + self.next_link = next_link + + +class TasksState(msrest.serialization.Model): + """TasksState. + + All required parameters must be populated in order to send to Azure. + + :param tasks: Required. + :type tasks: ~azure.ai.textanalytics.v3_1_preview_4.models.TasksStateTasks + """ + + _validation = { + 'tasks': {'required': True}, + } + + _attribute_map = { + 'tasks': {'key': 'tasks', 'type': 'TasksStateTasks'}, + } + + def __init__( + self, + *, + tasks: "TasksStateTasks", + **kwargs + ): + super(TasksState, self).__init__(**kwargs) + self.tasks = tasks + + +class AnalyzeJobState(AnalyzeJobMetadata, TasksState, Pagination): + """AnalyzeJobState. + + All required parameters must be populated in order to send to Azure. + + :param next_link: + :type next_link: str + :param tasks: Required. + :type tasks: ~azure.ai.textanalytics.v3_1_preview_4.models.TasksStateTasks + :param created_date_time: Required. + :type created_date_time: ~datetime.datetime + :param expiration_date_time: + :type expiration_date_time: ~datetime.datetime + :param job_id: Required. + :type job_id: str + :param last_update_date_time: Required. + :type last_update_date_time: ~datetime.datetime + :param status: Required. Possible values include: "notStarted", "running", "succeeded", + "failed", "rejected", "cancelled", "cancelling", "partiallyCompleted". + :type status: str or ~azure.ai.textanalytics.v3_1_preview_4.models.State + :param display_name: + :type display_name: str + :param errors: + :type errors: list[~azure.ai.textanalytics.v3_1_preview_4.models.TextAnalyticsError] + :param statistics: if showStats=true was specified in the request this field will contain + information about the request payload. 
+ :type statistics: ~azure.ai.textanalytics.v3_1_preview_4.models.RequestStatistics + """ + + _validation = { + 'tasks': {'required': True}, + 'created_date_time': {'required': True}, + 'job_id': {'required': True}, + 'last_update_date_time': {'required': True}, + 'status': {'required': True}, + } + + _attribute_map = { + 'next_link': {'key': '@nextLink', 'type': 'str'}, + 'tasks': {'key': 'tasks', 'type': 'TasksStateTasks'}, + 'created_date_time': {'key': 'createdDateTime', 'type': 'iso-8601'}, + 'expiration_date_time': {'key': 'expirationDateTime', 'type': 'iso-8601'}, + 'job_id': {'key': 'jobId', 'type': 'str'}, + 'last_update_date_time': {'key': 'lastUpdateDateTime', 'type': 'iso-8601'}, + 'status': {'key': 'status', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'errors': {'key': 'errors', 'type': '[TextAnalyticsError]'}, + 'statistics': {'key': 'statistics', 'type': 'RequestStatistics'}, + } + + def __init__( + self, + *, + tasks: "TasksStateTasks", + created_date_time: datetime.datetime, + job_id: str, + last_update_date_time: datetime.datetime, + status: Union[str, "State"], + next_link: Optional[str] = None, + expiration_date_time: Optional[datetime.datetime] = None, + display_name: Optional[str] = None, + errors: Optional[List["TextAnalyticsError"]] = None, + statistics: Optional["RequestStatistics"] = None, + **kwargs + ): + super(AnalyzeJobState, self).__init__(created_date_time=created_date_time, expiration_date_time=expiration_date_time, job_id=job_id, last_update_date_time=last_update_date_time, status=status, display_name=display_name, tasks=tasks, next_link=next_link, **kwargs) + self.next_link = next_link + self.tasks = tasks + self.errors = errors + self.statistics = statistics + self.next_link = next_link + self.created_date_time = created_date_time + self.expiration_date_time = expiration_date_time + self.job_id = job_id + self.last_update_date_time = last_update_date_time + self.status = status + self.display_name = display_name + self.errors = errors + self.statistics = statistics + self.tasks = tasks + self.created_date_time = created_date_time + self.expiration_date_time = expiration_date_time + self.job_id = job_id + self.last_update_date_time = last_update_date_time + self.status = status + self.display_name = display_name + self.errors = errors + self.statistics = statistics + + +class Components15Gvwi3SchemasTasksstatePropertiesTasksPropertiesEntityrecognitiontasksItemsAllof1(msrest.serialization.Model): + """Components15Gvwi3SchemasTasksstatePropertiesTasksPropertiesEntityrecognitiontasksItemsAllof1. + + :param results: + :type results: ~azure.ai.textanalytics.v3_1_preview_4.models.EntitiesResult + """ + + _attribute_map = { + 'results': {'key': 'results', 'type': 'EntitiesResult'}, + } + + def __init__( + self, + *, + results: Optional["EntitiesResult"] = None, + **kwargs + ): + super(Components15Gvwi3SchemasTasksstatePropertiesTasksPropertiesEntityrecognitiontasksItemsAllof1, self).__init__(**kwargs) + self.results = results + + +class Components15X8E9LSchemasTasksstatePropertiesTasksPropertiesEntityrecognitionpiitasksItemsAllof1(msrest.serialization.Model): + """Components15X8E9LSchemasTasksstatePropertiesTasksPropertiesEntityrecognitionpiitasksItemsAllof1. 
+ + :param results: + :type results: ~azure.ai.textanalytics.v3_1_preview_4.models.PiiResult + """ + + _attribute_map = { + 'results': {'key': 'results', 'type': 'PiiResult'}, + } + + def __init__( + self, + *, + results: Optional["PiiResult"] = None, + **kwargs + ): + super(Components15X8E9LSchemasTasksstatePropertiesTasksPropertiesEntityrecognitionpiitasksItemsAllof1, self).__init__(**kwargs) + self.results = results + + +class Components1D9IzucSchemasTasksstatePropertiesTasksPropertiesKeyphraseextractiontasksItemsAllof1(msrest.serialization.Model): + """Components1D9IzucSchemasTasksstatePropertiesTasksPropertiesKeyphraseextractiontasksItemsAllof1. + + :param results: + :type results: ~azure.ai.textanalytics.v3_1_preview_4.models.KeyPhraseResult + """ + + _attribute_map = { + 'results': {'key': 'results', 'type': 'KeyPhraseResult'}, + } + + def __init__( + self, + *, + results: Optional["KeyPhraseResult"] = None, + **kwargs + ): + super(Components1D9IzucSchemasTasksstatePropertiesTasksPropertiesKeyphraseextractiontasksItemsAllof1, self).__init__(**kwargs) + self.results = results + + +class ComponentsIfu7BjSchemasTasksstatePropertiesTasksPropertiesEntitylinkingtasksItemsAllof1(msrest.serialization.Model): + """ComponentsIfu7BjSchemasTasksstatePropertiesTasksPropertiesEntitylinkingtasksItemsAllof1. + + :param results: + :type results: ~azure.ai.textanalytics.v3_1_preview_4.models.EntityLinkingResult + """ + + _attribute_map = { + 'results': {'key': 'results', 'type': 'EntityLinkingResult'}, + } + + def __init__( + self, + *, + results: Optional["EntityLinkingResult"] = None, + **kwargs + ): + super(ComponentsIfu7BjSchemasTasksstatePropertiesTasksPropertiesEntitylinkingtasksItemsAllof1, self).__init__(**kwargs) + self.results = results + + +class DetectedLanguage(msrest.serialization.Model): + """DetectedLanguage. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. Long name of a detected language (e.g. English, French). + :type name: str + :param iso6391_name: Required. A two letter representation of the detected language according + to the ISO 639-1 standard (e.g. en, fr). + :type iso6391_name: str + :param confidence_score: Required. A confidence score between 0 and 1. Scores close to 1 + indicate 100% certainty that the identified language is true. + :type confidence_score: float + """ + + _validation = { + 'name': {'required': True}, + 'iso6391_name': {'required': True}, + 'confidence_score': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'iso6391_name': {'key': 'iso6391Name', 'type': 'str'}, + 'confidence_score': {'key': 'confidenceScore', 'type': 'float'}, + } + + def __init__( + self, + *, + name: str, + iso6391_name: str, + confidence_score: float, + **kwargs + ): + super(DetectedLanguage, self).__init__(**kwargs) + self.name = name + self.iso6391_name = iso6391_name + self.confidence_score = confidence_score + + +class DocumentEntities(msrest.serialization.Model): + """DocumentEntities. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. Unique, non-empty document identifier. + :type id: str + :param entities: Required. Recognized entities in the document. + :type entities: list[~azure.ai.textanalytics.v3_1_preview_4.models.Entity] + :param warnings: Required. Warnings encountered while processing document. 
+ :type warnings: list[~azure.ai.textanalytics.v3_1_preview_4.models.TextAnalyticsWarning] + :param statistics: if showStats=true was specified in the request this field will contain + information about the document payload. + :type statistics: ~azure.ai.textanalytics.v3_1_preview_4.models.DocumentStatistics + """ + + _validation = { + 'id': {'required': True}, + 'entities': {'required': True}, + 'warnings': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'entities': {'key': 'entities', 'type': '[Entity]'}, + 'warnings': {'key': 'warnings', 'type': '[TextAnalyticsWarning]'}, + 'statistics': {'key': 'statistics', 'type': 'DocumentStatistics'}, + } + + def __init__( + self, + *, + id: str, + entities: List["Entity"], + warnings: List["TextAnalyticsWarning"], + statistics: Optional["DocumentStatistics"] = None, + **kwargs + ): + super(DocumentEntities, self).__init__(**kwargs) + self.id = id + self.entities = entities + self.warnings = warnings + self.statistics = statistics + + +class DocumentError(msrest.serialization.Model): + """DocumentError. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. Document Id. + :type id: str + :param error: Required. Document Error. + :type error: ~azure.ai.textanalytics.v3_1_preview_4.models.TextAnalyticsError + """ + + _validation = { + 'id': {'required': True}, + 'error': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'error': {'key': 'error', 'type': 'TextAnalyticsError'}, + } + + def __init__( + self, + *, + id: str, + error: "TextAnalyticsError", + **kwargs + ): + super(DocumentError, self).__init__(**kwargs) + self.id = id + self.error = error + + +class DocumentHealthcareEntities(msrest.serialization.Model): + """DocumentHealthcareEntities. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. Unique, non-empty document identifier. + :type id: str + :param entities: Required. Healthcare entities. + :type entities: list[~azure.ai.textanalytics.v3_1_preview_4.models.HealthcareEntity] + :param relations: Required. Healthcare entity relations. + :type relations: list[~azure.ai.textanalytics.v3_1_preview_4.models.HealthcareRelation] + :param warnings: Required. Warnings encountered while processing document. + :type warnings: list[~azure.ai.textanalytics.v3_1_preview_4.models.TextAnalyticsWarning] + :param statistics: if showStats=true was specified in the request this field will contain + information about the document payload. 
+ :type statistics: ~azure.ai.textanalytics.v3_1_preview_4.models.DocumentStatistics + """ + + _validation = { + 'id': {'required': True}, + 'entities': {'required': True}, + 'relations': {'required': True}, + 'warnings': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'entities': {'key': 'entities', 'type': '[HealthcareEntity]'}, + 'relations': {'key': 'relations', 'type': '[HealthcareRelation]'}, + 'warnings': {'key': 'warnings', 'type': '[TextAnalyticsWarning]'}, + 'statistics': {'key': 'statistics', 'type': 'DocumentStatistics'}, + } + + def __init__( + self, + *, + id: str, + entities: List["HealthcareEntity"], + relations: List["HealthcareRelation"], + warnings: List["TextAnalyticsWarning"], + statistics: Optional["DocumentStatistics"] = None, + **kwargs + ): + super(DocumentHealthcareEntities, self).__init__(**kwargs) + self.id = id + self.entities = entities + self.relations = relations + self.warnings = warnings + self.statistics = statistics + + +class DocumentKeyPhrases(msrest.serialization.Model): + """DocumentKeyPhrases. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. Unique, non-empty document identifier. + :type id: str + :param key_phrases: Required. A list of representative words or phrases. The number of key + phrases returned is proportional to the number of words in the input document. + :type key_phrases: list[str] + :param warnings: Required. Warnings encountered while processing document. + :type warnings: list[~azure.ai.textanalytics.v3_1_preview_4.models.TextAnalyticsWarning] + :param statistics: if showStats=true was specified in the request this field will contain + information about the document payload. + :type statistics: ~azure.ai.textanalytics.v3_1_preview_4.models.DocumentStatistics + """ + + _validation = { + 'id': {'required': True}, + 'key_phrases': {'required': True}, + 'warnings': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'key_phrases': {'key': 'keyPhrases', 'type': '[str]'}, + 'warnings': {'key': 'warnings', 'type': '[TextAnalyticsWarning]'}, + 'statistics': {'key': 'statistics', 'type': 'DocumentStatistics'}, + } + + def __init__( + self, + *, + id: str, + key_phrases: List[str], + warnings: List["TextAnalyticsWarning"], + statistics: Optional["DocumentStatistics"] = None, + **kwargs + ): + super(DocumentKeyPhrases, self).__init__(**kwargs) + self.id = id + self.key_phrases = key_phrases + self.warnings = warnings + self.statistics = statistics + + +class DocumentLanguage(msrest.serialization.Model): + """DocumentLanguage. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. Unique, non-empty document identifier. + :type id: str + :param detected_language: Required. Detected Language. + :type detected_language: ~azure.ai.textanalytics.v3_1_preview_4.models.DetectedLanguage + :param warnings: Required. Warnings encountered while processing document. + :type warnings: list[~azure.ai.textanalytics.v3_1_preview_4.models.TextAnalyticsWarning] + :param statistics: if showStats=true was specified in the request this field will contain + information about the document payload. 
+ :type statistics: ~azure.ai.textanalytics.v3_1_preview_4.models.DocumentStatistics + """ + + _validation = { + 'id': {'required': True}, + 'detected_language': {'required': True}, + 'warnings': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'detected_language': {'key': 'detectedLanguage', 'type': 'DetectedLanguage'}, + 'warnings': {'key': 'warnings', 'type': '[TextAnalyticsWarning]'}, + 'statistics': {'key': 'statistics', 'type': 'DocumentStatistics'}, + } + + def __init__( + self, + *, + id: str, + detected_language: "DetectedLanguage", + warnings: List["TextAnalyticsWarning"], + statistics: Optional["DocumentStatistics"] = None, + **kwargs + ): + super(DocumentLanguage, self).__init__(**kwargs) + self.id = id + self.detected_language = detected_language + self.warnings = warnings + self.statistics = statistics + + +class DocumentLinkedEntities(msrest.serialization.Model): + """DocumentLinkedEntities. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. Unique, non-empty document identifier. + :type id: str + :param entities: Required. Recognized well known entities in the document. + :type entities: list[~azure.ai.textanalytics.v3_1_preview_4.models.LinkedEntity] + :param warnings: Required. Warnings encountered while processing document. + :type warnings: list[~azure.ai.textanalytics.v3_1_preview_4.models.TextAnalyticsWarning] + :param statistics: if showStats=true was specified in the request this field will contain + information about the document payload. + :type statistics: ~azure.ai.textanalytics.v3_1_preview_4.models.DocumentStatistics + """ + + _validation = { + 'id': {'required': True}, + 'entities': {'required': True}, + 'warnings': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'entities': {'key': 'entities', 'type': '[LinkedEntity]'}, + 'warnings': {'key': 'warnings', 'type': '[TextAnalyticsWarning]'}, + 'statistics': {'key': 'statistics', 'type': 'DocumentStatistics'}, + } + + def __init__( + self, + *, + id: str, + entities: List["LinkedEntity"], + warnings: List["TextAnalyticsWarning"], + statistics: Optional["DocumentStatistics"] = None, + **kwargs + ): + super(DocumentLinkedEntities, self).__init__(**kwargs) + self.id = id + self.entities = entities + self.warnings = warnings + self.statistics = statistics + + +class DocumentSentiment(msrest.serialization.Model): + """DocumentSentiment. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. Unique, non-empty document identifier. + :type id: str + :param sentiment: Required. Predicted sentiment for document (Negative, Neutral, Positive, or + Mixed). Possible values include: "positive", "neutral", "negative", "mixed". + :type sentiment: str or ~azure.ai.textanalytics.v3_1_preview_4.models.DocumentSentimentValue + :param statistics: if showStats=true was specified in the request this field will contain + information about the document payload. + :type statistics: ~azure.ai.textanalytics.v3_1_preview_4.models.DocumentStatistics + :param confidence_scores: Required. Document level sentiment confidence scores between 0 and 1 + for each sentiment class. + :type confidence_scores: + ~azure.ai.textanalytics.v3_1_preview_4.models.SentimentConfidenceScorePerLabel + :param sentences: Required. Sentence level sentiment analysis. + :type sentences: list[~azure.ai.textanalytics.v3_1_preview_4.models.SentenceSentiment] + :param warnings: Required. 
Warnings encountered while processing document. + :type warnings: list[~azure.ai.textanalytics.v3_1_preview_4.models.TextAnalyticsWarning] + """ + + _validation = { + 'id': {'required': True}, + 'sentiment': {'required': True}, + 'confidence_scores': {'required': True}, + 'sentences': {'required': True}, + 'warnings': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'sentiment': {'key': 'sentiment', 'type': 'str'}, + 'statistics': {'key': 'statistics', 'type': 'DocumentStatistics'}, + 'confidence_scores': {'key': 'confidenceScores', 'type': 'SentimentConfidenceScorePerLabel'}, + 'sentences': {'key': 'sentences', 'type': '[SentenceSentiment]'}, + 'warnings': {'key': 'warnings', 'type': '[TextAnalyticsWarning]'}, + } + + def __init__( + self, + *, + id: str, + sentiment: Union[str, "DocumentSentimentValue"], + confidence_scores: "SentimentConfidenceScorePerLabel", + sentences: List["SentenceSentiment"], + warnings: List["TextAnalyticsWarning"], + statistics: Optional["DocumentStatistics"] = None, + **kwargs + ): + super(DocumentSentiment, self).__init__(**kwargs) + self.id = id + self.sentiment = sentiment + self.statistics = statistics + self.confidence_scores = confidence_scores + self.sentences = sentences + self.warnings = warnings + + +class DocumentStatistics(msrest.serialization.Model): + """if showStats=true was specified in the request this field will contain information about the document payload. + + All required parameters must be populated in order to send to Azure. + + :param characters_count: Required. Number of text elements recognized in the document. + :type characters_count: int + :param transactions_count: Required. Number of transactions for the document. + :type transactions_count: int + """ + + _validation = { + 'characters_count': {'required': True}, + 'transactions_count': {'required': True}, + } + + _attribute_map = { + 'characters_count': {'key': 'charactersCount', 'type': 'int'}, + 'transactions_count': {'key': 'transactionsCount', 'type': 'int'}, + } + + def __init__( + self, + *, + characters_count: int, + transactions_count: int, + **kwargs + ): + super(DocumentStatistics, self).__init__(**kwargs) + self.characters_count = characters_count + self.transactions_count = transactions_count + + +class EntitiesResult(msrest.serialization.Model): + """EntitiesResult. + + All required parameters must be populated in order to send to Azure. + + :param documents: Required. Response by document. + :type documents: list[~azure.ai.textanalytics.v3_1_preview_4.models.DocumentEntities] + :param errors: Required. Errors by document id. + :type errors: list[~azure.ai.textanalytics.v3_1_preview_4.models.DocumentError] + :param statistics: if showStats=true was specified in the request this field will contain + information about the request payload. + :type statistics: ~azure.ai.textanalytics.v3_1_preview_4.models.RequestStatistics + :param model_version: Required. This field indicates which model is used for scoring. 
+ :type model_version: str + """ + + _validation = { + 'documents': {'required': True}, + 'errors': {'required': True}, + 'model_version': {'required': True}, + } + + _attribute_map = { + 'documents': {'key': 'documents', 'type': '[DocumentEntities]'}, + 'errors': {'key': 'errors', 'type': '[DocumentError]'}, + 'statistics': {'key': 'statistics', 'type': 'RequestStatistics'}, + 'model_version': {'key': 'modelVersion', 'type': 'str'}, + } + + def __init__( + self, + *, + documents: List["DocumentEntities"], + errors: List["DocumentError"], + model_version: str, + statistics: Optional["RequestStatistics"] = None, + **kwargs + ): + super(EntitiesResult, self).__init__(**kwargs) + self.documents = documents + self.errors = errors + self.statistics = statistics + self.model_version = model_version + + +class EntitiesTask(msrest.serialization.Model): + """EntitiesTask. + + :param parameters: + :type parameters: ~azure.ai.textanalytics.v3_1_preview_4.models.EntitiesTaskParameters + """ + + _attribute_map = { + 'parameters': {'key': 'parameters', 'type': 'EntitiesTaskParameters'}, + } + + def __init__( + self, + *, + parameters: Optional["EntitiesTaskParameters"] = None, + **kwargs + ): + super(EntitiesTask, self).__init__(**kwargs) + self.parameters = parameters + + +class EntitiesTaskParameters(msrest.serialization.Model): + """EntitiesTaskParameters. + + :param model_version: + :type model_version: str + :param string_index_type: Possible values include: "TextElements_v8", "UnicodeCodePoint", + "Utf16CodeUnit". Default value: "TextElements_v8". + :type string_index_type: str or + ~azure.ai.textanalytics.v3_1_preview_4.models.StringIndexTypeResponse + """ + + _attribute_map = { + 'model_version': {'key': 'model-version', 'type': 'str'}, + 'string_index_type': {'key': 'stringIndexType', 'type': 'str'}, + } + + def __init__( + self, + *, + model_version: Optional[str] = "latest", + string_index_type: Optional[Union[str, "StringIndexTypeResponse"]] = "TextElements_v8", + **kwargs + ): + super(EntitiesTaskParameters, self).__init__(**kwargs) + self.model_version = model_version + self.string_index_type = string_index_type + + +class Entity(msrest.serialization.Model): + """Entity. + + All required parameters must be populated in order to send to Azure. + + :param text: Required. Entity text as appears in the request. + :type text: str + :param category: Required. Entity type. + :type category: str + :param subcategory: (Optional) Entity sub type. + :type subcategory: str + :param offset: Required. Start position for the entity text. Use of different 'stringIndexType' + values can affect the offset returned. + :type offset: int + :param length: Required. Length for the entity text. Use of different 'stringIndexType' values + can affect the length returned. + :type length: int + :param confidence_score: Required. Confidence score between 0 and 1 of the extracted entity. 
+ :type confidence_score: float + """ + + _validation = { + 'text': {'required': True}, + 'category': {'required': True}, + 'offset': {'required': True}, + 'length': {'required': True}, + 'confidence_score': {'required': True}, + } + + _attribute_map = { + 'text': {'key': 'text', 'type': 'str'}, + 'category': {'key': 'category', 'type': 'str'}, + 'subcategory': {'key': 'subcategory', 'type': 'str'}, + 'offset': {'key': 'offset', 'type': 'int'}, + 'length': {'key': 'length', 'type': 'int'}, + 'confidence_score': {'key': 'confidenceScore', 'type': 'float'}, + } + + def __init__( + self, + *, + text: str, + category: str, + offset: int, + length: int, + confidence_score: float, + subcategory: Optional[str] = None, + **kwargs + ): + super(Entity, self).__init__(**kwargs) + self.text = text + self.category = category + self.subcategory = subcategory + self.offset = offset + self.length = length + self.confidence_score = confidence_score + + +class EntityLinkingResult(msrest.serialization.Model): + """EntityLinkingResult. + + All required parameters must be populated in order to send to Azure. + + :param documents: Required. Response by document. + :type documents: list[~azure.ai.textanalytics.v3_1_preview_4.models.DocumentLinkedEntities] + :param errors: Required. Errors by document id. + :type errors: list[~azure.ai.textanalytics.v3_1_preview_4.models.DocumentError] + :param statistics: if showStats=true was specified in the request this field will contain + information about the request payload. + :type statistics: ~azure.ai.textanalytics.v3_1_preview_4.models.RequestStatistics + :param model_version: Required. This field indicates which model is used for scoring. + :type model_version: str + """ + + _validation = { + 'documents': {'required': True}, + 'errors': {'required': True}, + 'model_version': {'required': True}, + } + + _attribute_map = { + 'documents': {'key': 'documents', 'type': '[DocumentLinkedEntities]'}, + 'errors': {'key': 'errors', 'type': '[DocumentError]'}, + 'statistics': {'key': 'statistics', 'type': 'RequestStatistics'}, + 'model_version': {'key': 'modelVersion', 'type': 'str'}, + } + + def __init__( + self, + *, + documents: List["DocumentLinkedEntities"], + errors: List["DocumentError"], + model_version: str, + statistics: Optional["RequestStatistics"] = None, + **kwargs + ): + super(EntityLinkingResult, self).__init__(**kwargs) + self.documents = documents + self.errors = errors + self.statistics = statistics + self.model_version = model_version + + +class EntityLinkingTask(msrest.serialization.Model): + """EntityLinkingTask. + + :param parameters: + :type parameters: ~azure.ai.textanalytics.v3_1_preview_4.models.EntityLinkingTaskParameters + """ + + _attribute_map = { + 'parameters': {'key': 'parameters', 'type': 'EntityLinkingTaskParameters'}, + } + + def __init__( + self, + *, + parameters: Optional["EntityLinkingTaskParameters"] = None, + **kwargs + ): + super(EntityLinkingTask, self).__init__(**kwargs) + self.parameters = parameters + + +class EntityLinkingTaskParameters(msrest.serialization.Model): + """EntityLinkingTaskParameters. + + :param model_version: + :type model_version: str + :param string_index_type: Possible values include: "TextElements_v8", "UnicodeCodePoint", + "Utf16CodeUnit". Default value: "TextElements_v8". 
+ :type string_index_type: str or + ~azure.ai.textanalytics.v3_1_preview_4.models.StringIndexTypeResponse + """ + + _attribute_map = { + 'model_version': {'key': 'model-version', 'type': 'str'}, + 'string_index_type': {'key': 'stringIndexType', 'type': 'str'}, + } + + def __init__( + self, + *, + model_version: Optional[str] = "latest", + string_index_type: Optional[Union[str, "StringIndexTypeResponse"]] = "TextElements_v8", + **kwargs + ): + super(EntityLinkingTaskParameters, self).__init__(**kwargs) + self.model_version = model_version + self.string_index_type = string_index_type + + +class ErrorResponse(msrest.serialization.Model): + """ErrorResponse. + + All required parameters must be populated in order to send to Azure. + + :param error: Required. Document Error. + :type error: ~azure.ai.textanalytics.v3_1_preview_4.models.TextAnalyticsError + """ + + _validation = { + 'error': {'required': True}, + } + + _attribute_map = { + 'error': {'key': 'error', 'type': 'TextAnalyticsError'}, + } + + def __init__( + self, + *, + error: "TextAnalyticsError", + **kwargs + ): + super(ErrorResponse, self).__init__(**kwargs) + self.error = error + + +class HealthcareAssertion(msrest.serialization.Model): + """HealthcareAssertion. + + :param conditionality: Describes any conditionality on the entity. Possible values include: + "Hypothetical", "Conditional". + :type conditionality: str or ~azure.ai.textanalytics.v3_1_preview_4.models.Conditionality + :param certainty: Describes the entities certainty and polarity. Possible values include: + "Positive", "Positive Possible", "Neutral Possible", "Negative Possible", "Negative". + :type certainty: str or ~azure.ai.textanalytics.v3_1_preview_4.models.Certainty + :param association: Describes if the entity is the subject of the text or if it describes + someone else. Possible values include: "subject", "other". + :type association: str or ~azure.ai.textanalytics.v3_1_preview_4.models.Association + """ + + _attribute_map = { + 'conditionality': {'key': 'conditionality', 'type': 'str'}, + 'certainty': {'key': 'certainty', 'type': 'str'}, + 'association': {'key': 'association', 'type': 'str'}, + } + + def __init__( + self, + *, + conditionality: Optional[Union[str, "Conditionality"]] = None, + certainty: Optional[Union[str, "Certainty"]] = None, + association: Optional[Union[str, "Association"]] = None, + **kwargs + ): + super(HealthcareAssertion, self).__init__(**kwargs) + self.conditionality = conditionality + self.certainty = certainty + self.association = association + + +class HealthcareEntity(Entity): + """HealthcareEntity. + + All required parameters must be populated in order to send to Azure. + + :param text: Required. Entity text as appears in the request. + :type text: str + :param category: Required. Entity type. + :type category: str + :param subcategory: (Optional) Entity sub type. + :type subcategory: str + :param offset: Required. Start position for the entity text. Use of different 'stringIndexType' + values can affect the offset returned. + :type offset: int + :param length: Required. Length for the entity text. Use of different 'stringIndexType' values + can affect the length returned. + :type length: int + :param confidence_score: Required. Confidence score between 0 and 1 of the extracted entity. + :type confidence_score: float + :param assertion: + :type assertion: ~azure.ai.textanalytics.v3_1_preview_4.models.HealthcareAssertion + :param name: Preferred name for the entity. Example: 'histologically' would have a 'name' of + 'histologic'. 
+ :type name: str + :param links: Entity references in known data sources. + :type links: list[~azure.ai.textanalytics.v3_1_preview_4.models.HealthcareEntityLink] + """ + + _validation = { + 'text': {'required': True}, + 'category': {'required': True}, + 'offset': {'required': True}, + 'length': {'required': True}, + 'confidence_score': {'required': True}, + } + + _attribute_map = { + 'text': {'key': 'text', 'type': 'str'}, + 'category': {'key': 'category', 'type': 'str'}, + 'subcategory': {'key': 'subcategory', 'type': 'str'}, + 'offset': {'key': 'offset', 'type': 'int'}, + 'length': {'key': 'length', 'type': 'int'}, + 'confidence_score': {'key': 'confidenceScore', 'type': 'float'}, + 'assertion': {'key': 'assertion', 'type': 'HealthcareAssertion'}, + 'name': {'key': 'name', 'type': 'str'}, + 'links': {'key': 'links', 'type': '[HealthcareEntityLink]'}, + } + + def __init__( + self, + *, + text: str, + category: str, + offset: int, + length: int, + confidence_score: float, + subcategory: Optional[str] = None, + assertion: Optional["HealthcareAssertion"] = None, + name: Optional[str] = None, + links: Optional[List["HealthcareEntityLink"]] = None, + **kwargs + ): + super(HealthcareEntity, self).__init__(text=text, category=category, subcategory=subcategory, offset=offset, length=length, confidence_score=confidence_score, **kwargs) + self.assertion = assertion + self.name = name + self.links = links + + +class HealthcareEntityLink(msrest.serialization.Model): + """HealthcareEntityLink. + + All required parameters must be populated in order to send to Azure. + + :param data_source: Required. Entity Catalog. Examples include: UMLS, CHV, MSH, etc. + :type data_source: str + :param id: Required. Entity id in the given source catalog. + :type id: str + """ + + _validation = { + 'data_source': {'required': True}, + 'id': {'required': True}, + } + + _attribute_map = { + 'data_source': {'key': 'dataSource', 'type': 'str'}, + 'id': {'key': 'id', 'type': 'str'}, + } + + def __init__( + self, + *, + data_source: str, + id: str, + **kwargs + ): + super(HealthcareEntityLink, self).__init__(**kwargs) + self.data_source = data_source + self.id = id + + +class HealthcareJobState(JobMetadata, Pagination): + """HealthcareJobState. + + All required parameters must be populated in order to send to Azure. + + :param next_link: + :type next_link: str + :param created_date_time: Required. + :type created_date_time: ~datetime.datetime + :param expiration_date_time: + :type expiration_date_time: ~datetime.datetime + :param job_id: Required. + :type job_id: str + :param last_update_date_time: Required. + :type last_update_date_time: ~datetime.datetime + :param status: Required. Possible values include: "notStarted", "running", "succeeded", + "failed", "rejected", "cancelled", "cancelling", "partiallyCompleted". 
+ :type status: str or ~azure.ai.textanalytics.v3_1_preview_4.models.State + :param results: + :type results: ~azure.ai.textanalytics.v3_1_preview_4.models.HealthcareResult + :param errors: + :type errors: list[~azure.ai.textanalytics.v3_1_preview_4.models.TextAnalyticsError] + """ + + _validation = { + 'created_date_time': {'required': True}, + 'job_id': {'required': True}, + 'last_update_date_time': {'required': True}, + 'status': {'required': True}, + } + + _attribute_map = { + 'next_link': {'key': '@nextLink', 'type': 'str'}, + 'created_date_time': {'key': 'createdDateTime', 'type': 'iso-8601'}, + 'expiration_date_time': {'key': 'expirationDateTime', 'type': 'iso-8601'}, + 'job_id': {'key': 'jobId', 'type': 'str'}, + 'last_update_date_time': {'key': 'lastUpdateDateTime', 'type': 'iso-8601'}, + 'status': {'key': 'status', 'type': 'str'}, + 'results': {'key': 'results', 'type': 'HealthcareResult'}, + 'errors': {'key': 'errors', 'type': '[TextAnalyticsError]'}, + } + + def __init__( + self, + *, + created_date_time: datetime.datetime, + job_id: str, + last_update_date_time: datetime.datetime, + status: Union[str, "State"], + next_link: Optional[str] = None, + expiration_date_time: Optional[datetime.datetime] = None, + results: Optional["HealthcareResult"] = None, + errors: Optional[List["TextAnalyticsError"]] = None, + **kwargs + ): + super(HealthcareJobState, self).__init__(created_date_time=created_date_time, expiration_date_time=expiration_date_time, job_id=job_id, last_update_date_time=last_update_date_time, status=status, next_link=next_link, **kwargs) + self.next_link = next_link + self.results = results + self.errors = errors + self.created_date_time = created_date_time + self.expiration_date_time = expiration_date_time + self.job_id = job_id + self.last_update_date_time = last_update_date_time + self.status = status + self.results = results + self.errors = errors + + +class HealthcareRelation(msrest.serialization.Model): + """Every relation is an entity graph of a certain relationType, where all entities are connected and have specific roles within the relation context. + + All required parameters must be populated in order to send to Azure. + + :param relation_type: Required. Type of relation. Examples include: ``DosageOfMedication`` or + 'FrequencyOfMedication', etc. Possible values include: "Abbreviation", + "DirectionOfBodyStructure", "DirectionOfCondition", "DirectionOfExamination", + "DirectionOfTreatment", "DosageOfMedication", "FormOfMedication", "FrequencyOfMedication", + "FrequencyOfTreatment", "QualifierOfCondition", "RelationOfExamination", "RouteOfMedication", + "TimeOfCondition", "TimeOfEvent", "TimeOfExamination", "TimeOfMedication", "TimeOfTreatment", + "UnitOfCondition", "UnitOfExamination", "ValueOfCondition", "ValueOfExamination". + :type relation_type: str or ~azure.ai.textanalytics.v3_1_preview_4.models.RelationType + :param entities: Required. The entities in the relation. 
+ :type entities: list[~azure.ai.textanalytics.v3_1_preview_4.models.HealthcareRelationEntity] + """ + + _validation = { + 'relation_type': {'required': True}, + 'entities': {'required': True}, + } + + _attribute_map = { + 'relation_type': {'key': 'relationType', 'type': 'str'}, + 'entities': {'key': 'entities', 'type': '[HealthcareRelationEntity]'}, + } + + def __init__( + self, + *, + relation_type: Union[str, "RelationType"], + entities: List["HealthcareRelationEntity"], + **kwargs + ): + super(HealthcareRelation, self).__init__(**kwargs) + self.relation_type = relation_type + self.entities = entities + + +class HealthcareRelationEntity(msrest.serialization.Model): + """HealthcareRelationEntity. + + All required parameters must be populated in order to send to Azure. + + :param ref: Required. Reference link object, using a JSON pointer RFC 6901 (URI Fragment + Identifier Representation), pointing to the entity . + :type ref: str + :param role: Required. Role of entity in the relationship. For example: 'CD20-positive diffuse + large B-cell lymphoma' has the following entities with their roles in parenthesis: CD20 + (GeneOrProtein), Positive (Expression), diffuse large B-cell lymphoma (Diagnosis). + :type role: str + """ + + _validation = { + 'ref': {'required': True}, + 'role': {'required': True}, + } + + _attribute_map = { + 'ref': {'key': 'ref', 'type': 'str'}, + 'role': {'key': 'role', 'type': 'str'}, + } + + def __init__( + self, + *, + ref: str, + role: str, + **kwargs + ): + super(HealthcareRelationEntity, self).__init__(**kwargs) + self.ref = ref + self.role = role + + +class HealthcareResult(msrest.serialization.Model): + """HealthcareResult. + + All required parameters must be populated in order to send to Azure. + + :param documents: Required. Response by document. + :type documents: list[~azure.ai.textanalytics.v3_1_preview_4.models.DocumentHealthcareEntities] + :param errors: Required. Errors by document id. + :type errors: list[~azure.ai.textanalytics.v3_1_preview_4.models.DocumentError] + :param statistics: if showStats=true was specified in the request this field will contain + information about the request payload. + :type statistics: ~azure.ai.textanalytics.v3_1_preview_4.models.RequestStatistics + :param model_version: Required. This field indicates which model is used for scoring. + :type model_version: str + """ + + _validation = { + 'documents': {'required': True}, + 'errors': {'required': True}, + 'model_version': {'required': True}, + } + + _attribute_map = { + 'documents': {'key': 'documents', 'type': '[DocumentHealthcareEntities]'}, + 'errors': {'key': 'errors', 'type': '[DocumentError]'}, + 'statistics': {'key': 'statistics', 'type': 'RequestStatistics'}, + 'model_version': {'key': 'modelVersion', 'type': 'str'}, + } + + def __init__( + self, + *, + documents: List["DocumentHealthcareEntities"], + errors: List["DocumentError"], + model_version: str, + statistics: Optional["RequestStatistics"] = None, + **kwargs + ): + super(HealthcareResult, self).__init__(**kwargs) + self.documents = documents + self.errors = errors + self.statistics = statistics + self.model_version = model_version + + +class InnerError(msrest.serialization.Model): + """InnerError. + + All required parameters must be populated in order to send to Azure. + + :param code: Required. Error code. 
Possible values include: "InvalidParameterValue", + "InvalidRequestBodyFormat", "EmptyRequest", "MissingInputRecords", "InvalidDocument", + "ModelVersionIncorrect", "InvalidDocumentBatch", "UnsupportedLanguageCode", + "InvalidCountryHint". + :type code: str or ~azure.ai.textanalytics.v3_1_preview_4.models.InnerErrorCodeValue + :param message: Required. Error message. + :type message: str + :param details: Error details. + :type details: dict[str, str] + :param target: Error target. + :type target: str + :param innererror: Inner error contains more specific information. + :type innererror: ~azure.ai.textanalytics.v3_1_preview_4.models.InnerError + """ + + _validation = { + 'code': {'required': True}, + 'message': {'required': True}, + } + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'details': {'key': 'details', 'type': '{str}'}, + 'target': {'key': 'target', 'type': 'str'}, + 'innererror': {'key': 'innererror', 'type': 'InnerError'}, + } + + def __init__( + self, + *, + code: Union[str, "InnerErrorCodeValue"], + message: str, + details: Optional[Dict[str, str]] = None, + target: Optional[str] = None, + innererror: Optional["InnerError"] = None, + **kwargs + ): + super(InnerError, self).__init__(**kwargs) + self.code = code + self.message = message + self.details = details + self.target = target + self.innererror = innererror + + +class JobManifestTasks(msrest.serialization.Model): + """The set of tasks to execute on the input documents. Cannot specify the same task more than once. + + :param entity_recognition_tasks: + :type entity_recognition_tasks: + list[~azure.ai.textanalytics.v3_1_preview_4.models.EntitiesTask] + :param entity_recognition_pii_tasks: + :type entity_recognition_pii_tasks: list[~azure.ai.textanalytics.v3_1_preview_4.models.PiiTask] + :param key_phrase_extraction_tasks: + :type key_phrase_extraction_tasks: + list[~azure.ai.textanalytics.v3_1_preview_4.models.KeyPhrasesTask] + :param entity_linking_tasks: + :type entity_linking_tasks: + list[~azure.ai.textanalytics.v3_1_preview_4.models.EntityLinkingTask] + """ + + _attribute_map = { + 'entity_recognition_tasks': {'key': 'entityRecognitionTasks', 'type': '[EntitiesTask]'}, + 'entity_recognition_pii_tasks': {'key': 'entityRecognitionPiiTasks', 'type': '[PiiTask]'}, + 'key_phrase_extraction_tasks': {'key': 'keyPhraseExtractionTasks', 'type': '[KeyPhrasesTask]'}, + 'entity_linking_tasks': {'key': 'entityLinkingTasks', 'type': '[EntityLinkingTask]'}, + } + + def __init__( + self, + *, + entity_recognition_tasks: Optional[List["EntitiesTask"]] = None, + entity_recognition_pii_tasks: Optional[List["PiiTask"]] = None, + key_phrase_extraction_tasks: Optional[List["KeyPhrasesTask"]] = None, + entity_linking_tasks: Optional[List["EntityLinkingTask"]] = None, + **kwargs + ): + super(JobManifestTasks, self).__init__(**kwargs) + self.entity_recognition_tasks = entity_recognition_tasks + self.entity_recognition_pii_tasks = entity_recognition_pii_tasks + self.key_phrase_extraction_tasks = key_phrase_extraction_tasks + self.entity_linking_tasks = entity_linking_tasks + + +class KeyPhraseResult(msrest.serialization.Model): + """KeyPhraseResult. + + All required parameters must be populated in order to send to Azure. + + :param documents: Required. Response by document. + :type documents: list[~azure.ai.textanalytics.v3_1_preview_4.models.DocumentKeyPhrases] + :param errors: Required. Errors by document id. 
+ :type errors: list[~azure.ai.textanalytics.v3_1_preview_4.models.DocumentError] + :param statistics: if showStats=true was specified in the request this field will contain + information about the request payload. + :type statistics: ~azure.ai.textanalytics.v3_1_preview_4.models.RequestStatistics + :param model_version: Required. This field indicates which model is used for scoring. + :type model_version: str + """ + + _validation = { + 'documents': {'required': True}, + 'errors': {'required': True}, + 'model_version': {'required': True}, + } + + _attribute_map = { + 'documents': {'key': 'documents', 'type': '[DocumentKeyPhrases]'}, + 'errors': {'key': 'errors', 'type': '[DocumentError]'}, + 'statistics': {'key': 'statistics', 'type': 'RequestStatistics'}, + 'model_version': {'key': 'modelVersion', 'type': 'str'}, + } + + def __init__( + self, + *, + documents: List["DocumentKeyPhrases"], + errors: List["DocumentError"], + model_version: str, + statistics: Optional["RequestStatistics"] = None, + **kwargs + ): + super(KeyPhraseResult, self).__init__(**kwargs) + self.documents = documents + self.errors = errors + self.statistics = statistics + self.model_version = model_version + + +class KeyPhrasesTask(msrest.serialization.Model): + """KeyPhrasesTask. + + :param parameters: + :type parameters: ~azure.ai.textanalytics.v3_1_preview_4.models.KeyPhrasesTaskParameters + """ + + _attribute_map = { + 'parameters': {'key': 'parameters', 'type': 'KeyPhrasesTaskParameters'}, + } + + def __init__( + self, + *, + parameters: Optional["KeyPhrasesTaskParameters"] = None, + **kwargs + ): + super(KeyPhrasesTask, self).__init__(**kwargs) + self.parameters = parameters + + +class KeyPhrasesTaskParameters(msrest.serialization.Model): + """KeyPhrasesTaskParameters. + + :param model_version: + :type model_version: str + """ + + _attribute_map = { + 'model_version': {'key': 'model-version', 'type': 'str'}, + } + + def __init__( + self, + *, + model_version: Optional[str] = "latest", + **kwargs + ): + super(KeyPhrasesTaskParameters, self).__init__(**kwargs) + self.model_version = model_version + + +class LanguageBatchInput(msrest.serialization.Model): + """LanguageBatchInput. + + All required parameters must be populated in order to send to Azure. + + :param documents: Required. + :type documents: list[~azure.ai.textanalytics.v3_1_preview_4.models.LanguageInput] + """ + + _validation = { + 'documents': {'required': True}, + } + + _attribute_map = { + 'documents': {'key': 'documents', 'type': '[LanguageInput]'}, + } + + def __init__( + self, + *, + documents: List["LanguageInput"], + **kwargs + ): + super(LanguageBatchInput, self).__init__(**kwargs) + self.documents = documents + + +class LanguageInput(msrest.serialization.Model): + """LanguageInput. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. Unique, non-empty document identifier. + :type id: str + :param text: Required. 
+ :type text: str + :param country_hint: + :type country_hint: str + """ + + _validation = { + 'id': {'required': True}, + 'text': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'text': {'key': 'text', 'type': 'str'}, + 'country_hint': {'key': 'countryHint', 'type': 'str'}, + } + + def __init__( + self, + *, + id: str, + text: str, + country_hint: Optional[str] = None, + **kwargs + ): + super(LanguageInput, self).__init__(**kwargs) + self.id = id + self.text = text + self.country_hint = country_hint + + +class LanguageResult(msrest.serialization.Model): + """LanguageResult. + + All required parameters must be populated in order to send to Azure. + + :param documents: Required. Response by document. + :type documents: list[~azure.ai.textanalytics.v3_1_preview_4.models.DocumentLanguage] + :param errors: Required. Errors by document id. + :type errors: list[~azure.ai.textanalytics.v3_1_preview_4.models.DocumentError] + :param statistics: if showStats=true was specified in the request this field will contain + information about the request payload. + :type statistics: ~azure.ai.textanalytics.v3_1_preview_4.models.RequestStatistics + :param model_version: Required. This field indicates which model is used for scoring. + :type model_version: str + """ + + _validation = { + 'documents': {'required': True}, + 'errors': {'required': True}, + 'model_version': {'required': True}, + } + + _attribute_map = { + 'documents': {'key': 'documents', 'type': '[DocumentLanguage]'}, + 'errors': {'key': 'errors', 'type': '[DocumentError]'}, + 'statistics': {'key': 'statistics', 'type': 'RequestStatistics'}, + 'model_version': {'key': 'modelVersion', 'type': 'str'}, + } + + def __init__( + self, + *, + documents: List["DocumentLanguage"], + errors: List["DocumentError"], + model_version: str, + statistics: Optional["RequestStatistics"] = None, + **kwargs + ): + super(LanguageResult, self).__init__(**kwargs) + self.documents = documents + self.errors = errors + self.statistics = statistics + self.model_version = model_version + + +class LinkedEntity(msrest.serialization.Model): + """LinkedEntity. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. Entity Linking formal name. + :type name: str + :param matches: Required. List of instances this entity appears in the text. + :type matches: list[~azure.ai.textanalytics.v3_1_preview_4.models.Match] + :param language: Required. Language used in the data source. + :type language: str + :param id: Unique identifier of the recognized entity from the data source. + :type id: str + :param url: Required. URL for the entity's page from the data source. + :type url: str + :param data_source: Required. Data source used to extract entity linking, such as Wiki/Bing + etc. + :type data_source: str + :param bing_id: Bing Entity Search API unique identifier of the recognized entity. 
+ :type bing_id: str + """ + + _validation = { + 'name': {'required': True}, + 'matches': {'required': True}, + 'language': {'required': True}, + 'url': {'required': True}, + 'data_source': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'matches': {'key': 'matches', 'type': '[Match]'}, + 'language': {'key': 'language', 'type': 'str'}, + 'id': {'key': 'id', 'type': 'str'}, + 'url': {'key': 'url', 'type': 'str'}, + 'data_source': {'key': 'dataSource', 'type': 'str'}, + 'bing_id': {'key': 'bingId', 'type': 'str'}, + } + + def __init__( + self, + *, + name: str, + matches: List["Match"], + language: str, + url: str, + data_source: str, + id: Optional[str] = None, + bing_id: Optional[str] = None, + **kwargs + ): + super(LinkedEntity, self).__init__(**kwargs) + self.name = name + self.matches = matches + self.language = language + self.id = id + self.url = url + self.data_source = data_source + self.bing_id = bing_id + + +class Match(msrest.serialization.Model): + """Match. + + All required parameters must be populated in order to send to Azure. + + :param confidence_score: Required. If a well known item is recognized, a decimal number + denoting the confidence level between 0 and 1 will be returned. + :type confidence_score: float + :param text: Required. Entity text as appears in the request. + :type text: str + :param offset: Required. Start position for the entity match text. + :type offset: int + :param length: Required. Length for the entity match text. + :type length: int + """ + + _validation = { + 'confidence_score': {'required': True}, + 'text': {'required': True}, + 'offset': {'required': True}, + 'length': {'required': True}, + } + + _attribute_map = { + 'confidence_score': {'key': 'confidenceScore', 'type': 'float'}, + 'text': {'key': 'text', 'type': 'str'}, + 'offset': {'key': 'offset', 'type': 'int'}, + 'length': {'key': 'length', 'type': 'int'}, + } + + def __init__( + self, + *, + confidence_score: float, + text: str, + offset: int, + length: int, + **kwargs + ): + super(Match, self).__init__(**kwargs) + self.confidence_score = confidence_score + self.text = text + self.offset = offset + self.length = length + + +class MultiLanguageBatchInput(msrest.serialization.Model): + """Contains a set of input documents to be analyzed by the service. + + All required parameters must be populated in order to send to Azure. + + :param documents: Required. The set of documents to process as part of this batch. + :type documents: list[~azure.ai.textanalytics.v3_1_preview_4.models.MultiLanguageInput] + """ + + _validation = { + 'documents': {'required': True}, + } + + _attribute_map = { + 'documents': {'key': 'documents', 'type': '[MultiLanguageInput]'}, + } + + def __init__( + self, + *, + documents: List["MultiLanguageInput"], + **kwargs + ): + super(MultiLanguageBatchInput, self).__init__(**kwargs) + self.documents = documents + + +class MultiLanguageInput(msrest.serialization.Model): + """Contains an input document to be analyzed by the service. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. A unique, non-empty document identifier. + :type id: str + :param text: Required. The input text to process. + :type text: str + :param language: (Optional) This is the 2 letter ISO 639-1 representation of a language. For + example, use "en" for English; "es" for Spanish etc. If not set, use "en" for English as + default. 
+ :type language: str + """ + + _validation = { + 'id': {'required': True}, + 'text': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'text': {'key': 'text', 'type': 'str'}, + 'language': {'key': 'language', 'type': 'str'}, + } + + def __init__( + self, + *, + id: str, + text: str, + language: Optional[str] = None, + **kwargs + ): + super(MultiLanguageInput, self).__init__(**kwargs) + self.id = id + self.text = text + self.language = language + + +class PiiDocumentEntities(msrest.serialization.Model): + """PiiDocumentEntities. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. Unique, non-empty document identifier. + :type id: str + :param redacted_text: Required. Returns redacted text. + :type redacted_text: str + :param entities: Required. Recognized entities in the document. + :type entities: list[~azure.ai.textanalytics.v3_1_preview_4.models.Entity] + :param warnings: Required. Warnings encountered while processing document. + :type warnings: list[~azure.ai.textanalytics.v3_1_preview_4.models.TextAnalyticsWarning] + :param statistics: if showStats=true was specified in the request this field will contain + information about the document payload. + :type statistics: ~azure.ai.textanalytics.v3_1_preview_4.models.DocumentStatistics + """ + + _validation = { + 'id': {'required': True}, + 'redacted_text': {'required': True}, + 'entities': {'required': True}, + 'warnings': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'redacted_text': {'key': 'redactedText', 'type': 'str'}, + 'entities': {'key': 'entities', 'type': '[Entity]'}, + 'warnings': {'key': 'warnings', 'type': '[TextAnalyticsWarning]'}, + 'statistics': {'key': 'statistics', 'type': 'DocumentStatistics'}, + } + + def __init__( + self, + *, + id: str, + redacted_text: str, + entities: List["Entity"], + warnings: List["TextAnalyticsWarning"], + statistics: Optional["DocumentStatistics"] = None, + **kwargs + ): + super(PiiDocumentEntities, self).__init__(**kwargs) + self.id = id + self.redacted_text = redacted_text + self.entities = entities + self.warnings = warnings + self.statistics = statistics + + +class PiiResult(msrest.serialization.Model): + """PiiResult. + + All required parameters must be populated in order to send to Azure. + + :param documents: Required. Response by document. + :type documents: list[~azure.ai.textanalytics.v3_1_preview_4.models.PiiDocumentEntities] + :param errors: Required. Errors by document id. + :type errors: list[~azure.ai.textanalytics.v3_1_preview_4.models.DocumentError] + :param statistics: if showStats=true was specified in the request this field will contain + information about the request payload. + :type statistics: ~azure.ai.textanalytics.v3_1_preview_4.models.RequestStatistics + :param model_version: Required. This field indicates which model is used for scoring. 
+ :type model_version: str + """ + + _validation = { + 'documents': {'required': True}, + 'errors': {'required': True}, + 'model_version': {'required': True}, + } + + _attribute_map = { + 'documents': {'key': 'documents', 'type': '[PiiDocumentEntities]'}, + 'errors': {'key': 'errors', 'type': '[DocumentError]'}, + 'statistics': {'key': 'statistics', 'type': 'RequestStatistics'}, + 'model_version': {'key': 'modelVersion', 'type': 'str'}, + } + + def __init__( + self, + *, + documents: List["PiiDocumentEntities"], + errors: List["DocumentError"], + model_version: str, + statistics: Optional["RequestStatistics"] = None, + **kwargs + ): + super(PiiResult, self).__init__(**kwargs) + self.documents = documents + self.errors = errors + self.statistics = statistics + self.model_version = model_version + + +class PiiTask(msrest.serialization.Model): + """PiiTask. + + :param parameters: + :type parameters: ~azure.ai.textanalytics.v3_1_preview_4.models.PiiTaskParameters + """ + + _attribute_map = { + 'parameters': {'key': 'parameters', 'type': 'PiiTaskParameters'}, + } + + def __init__( + self, + *, + parameters: Optional["PiiTaskParameters"] = None, + **kwargs + ): + super(PiiTask, self).__init__(**kwargs) + self.parameters = parameters + + +class PiiTaskParameters(msrest.serialization.Model): + """PiiTaskParameters. + + :param domain: Possible values include: "phi", "none". Default value: "none". + :type domain: str or ~azure.ai.textanalytics.v3_1_preview_4.models.PiiTaskParametersDomain + :param model_version: + :type model_version: str + :param pii_categories: (Optional) describes the PII categories to return. + :type pii_categories: list[str or ~azure.ai.textanalytics.v3_1_preview_4.models.PiiCategory] + :param string_index_type: Possible values include: "TextElements_v8", "UnicodeCodePoint", + "Utf16CodeUnit". Default value: "TextElements_v8". + :type string_index_type: str or + ~azure.ai.textanalytics.v3_1_preview_4.models.StringIndexTypeResponse + """ + + _validation = { + 'pii_categories': {'unique': True}, + } + + _attribute_map = { + 'domain': {'key': 'domain', 'type': 'str'}, + 'model_version': {'key': 'model-version', 'type': 'str'}, + 'pii_categories': {'key': 'piiCategories', 'type': '[str]'}, + 'string_index_type': {'key': 'stringIndexType', 'type': 'str'}, + } + + def __init__( + self, + *, + domain: Optional[Union[str, "PiiTaskParametersDomain"]] = "none", + model_version: Optional[str] = "latest", + pii_categories: Optional[List[Union[str, "PiiCategory"]]] = None, + string_index_type: Optional[Union[str, "StringIndexTypeResponse"]] = "TextElements_v8", + **kwargs + ): + super(PiiTaskParameters, self).__init__(**kwargs) + self.domain = domain + self.model_version = model_version + self.pii_categories = pii_categories + self.string_index_type = string_index_type + + +class RequestStatistics(msrest.serialization.Model): + """if showStats=true was specified in the request this field will contain information about the request payload. + + All required parameters must be populated in order to send to Azure. + + :param documents_count: Required. Number of documents submitted in the request. + :type documents_count: int + :param valid_documents_count: Required. Number of valid documents. This excludes empty, over- + size limit or non-supported languages documents. + :type valid_documents_count: int + :param erroneous_documents_count: Required. Number of invalid documents. This includes empty, + over-size limit or non-supported languages documents. 
+ :type erroneous_documents_count: int + :param transactions_count: Required. Number of transactions for the request. + :type transactions_count: long + """ + + _validation = { + 'documents_count': {'required': True}, + 'valid_documents_count': {'required': True}, + 'erroneous_documents_count': {'required': True}, + 'transactions_count': {'required': True}, + } + + _attribute_map = { + 'documents_count': {'key': 'documentsCount', 'type': 'int'}, + 'valid_documents_count': {'key': 'validDocumentsCount', 'type': 'int'}, + 'erroneous_documents_count': {'key': 'erroneousDocumentsCount', 'type': 'int'}, + 'transactions_count': {'key': 'transactionsCount', 'type': 'long'}, + } + + def __init__( + self, + *, + documents_count: int, + valid_documents_count: int, + erroneous_documents_count: int, + transactions_count: int, + **kwargs + ): + super(RequestStatistics, self).__init__(**kwargs) + self.documents_count = documents_count + self.valid_documents_count = valid_documents_count + self.erroneous_documents_count = erroneous_documents_count + self.transactions_count = transactions_count + + +class SentenceAssessment(msrest.serialization.Model): + """SentenceAssessment. + + All required parameters must be populated in order to send to Azure. + + :param sentiment: Required. Assessment sentiment in the sentence. Possible values include: + "positive", "mixed", "negative". + :type sentiment: str or ~azure.ai.textanalytics.v3_1_preview_4.models.TokenSentimentValue + :param confidence_scores: Required. Assessment sentiment confidence scores in the sentence. + :type confidence_scores: + ~azure.ai.textanalytics.v3_1_preview_4.models.TargetConfidenceScoreLabel + :param offset: Required. The assessment offset from the start of the sentence. + :type offset: int + :param length: Required. The length of the assessment. + :type length: int + :param text: Required. The assessment text detected. + :type text: str + :param is_negated: Required. The indicator representing if the assessment is negated. + :type is_negated: bool + """ + + _validation = { + 'sentiment': {'required': True}, + 'confidence_scores': {'required': True}, + 'offset': {'required': True}, + 'length': {'required': True}, + 'text': {'required': True}, + 'is_negated': {'required': True}, + } + + _attribute_map = { + 'sentiment': {'key': 'sentiment', 'type': 'str'}, + 'confidence_scores': {'key': 'confidenceScores', 'type': 'TargetConfidenceScoreLabel'}, + 'offset': {'key': 'offset', 'type': 'int'}, + 'length': {'key': 'length', 'type': 'int'}, + 'text': {'key': 'text', 'type': 'str'}, + 'is_negated': {'key': 'isNegated', 'type': 'bool'}, + } + + def __init__( + self, + *, + sentiment: Union[str, "TokenSentimentValue"], + confidence_scores: "TargetConfidenceScoreLabel", + offset: int, + length: int, + text: str, + is_negated: bool, + **kwargs + ): + super(SentenceAssessment, self).__init__(**kwargs) + self.sentiment = sentiment + self.confidence_scores = confidence_scores + self.offset = offset + self.length = length + self.text = text + self.is_negated = is_negated + + +class SentenceSentiment(msrest.serialization.Model): + """SentenceSentiment. + + All required parameters must be populated in order to send to Azure. + + :param text: Required. The sentence text. + :type text: str + :param sentiment: Required. The predicted Sentiment for the sentence. Possible values include: + "positive", "neutral", "negative". + :type sentiment: str or ~azure.ai.textanalytics.v3_1_preview_4.models.SentenceSentimentValue + :param confidence_scores: Required. 
The sentiment confidence score between 0 and 1 for the + sentence for all classes. + :type confidence_scores: + ~azure.ai.textanalytics.v3_1_preview_4.models.SentimentConfidenceScorePerLabel + :param offset: Required. The sentence offset from the start of the document. + :type offset: int + :param length: Required. The length of the sentence. + :type length: int + :param targets: The array of sentence targets for the sentence. + :type targets: list[~azure.ai.textanalytics.v3_1_preview_4.models.SentenceTarget] + :param assessments: The array of assessments for the sentence. + :type assessments: list[~azure.ai.textanalytics.v3_1_preview_4.models.SentenceAssessment] + """ + + _validation = { + 'text': {'required': True}, + 'sentiment': {'required': True}, + 'confidence_scores': {'required': True}, + 'offset': {'required': True}, + 'length': {'required': True}, + } + + _attribute_map = { + 'text': {'key': 'text', 'type': 'str'}, + 'sentiment': {'key': 'sentiment', 'type': 'str'}, + 'confidence_scores': {'key': 'confidenceScores', 'type': 'SentimentConfidenceScorePerLabel'}, + 'offset': {'key': 'offset', 'type': 'int'}, + 'length': {'key': 'length', 'type': 'int'}, + 'targets': {'key': 'targets', 'type': '[SentenceTarget]'}, + 'assessments': {'key': 'assessments', 'type': '[SentenceAssessment]'}, + } + + def __init__( + self, + *, + text: str, + sentiment: Union[str, "SentenceSentimentValue"], + confidence_scores: "SentimentConfidenceScorePerLabel", + offset: int, + length: int, + targets: Optional[List["SentenceTarget"]] = None, + assessments: Optional[List["SentenceAssessment"]] = None, + **kwargs + ): + super(SentenceSentiment, self).__init__(**kwargs) + self.text = text + self.sentiment = sentiment + self.confidence_scores = confidence_scores + self.offset = offset + self.length = length + self.targets = targets + self.assessments = assessments + + +class SentenceTarget(msrest.serialization.Model): + """SentenceTarget. + + All required parameters must be populated in order to send to Azure. + + :param sentiment: Required. Targeted sentiment in the sentence. Possible values include: + "positive", "mixed", "negative". + :type sentiment: str or ~azure.ai.textanalytics.v3_1_preview_4.models.TokenSentimentValue + :param confidence_scores: Required. Target sentiment confidence scores for the target in the + sentence. + :type confidence_scores: + ~azure.ai.textanalytics.v3_1_preview_4.models.TargetConfidenceScoreLabel + :param offset: Required. The target offset from the start of the sentence. + :type offset: int + :param length: Required. The length of the target. + :type length: int + :param text: Required. The target text detected. + :type text: str + :param relations: Required. The array of either assessment or target objects which is related + to the target. 
+ :type relations: list[~azure.ai.textanalytics.v3_1_preview_4.models.TargetRelation] + """ + + _validation = { + 'sentiment': {'required': True}, + 'confidence_scores': {'required': True}, + 'offset': {'required': True}, + 'length': {'required': True}, + 'text': {'required': True}, + 'relations': {'required': True}, + } + + _attribute_map = { + 'sentiment': {'key': 'sentiment', 'type': 'str'}, + 'confidence_scores': {'key': 'confidenceScores', 'type': 'TargetConfidenceScoreLabel'}, + 'offset': {'key': 'offset', 'type': 'int'}, + 'length': {'key': 'length', 'type': 'int'}, + 'text': {'key': 'text', 'type': 'str'}, + 'relations': {'key': 'relations', 'type': '[TargetRelation]'}, + } + + def __init__( + self, + *, + sentiment: Union[str, "TokenSentimentValue"], + confidence_scores: "TargetConfidenceScoreLabel", + offset: int, + length: int, + text: str, + relations: List["TargetRelation"], + **kwargs + ): + super(SentenceTarget, self).__init__(**kwargs) + self.sentiment = sentiment + self.confidence_scores = confidence_scores + self.offset = offset + self.length = length + self.text = text + self.relations = relations + + +class SentimentConfidenceScorePerLabel(msrest.serialization.Model): + """Represents the confidence scores between 0 and 1 across all sentiment classes: positive, neutral, negative. + + All required parameters must be populated in order to send to Azure. + + :param positive: Required. + :type positive: float + :param neutral: Required. + :type neutral: float + :param negative: Required. + :type negative: float + """ + + _validation = { + 'positive': {'required': True}, + 'neutral': {'required': True}, + 'negative': {'required': True}, + } + + _attribute_map = { + 'positive': {'key': 'positive', 'type': 'float'}, + 'neutral': {'key': 'neutral', 'type': 'float'}, + 'negative': {'key': 'negative', 'type': 'float'}, + } + + def __init__( + self, + *, + positive: float, + neutral: float, + negative: float, + **kwargs + ): + super(SentimentConfidenceScorePerLabel, self).__init__(**kwargs) + self.positive = positive + self.neutral = neutral + self.negative = negative + + +class SentimentResponse(msrest.serialization.Model): + """SentimentResponse. + + All required parameters must be populated in order to send to Azure. + + :param documents: Required. Sentiment analysis per document. + :type documents: list[~azure.ai.textanalytics.v3_1_preview_4.models.DocumentSentiment] + :param errors: Required. Errors by document id. + :type errors: list[~azure.ai.textanalytics.v3_1_preview_4.models.DocumentError] + :param statistics: if showStats=true was specified in the request this field will contain + information about the request payload. + :type statistics: ~azure.ai.textanalytics.v3_1_preview_4.models.RequestStatistics + :param model_version: Required. This field indicates which model is used for scoring. 
+ :type model_version: str + """ + + _validation = { + 'documents': {'required': True}, + 'errors': {'required': True}, + 'model_version': {'required': True}, + } + + _attribute_map = { + 'documents': {'key': 'documents', 'type': '[DocumentSentiment]'}, + 'errors': {'key': 'errors', 'type': '[DocumentError]'}, + 'statistics': {'key': 'statistics', 'type': 'RequestStatistics'}, + 'model_version': {'key': 'modelVersion', 'type': 'str'}, + } + + def __init__( + self, + *, + documents: List["DocumentSentiment"], + errors: List["DocumentError"], + model_version: str, + statistics: Optional["RequestStatistics"] = None, + **kwargs + ): + super(SentimentResponse, self).__init__(**kwargs) + self.documents = documents + self.errors = errors + self.statistics = statistics + self.model_version = model_version + + +class TargetConfidenceScoreLabel(msrest.serialization.Model): + """Represents the confidence scores across all sentiment classes: positive, neutral, negative. + + All required parameters must be populated in order to send to Azure. + + :param positive: Required. + :type positive: float + :param negative: Required. + :type negative: float + """ + + _validation = { + 'positive': {'required': True}, + 'negative': {'required': True}, + } + + _attribute_map = { + 'positive': {'key': 'positive', 'type': 'float'}, + 'negative': {'key': 'negative', 'type': 'float'}, + } + + def __init__( + self, + *, + positive: float, + negative: float, + **kwargs + ): + super(TargetConfidenceScoreLabel, self).__init__(**kwargs) + self.positive = positive + self.negative = negative + + +class TargetRelation(msrest.serialization.Model): + """TargetRelation. + + All required parameters must be populated in order to send to Azure. + + :param relation_type: Required. The type related to the target. Possible values include: + "assessment", "target". + :type relation_type: str or ~azure.ai.textanalytics.v3_1_preview_4.models.TargetRelationType + :param ref: Required. The JSON pointer indicating the linked object. + :type ref: str + """ + + _validation = { + 'relation_type': {'required': True}, + 'ref': {'required': True}, + } + + _attribute_map = { + 'relation_type': {'key': 'relationType', 'type': 'str'}, + 'ref': {'key': 'ref', 'type': 'str'}, + } + + def __init__( + self, + *, + relation_type: Union[str, "TargetRelationType"], + ref: str, + **kwargs + ): + super(TargetRelation, self).__init__(**kwargs) + self.relation_type = relation_type + self.ref = ref + + +class TasksStateTasks(msrest.serialization.Model): + """TasksStateTasks. + + All required parameters must be populated in order to send to Azure. + + :param details: + :type details: ~azure.ai.textanalytics.v3_1_preview_4.models.TasksStateTasksDetails + :param completed: Required. + :type completed: int + :param failed: Required. + :type failed: int + :param in_progress: Required. + :type in_progress: int + :param total: Required. 
+ :type total: int + :param entity_recognition_tasks: + :type entity_recognition_tasks: + list[~azure.ai.textanalytics.v3_1_preview_4.models.TasksStateTasksEntityRecognitionTasksItem] + :param entity_recognition_pii_tasks: + :type entity_recognition_pii_tasks: + list[~azure.ai.textanalytics.v3_1_preview_4.models.TasksStateTasksEntityRecognitionPiiTasksItem] + :param key_phrase_extraction_tasks: + :type key_phrase_extraction_tasks: + list[~azure.ai.textanalytics.v3_1_preview_4.models.TasksStateTasksKeyPhraseExtractionTasksItem] + :param entity_linking_tasks: + :type entity_linking_tasks: + list[~azure.ai.textanalytics.v3_1_preview_4.models.TasksStateTasksEntityLinkingTasksItem] + """ + + _validation = { + 'completed': {'required': True}, + 'failed': {'required': True}, + 'in_progress': {'required': True}, + 'total': {'required': True}, + } + + _attribute_map = { + 'details': {'key': 'details', 'type': 'TasksStateTasksDetails'}, + 'completed': {'key': 'completed', 'type': 'int'}, + 'failed': {'key': 'failed', 'type': 'int'}, + 'in_progress': {'key': 'inProgress', 'type': 'int'}, + 'total': {'key': 'total', 'type': 'int'}, + 'entity_recognition_tasks': {'key': 'entityRecognitionTasks', 'type': '[TasksStateTasksEntityRecognitionTasksItem]'}, + 'entity_recognition_pii_tasks': {'key': 'entityRecognitionPiiTasks', 'type': '[TasksStateTasksEntityRecognitionPiiTasksItem]'}, + 'key_phrase_extraction_tasks': {'key': 'keyPhraseExtractionTasks', 'type': '[TasksStateTasksKeyPhraseExtractionTasksItem]'}, + 'entity_linking_tasks': {'key': 'entityLinkingTasks', 'type': '[TasksStateTasksEntityLinkingTasksItem]'}, + } + + def __init__( + self, + *, + completed: int, + failed: int, + in_progress: int, + total: int, + details: Optional["TasksStateTasksDetails"] = None, + entity_recognition_tasks: Optional[List["TasksStateTasksEntityRecognitionTasksItem"]] = None, + entity_recognition_pii_tasks: Optional[List["TasksStateTasksEntityRecognitionPiiTasksItem"]] = None, + key_phrase_extraction_tasks: Optional[List["TasksStateTasksKeyPhraseExtractionTasksItem"]] = None, + entity_linking_tasks: Optional[List["TasksStateTasksEntityLinkingTasksItem"]] = None, + **kwargs + ): + super(TasksStateTasks, self).__init__(**kwargs) + self.details = details + self.completed = completed + self.failed = failed + self.in_progress = in_progress + self.total = total + self.entity_recognition_tasks = entity_recognition_tasks + self.entity_recognition_pii_tasks = entity_recognition_pii_tasks + self.key_phrase_extraction_tasks = key_phrase_extraction_tasks + self.entity_linking_tasks = entity_linking_tasks + + +class TaskState(msrest.serialization.Model): + """TaskState. + + All required parameters must be populated in order to send to Azure. + + :param last_update_date_time: Required. + :type last_update_date_time: ~datetime.datetime + :param name: + :type name: str + :param status: Required. Possible values include: "notStarted", "running", "succeeded", + "failed", "rejected", "cancelled", "cancelling", "partiallyCompleted". 
+ :type status: str or ~azure.ai.textanalytics.v3_1_preview_4.models.State + """ + + _validation = { + 'last_update_date_time': {'required': True}, + 'status': {'required': True}, + } + + _attribute_map = { + 'last_update_date_time': {'key': 'lastUpdateDateTime', 'type': 'iso-8601'}, + 'name': {'key': 'name', 'type': 'str'}, + 'status': {'key': 'status', 'type': 'str'}, + } + + def __init__( + self, + *, + last_update_date_time: datetime.datetime, + status: Union[str, "State"], + name: Optional[str] = None, + **kwargs + ): + super(TaskState, self).__init__(**kwargs) + self.last_update_date_time = last_update_date_time + self.name = name + self.status = status + + +class TasksStateTasksDetails(TaskState): + """TasksStateTasksDetails. + + All required parameters must be populated in order to send to Azure. + + :param last_update_date_time: Required. + :type last_update_date_time: ~datetime.datetime + :param name: + :type name: str + :param status: Required. Possible values include: "notStarted", "running", "succeeded", + "failed", "rejected", "cancelled", "cancelling", "partiallyCompleted". + :type status: str or ~azure.ai.textanalytics.v3_1_preview_4.models.State + """ + + _validation = { + 'last_update_date_time': {'required': True}, + 'status': {'required': True}, + } + + _attribute_map = { + 'last_update_date_time': {'key': 'lastUpdateDateTime', 'type': 'iso-8601'}, + 'name': {'key': 'name', 'type': 'str'}, + 'status': {'key': 'status', 'type': 'str'}, + } + + def __init__( + self, + *, + last_update_date_time: datetime.datetime, + status: Union[str, "State"], + name: Optional[str] = None, + **kwargs + ): + super(TasksStateTasksDetails, self).__init__(last_update_date_time=last_update_date_time, name=name, status=status, **kwargs) + + +class TasksStateTasksEntityLinkingTasksItem(TaskState, ComponentsIfu7BjSchemasTasksstatePropertiesTasksPropertiesEntitylinkingtasksItemsAllof1): + """TasksStateTasksEntityLinkingTasksItem. + + All required parameters must be populated in order to send to Azure. + + :param results: + :type results: ~azure.ai.textanalytics.v3_1_preview_4.models.EntityLinkingResult + :param last_update_date_time: Required. + :type last_update_date_time: ~datetime.datetime + :param name: + :type name: str + :param status: Required. Possible values include: "notStarted", "running", "succeeded", + "failed", "rejected", "cancelled", "cancelling", "partiallyCompleted". 
+ :type status: str or ~azure.ai.textanalytics.v3_1_preview_4.models.State + """ + + _validation = { + 'last_update_date_time': {'required': True}, + 'status': {'required': True}, + } + + _attribute_map = { + 'results': {'key': 'results', 'type': 'EntityLinkingResult'}, + 'last_update_date_time': {'key': 'lastUpdateDateTime', 'type': 'iso-8601'}, + 'name': {'key': 'name', 'type': 'str'}, + 'status': {'key': 'status', 'type': 'str'}, + } + + def __init__( + self, + *, + last_update_date_time: datetime.datetime, + status: Union[str, "State"], + results: Optional["EntityLinkingResult"] = None, + name: Optional[str] = None, + **kwargs + ): + super(TasksStateTasksEntityLinkingTasksItem, self).__init__(last_update_date_time=last_update_date_time, name=name, status=status, results=results, **kwargs) + self.results = results + self.last_update_date_time = last_update_date_time + self.name = name + self.status = status + + +class TasksStateTasksEntityRecognitionPiiTasksItem(TaskState, Components15X8E9LSchemasTasksstatePropertiesTasksPropertiesEntityrecognitionpiitasksItemsAllof1): + """TasksStateTasksEntityRecognitionPiiTasksItem. + + All required parameters must be populated in order to send to Azure. + + :param results: + :type results: ~azure.ai.textanalytics.v3_1_preview_4.models.PiiResult + :param last_update_date_time: Required. + :type last_update_date_time: ~datetime.datetime + :param name: + :type name: str + :param status: Required. Possible values include: "notStarted", "running", "succeeded", + "failed", "rejected", "cancelled", "cancelling", "partiallyCompleted". + :type status: str or ~azure.ai.textanalytics.v3_1_preview_4.models.State + """ + + _validation = { + 'last_update_date_time': {'required': True}, + 'status': {'required': True}, + } + + _attribute_map = { + 'results': {'key': 'results', 'type': 'PiiResult'}, + 'last_update_date_time': {'key': 'lastUpdateDateTime', 'type': 'iso-8601'}, + 'name': {'key': 'name', 'type': 'str'}, + 'status': {'key': 'status', 'type': 'str'}, + } + + def __init__( + self, + *, + last_update_date_time: datetime.datetime, + status: Union[str, "State"], + results: Optional["PiiResult"] = None, + name: Optional[str] = None, + **kwargs + ): + super(TasksStateTasksEntityRecognitionPiiTasksItem, self).__init__(last_update_date_time=last_update_date_time, name=name, status=status, results=results, **kwargs) + self.results = results + self.last_update_date_time = last_update_date_time + self.name = name + self.status = status + + +class TasksStateTasksEntityRecognitionTasksItem(TaskState, Components15Gvwi3SchemasTasksstatePropertiesTasksPropertiesEntityrecognitiontasksItemsAllof1): + """TasksStateTasksEntityRecognitionTasksItem. + + All required parameters must be populated in order to send to Azure. + + :param results: + :type results: ~azure.ai.textanalytics.v3_1_preview_4.models.EntitiesResult + :param last_update_date_time: Required. + :type last_update_date_time: ~datetime.datetime + :param name: + :type name: str + :param status: Required. Possible values include: "notStarted", "running", "succeeded", + "failed", "rejected", "cancelled", "cancelling", "partiallyCompleted". 
+ :type status: str or ~azure.ai.textanalytics.v3_1_preview_4.models.State + """ + + _validation = { + 'last_update_date_time': {'required': True}, + 'status': {'required': True}, + } + + _attribute_map = { + 'results': {'key': 'results', 'type': 'EntitiesResult'}, + 'last_update_date_time': {'key': 'lastUpdateDateTime', 'type': 'iso-8601'}, + 'name': {'key': 'name', 'type': 'str'}, + 'status': {'key': 'status', 'type': 'str'}, + } + + def __init__( + self, + *, + last_update_date_time: datetime.datetime, + status: Union[str, "State"], + results: Optional["EntitiesResult"] = None, + name: Optional[str] = None, + **kwargs + ): + super(TasksStateTasksEntityRecognitionTasksItem, self).__init__(last_update_date_time=last_update_date_time, name=name, status=status, results=results, **kwargs) + self.results = results + self.last_update_date_time = last_update_date_time + self.name = name + self.status = status + + +class TasksStateTasksKeyPhraseExtractionTasksItem(TaskState, Components1D9IzucSchemasTasksstatePropertiesTasksPropertiesKeyphraseextractiontasksItemsAllof1): + """TasksStateTasksKeyPhraseExtractionTasksItem. + + All required parameters must be populated in order to send to Azure. + + :param results: + :type results: ~azure.ai.textanalytics.v3_1_preview_4.models.KeyPhraseResult + :param last_update_date_time: Required. + :type last_update_date_time: ~datetime.datetime + :param name: + :type name: str + :param status: Required. Possible values include: "notStarted", "running", "succeeded", + "failed", "rejected", "cancelled", "cancelling", "partiallyCompleted". + :type status: str or ~azure.ai.textanalytics.v3_1_preview_4.models.State + """ + + _validation = { + 'last_update_date_time': {'required': True}, + 'status': {'required': True}, + } + + _attribute_map = { + 'results': {'key': 'results', 'type': 'KeyPhraseResult'}, + 'last_update_date_time': {'key': 'lastUpdateDateTime', 'type': 'iso-8601'}, + 'name': {'key': 'name', 'type': 'str'}, + 'status': {'key': 'status', 'type': 'str'}, + } + + def __init__( + self, + *, + last_update_date_time: datetime.datetime, + status: Union[str, "State"], + results: Optional["KeyPhraseResult"] = None, + name: Optional[str] = None, + **kwargs + ): + super(TasksStateTasksKeyPhraseExtractionTasksItem, self).__init__(last_update_date_time=last_update_date_time, name=name, status=status, results=results, **kwargs) + self.results = results + self.last_update_date_time = last_update_date_time + self.name = name + self.status = status + + +class TextAnalyticsError(msrest.serialization.Model): + """TextAnalyticsError. + + All required parameters must be populated in order to send to Azure. + + :param code: Required. Error code. Possible values include: "InvalidRequest", + "InvalidArgument", "InternalServerError", "ServiceUnavailable", "NotFound". + :type code: str or ~azure.ai.textanalytics.v3_1_preview_4.models.ErrorCodeValue + :param message: Required. Error message. + :type message: str + :param target: Error target. + :type target: str + :param innererror: Inner error contains more specific information. + :type innererror: ~azure.ai.textanalytics.v3_1_preview_4.models.InnerError + :param details: Details about specific errors that led to this reported error. 
+ :type details: list[~azure.ai.textanalytics.v3_1_preview_4.models.TextAnalyticsError] + """ + + _validation = { + 'code': {'required': True}, + 'message': {'required': True}, + } + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'target': {'key': 'target', 'type': 'str'}, + 'innererror': {'key': 'innererror', 'type': 'InnerError'}, + 'details': {'key': 'details', 'type': '[TextAnalyticsError]'}, + } + + def __init__( + self, + *, + code: Union[str, "ErrorCodeValue"], + message: str, + target: Optional[str] = None, + innererror: Optional["InnerError"] = None, + details: Optional[List["TextAnalyticsError"]] = None, + **kwargs + ): + super(TextAnalyticsError, self).__init__(**kwargs) + self.code = code + self.message = message + self.target = target + self.innererror = innererror + self.details = details + + +class TextAnalyticsWarning(msrest.serialization.Model): + """TextAnalyticsWarning. + + All required parameters must be populated in order to send to Azure. + + :param code: Required. Error code. Possible values include: "LongWordsInDocument", + "DocumentTruncated". + :type code: str or ~azure.ai.textanalytics.v3_1_preview_4.models.WarningCodeValue + :param message: Required. Warning message. + :type message: str + :param target_ref: A JSON pointer reference indicating the target object. + :type target_ref: str + """ + + _validation = { + 'code': {'required': True}, + 'message': {'required': True}, + } + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'target_ref': {'key': 'targetRef', 'type': 'str'}, + } + + def __init__( + self, + *, + code: Union[str, "WarningCodeValue"], + message: str, + target_ref: Optional[str] = None, + **kwargs + ): + super(TextAnalyticsWarning, self).__init__(**kwargs) + self.code = code + self.message = message + self.target_ref = target_ref diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_4/models/_text_analytics_client_enums.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_4/models/_text_analytics_client_enums.py new file mode 100644 index 000000000000..227630b77b02 --- /dev/null +++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_4/models/_text_analytics_client_enums.py @@ -0,0 +1,358 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from enum import Enum, EnumMeta +from six import with_metaclass + +class _CaseInsensitiveEnumMeta(EnumMeta): + def __getitem__(self, name): + return super().__getitem__(name.upper()) + + def __getattr__(cls, name): + """Return the enum member matching `name` + We use __getattr__ instead of descriptors or inserting into the enum + class' __dict__ in order to support `name` and `value` being both + properties for enum members (which live in the class' __dict__) and + enum members themselves. 
+ """ + try: + return cls._member_map_[name.upper()] + except KeyError: + raise AttributeError(name) + + +class Association(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Describes if the entity is the subject of the text or if it describes someone else. + """ + + SUBJECT = "subject" + OTHER = "other" + +class Certainty(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Describes the entities certainty and polarity. + """ + + POSITIVE = "Positive" + POSITIVE_POSSIBLE = "Positive Possible" + NEUTRAL_POSSIBLE = "Neutral Possible" + NEGATIVE_POSSIBLE = "Negative Possible" + NEGATIVE = "Negative" + +class Conditionality(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Describes any conditionality on the entity. + """ + + HYPOTHETICAL = "Hypothetical" + CONDITIONAL = "Conditional" + +class DocumentSentimentValue(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Predicted sentiment for document (Negative, Neutral, Positive, or Mixed). + """ + + POSITIVE = "positive" + NEUTRAL = "neutral" + NEGATIVE = "negative" + MIXED = "mixed" + +class ErrorCodeValue(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Error code. + """ + + INVALID_REQUEST = "InvalidRequest" + INVALID_ARGUMENT = "InvalidArgument" + INTERNAL_SERVER_ERROR = "InternalServerError" + SERVICE_UNAVAILABLE = "ServiceUnavailable" + NOT_FOUND = "NotFound" + +class InnerErrorCodeValue(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Error code. + """ + + INVALID_PARAMETER_VALUE = "InvalidParameterValue" + INVALID_REQUEST_BODY_FORMAT = "InvalidRequestBodyFormat" + EMPTY_REQUEST = "EmptyRequest" + MISSING_INPUT_RECORDS = "MissingInputRecords" + INVALID_DOCUMENT = "InvalidDocument" + MODEL_VERSION_INCORRECT = "ModelVersionIncorrect" + INVALID_DOCUMENT_BATCH = "InvalidDocumentBatch" + UNSUPPORTED_LANGUAGE_CODE = "UnsupportedLanguageCode" + INVALID_COUNTRY_HINT = "InvalidCountryHint" + +class PiiCategory(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + ABA_ROUTING_NUMBER = "ABARoutingNumber" + AR_NATIONAL_IDENTITY_NUMBER = "ARNationalIdentityNumber" + AU_BANK_ACCOUNT_NUMBER = "AUBankAccountNumber" + AU_DRIVERS_LICENSE_NUMBER = "AUDriversLicenseNumber" + AU_MEDICAL_ACCOUNT_NUMBER = "AUMedicalAccountNumber" + AU_PASSPORT_NUMBER = "AUPassportNumber" + AU_TAX_FILE_NUMBER = "AUTaxFileNumber" + AU_BUSINESS_NUMBER = "AUBusinessNumber" + AU_COMPANY_NUMBER = "AUCompanyNumber" + AT_IDENTITY_CARD = "ATIdentityCard" + AT_TAX_IDENTIFICATION_NUMBER = "ATTaxIdentificationNumber" + AT_VALUE_ADDED_TAX_NUMBER = "ATValueAddedTaxNumber" + AZURE_DOCUMENT_DB_AUTH_KEY = "AzureDocumentDBAuthKey" + AZURE_IAAS_DATABASE_CONNECTION_AND_SQL_STRING = "AzureIAASDatabaseConnectionAndSQLString" + AZURE_IO_T_CONNECTION_STRING = "AzureIoTConnectionString" + AZURE_PUBLISH_SETTING_PASSWORD = "AzurePublishSettingPassword" + AZURE_REDIS_CACHE_STRING = "AzureRedisCacheString" + AZURE_SAS = "AzureSAS" + AZURE_SERVICE_BUS_STRING = "AzureServiceBusString" + AZURE_STORAGE_ACCOUNT_KEY = "AzureStorageAccountKey" + AZURE_STORAGE_ACCOUNT_GENERIC = "AzureStorageAccountGeneric" + BE_NATIONAL_NUMBER = "BENationalNumber" + BE_NATIONAL_NUMBER_V2 = "BENationalNumberV2" + BE_VALUE_ADDED_TAX_NUMBER = "BEValueAddedTaxNumber" + BRCPF_NUMBER = "BRCPFNumber" + BR_LEGAL_ENTITY_NUMBER = "BRLegalEntityNumber" + BR_NATIONAL_IDRG = "BRNationalIDRG" + BG_UNIFORM_CIVIL_NUMBER = "BGUniformCivilNumber" + CA_BANK_ACCOUNT_NUMBER = "CABankAccountNumber" + CA_DRIVERS_LICENSE_NUMBER = "CADriversLicenseNumber" + CA_HEALTH_SERVICE_NUMBER = 
"CAHealthServiceNumber" + CA_PASSPORT_NUMBER = "CAPassportNumber" + CA_PERSONAL_HEALTH_IDENTIFICATION = "CAPersonalHealthIdentification" + CA_SOCIAL_INSURANCE_NUMBER = "CASocialInsuranceNumber" + CL_IDENTITY_CARD_NUMBER = "CLIdentityCardNumber" + CN_RESIDENT_IDENTITY_CARD_NUMBER = "CNResidentIdentityCardNumber" + CREDIT_CARD_NUMBER = "CreditCardNumber" + HR_IDENTITY_CARD_NUMBER = "HRIdentityCardNumber" + HR_NATIONAL_ID_NUMBER = "HRNationalIDNumber" + HR_PERSONAL_IDENTIFICATION_NUMBER = "HRPersonalIdentificationNumber" + HR_PERSONAL_IDENTIFICATION_OIB_NUMBER_V2 = "HRPersonalIdentificationOIBNumberV2" + CY_IDENTITY_CARD = "CYIdentityCard" + CY_TAX_IDENTIFICATION_NUMBER = "CYTaxIdentificationNumber" + CZ_PERSONAL_IDENTITY_NUMBER = "CZPersonalIdentityNumber" + CZ_PERSONAL_IDENTITY_V2 = "CZPersonalIdentityV2" + DK_PERSONAL_IDENTIFICATION_NUMBER = "DKPersonalIdentificationNumber" + DK_PERSONAL_IDENTIFICATION_V2 = "DKPersonalIdentificationV2" + DRUG_ENFORCEMENT_AGENCY_NUMBER = "DrugEnforcementAgencyNumber" + EE_PERSONAL_IDENTIFICATION_CODE = "EEPersonalIdentificationCode" + EU_DEBIT_CARD_NUMBER = "EUDebitCardNumber" + EU_DRIVERS_LICENSE_NUMBER = "EUDriversLicenseNumber" + EUGPS_COORDINATES = "EUGPSCoordinates" + EU_NATIONAL_IDENTIFICATION_NUMBER = "EUNationalIdentificationNumber" + EU_PASSPORT_NUMBER = "EUPassportNumber" + EU_SOCIAL_SECURITY_NUMBER = "EUSocialSecurityNumber" + EU_TAX_IDENTIFICATION_NUMBER = "EUTaxIdentificationNumber" + FI_EUROPEAN_HEALTH_NUMBER = "FIEuropeanHealthNumber" + FI_NATIONAL_ID = "FINationalID" + FI_NATIONAL_IDV2 = "FINationalIDV2" + FI_PASSPORT_NUMBER = "FIPassportNumber" + FR_DRIVERS_LICENSE_NUMBER = "FRDriversLicenseNumber" + FR_HEALTH_INSURANCE_NUMBER = "FRHealthInsuranceNumber" + FR_NATIONAL_ID = "FRNationalID" + FR_PASSPORT_NUMBER = "FRPassportNumber" + FR_SOCIAL_SECURITY_NUMBER = "FRSocialSecurityNumber" + FR_TAX_IDENTIFICATION_NUMBER = "FRTaxIdentificationNumber" + FR_VALUE_ADDED_TAX_NUMBER = "FRValueAddedTaxNumber" + DE_DRIVERS_LICENSE_NUMBER = "DEDriversLicenseNumber" + DE_PASSPORT_NUMBER = "DEPassportNumber" + DE_IDENTITY_CARD_NUMBER = "DEIdentityCardNumber" + DE_TAX_IDENTIFICATION_NUMBER = "DETaxIdentificationNumber" + DE_VALUE_ADDED_NUMBER = "DEValueAddedNumber" + GR_NATIONAL_ID_CARD = "GRNationalIDCard" + GR_NATIONAL_IDV2 = "GRNationalIDV2" + GR_TAX_IDENTIFICATION_NUMBER = "GRTaxIdentificationNumber" + HK_IDENTITY_CARD_NUMBER = "HKIdentityCardNumber" + HU_VALUE_ADDED_NUMBER = "HUValueAddedNumber" + HU_PERSONAL_IDENTIFICATION_NUMBER = "HUPersonalIdentificationNumber" + HU_TAX_IDENTIFICATION_NUMBER = "HUTaxIdentificationNumber" + IN_PERMANENT_ACCOUNT = "INPermanentAccount" + IN_UNIQUE_IDENTIFICATION_NUMBER = "INUniqueIdentificationNumber" + ID_IDENTITY_CARD_NUMBER = "IDIdentityCardNumber" + INTERNATIONAL_BANKING_ACCOUNT_NUMBER = "InternationalBankingAccountNumber" + IE_PERSONAL_PUBLIC_SERVICE_NUMBER = "IEPersonalPublicServiceNumber" + IE_PERSONAL_PUBLIC_SERVICE_NUMBER_V2 = "IEPersonalPublicServiceNumberV2" + IL_BANK_ACCOUNT_NUMBER = "ILBankAccountNumber" + IL_NATIONAL_ID = "ILNationalID" + IT_DRIVERS_LICENSE_NUMBER = "ITDriversLicenseNumber" + IT_FISCAL_CODE = "ITFiscalCode" + IT_VALUE_ADDED_TAX_NUMBER = "ITValueAddedTaxNumber" + JP_BANK_ACCOUNT_NUMBER = "JPBankAccountNumber" + JP_DRIVERS_LICENSE_NUMBER = "JPDriversLicenseNumber" + JP_PASSPORT_NUMBER = "JPPassportNumber" + JP_RESIDENT_REGISTRATION_NUMBER = "JPResidentRegistrationNumber" + JP_SOCIAL_INSURANCE_NUMBER = "JPSocialInsuranceNumber" + JP_MY_NUMBER_CORPORATE = "JPMyNumberCorporate" + 
JP_MY_NUMBER_PERSONAL = "JPMyNumberPersonal" + JP_RESIDENCE_CARD_NUMBER = "JPResidenceCardNumber" + LV_PERSONAL_CODE = "LVPersonalCode" + LT_PERSONAL_CODE = "LTPersonalCode" + LU_NATIONAL_IDENTIFICATION_NUMBER_NATURAL = "LUNationalIdentificationNumberNatural" + LU_NATIONAL_IDENTIFICATION_NUMBER_NON_NATURAL = "LUNationalIdentificationNumberNonNatural" + MY_IDENTITY_CARD_NUMBER = "MYIdentityCardNumber" + MT_IDENTITY_CARD_NUMBER = "MTIdentityCardNumber" + MT_TAX_ID_NUMBER = "MTTaxIDNumber" + NL_CITIZENS_SERVICE_NUMBER = "NLCitizensServiceNumber" + NL_CITIZENS_SERVICE_NUMBER_V2 = "NLCitizensServiceNumberV2" + NL_TAX_IDENTIFICATION_NUMBER = "NLTaxIdentificationNumber" + NL_VALUE_ADDED_TAX_NUMBER = "NLValueAddedTaxNumber" + NZ_BANK_ACCOUNT_NUMBER = "NZBankAccountNumber" + NZ_DRIVERS_LICENSE_NUMBER = "NZDriversLicenseNumber" + NZ_INLAND_REVENUE_NUMBER = "NZInlandRevenueNumber" + NZ_MINISTRY_OF_HEALTH_NUMBER = "NZMinistryOfHealthNumber" + NZ_SOCIAL_WELFARE_NUMBER = "NZSocialWelfareNumber" + NO_IDENTITY_NUMBER = "NOIdentityNumber" + PH_UNIFIED_MULTI_PURPOSE_ID_NUMBER = "PHUnifiedMultiPurposeIDNumber" + PL_IDENTITY_CARD = "PLIdentityCard" + PL_NATIONAL_ID = "PLNationalID" + PL_NATIONAL_IDV2 = "PLNationalIDV2" + PL_PASSPORT_NUMBER = "PLPassportNumber" + PL_TAX_IDENTIFICATION_NUMBER = "PLTaxIdentificationNumber" + PLREGON_NUMBER = "PLREGONNumber" + PT_CITIZEN_CARD_NUMBER = "PTCitizenCardNumber" + PT_CITIZEN_CARD_NUMBER_V2 = "PTCitizenCardNumberV2" + PT_TAX_IDENTIFICATION_NUMBER = "PTTaxIdentificationNumber" + RO_PERSONAL_NUMERICAL_CODE = "ROPersonalNumericalCode" + RU_PASSPORT_NUMBER_DOMESTIC = "RUPassportNumberDomestic" + RU_PASSPORT_NUMBER_INTERNATIONAL = "RUPassportNumberInternational" + SA_NATIONAL_ID = "SANationalID" + SG_NATIONAL_REGISTRATION_IDENTITY_CARD_NUMBER = "SGNationalRegistrationIdentityCardNumber" + SK_PERSONAL_NUMBER = "SKPersonalNumber" + SI_TAX_IDENTIFICATION_NUMBER = "SITaxIdentificationNumber" + SI_UNIQUE_MASTER_CITIZEN_NUMBER = "SIUniqueMasterCitizenNumber" + ZA_IDENTIFICATION_NUMBER = "ZAIdentificationNumber" + KR_RESIDENT_REGISTRATION_NUMBER = "KRResidentRegistrationNumber" + ESDNI = "ESDNI" + ES_SOCIAL_SECURITY_NUMBER = "ESSocialSecurityNumber" + ES_TAX_IDENTIFICATION_NUMBER = "ESTaxIdentificationNumber" + SQL_SERVER_CONNECTION_STRING = "SQLServerConnectionString" + SE_NATIONAL_ID = "SENationalID" + SE_NATIONAL_IDV2 = "SENationalIDV2" + SE_PASSPORT_NUMBER = "SEPassportNumber" + SE_TAX_IDENTIFICATION_NUMBER = "SETaxIdentificationNumber" + SWIFT_CODE = "SWIFTCode" + CH_SOCIAL_SECURITY_NUMBER = "CHSocialSecurityNumber" + TW_NATIONAL_ID = "TWNationalID" + TW_PASSPORT_NUMBER = "TWPassportNumber" + TW_RESIDENT_CERTIFICATE = "TWResidentCertificate" + TH_POPULATION_IDENTIFICATION_CODE = "THPopulationIdentificationCode" + TR_NATIONAL_IDENTIFICATION_NUMBER = "TRNationalIdentificationNumber" + UK_DRIVERS_LICENSE_NUMBER = "UKDriversLicenseNumber" + UK_ELECTORAL_ROLL_NUMBER = "UKElectoralRollNumber" + UK_NATIONAL_HEALTH_NUMBER = "UKNationalHealthNumber" + UK_NATIONAL_INSURANCE_NUMBER = "UKNationalInsuranceNumber" + UK_UNIQUE_TAXPAYER_NUMBER = "UKUniqueTaxpayerNumber" + USUK_PASSPORT_NUMBER = "USUKPassportNumber" + US_BANK_ACCOUNT_NUMBER = "USBankAccountNumber" + US_DRIVERS_LICENSE_NUMBER = "USDriversLicenseNumber" + US_INDIVIDUAL_TAXPAYER_IDENTIFICATION = "USIndividualTaxpayerIdentification" + US_SOCIAL_SECURITY_NUMBER = "USSocialSecurityNumber" + UA_PASSPORT_NUMBER_DOMESTIC = "UAPassportNumberDomestic" + UA_PASSPORT_NUMBER_INTERNATIONAL = "UAPassportNumberInternational" + ORGANIZATION = 
"Organization" + EMAIL = "Email" + URL = "URL" + AGE = "Age" + PHONE_NUMBER = "PhoneNumber" + IP_ADDRESS = "IPAddress" + DATE = "Date" + PERSON = "Person" + ADDRESS = "Address" + ALL = "All" + DEFAULT = "Default" + +class PiiTaskParametersDomain(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + PHI = "phi" + NONE = "none" + +class RelationType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Type of relation. Examples include: ``DosageOfMedication`` or 'FrequencyOfMedication', etc. + """ + + ABBREVIATION = "Abbreviation" + DIRECTION_OF_BODY_STRUCTURE = "DirectionOfBodyStructure" + DIRECTION_OF_CONDITION = "DirectionOfCondition" + DIRECTION_OF_EXAMINATION = "DirectionOfExamination" + DIRECTION_OF_TREATMENT = "DirectionOfTreatment" + DOSAGE_OF_MEDICATION = "DosageOfMedication" + FORM_OF_MEDICATION = "FormOfMedication" + FREQUENCY_OF_MEDICATION = "FrequencyOfMedication" + FREQUENCY_OF_TREATMENT = "FrequencyOfTreatment" + QUALIFIER_OF_CONDITION = "QualifierOfCondition" + RELATION_OF_EXAMINATION = "RelationOfExamination" + ROUTE_OF_MEDICATION = "RouteOfMedication" + TIME_OF_CONDITION = "TimeOfCondition" + TIME_OF_EVENT = "TimeOfEvent" + TIME_OF_EXAMINATION = "TimeOfExamination" + TIME_OF_MEDICATION = "TimeOfMedication" + TIME_OF_TREATMENT = "TimeOfTreatment" + UNIT_OF_CONDITION = "UnitOfCondition" + UNIT_OF_EXAMINATION = "UnitOfExamination" + VALUE_OF_CONDITION = "ValueOfCondition" + VALUE_OF_EXAMINATION = "ValueOfExamination" + +class SentenceSentimentValue(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The predicted Sentiment for the sentence. + """ + + POSITIVE = "positive" + NEUTRAL = "neutral" + NEGATIVE = "negative" + +class State(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + NOT_STARTED = "notStarted" + RUNNING = "running" + SUCCEEDED = "succeeded" + FAILED = "failed" + REJECTED = "rejected" + CANCELLED = "cancelled" + CANCELLING = "cancelling" + PARTIALLY_COMPLETED = "partiallyCompleted" + +class StringIndexType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + #: Returned offset and length values will correspond to TextElements (Graphemes and Grapheme + #: clusters) confirming to the Unicode 8.0.0 standard. Use this option if your application is + #: written in .Net Framework or .Net Core and you will be using StringInfo. + TEXT_ELEMENTS_V8 = "TextElements_v8" + #: Returned offset and length values will correspond to Unicode code points. Use this option if + #: your application is written in a language that support Unicode, for example Python. + UNICODE_CODE_POINT = "UnicodeCodePoint" + #: Returned offset and length values will correspond to UTF-16 code units. Use this option if your + #: application is written in a language that support Unicode, for example Java, JavaScript. + UTF16_CODE_UNIT = "Utf16CodeUnit" + +class StringIndexTypeResponse(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + #: Returned offset and length values will correspond to TextElements (Graphemes and Grapheme + #: clusters) confirming to the Unicode 8.0.0 standard. Use this option if your application is + #: written in .Net Framework or .Net Core and you will be using StringInfo. + TEXT_ELEMENTS_V8 = "TextElements_v8" + #: Returned offset and length values will correspond to Unicode code points. Use this option if + #: your application is written in a language that support Unicode, for example Python. + UNICODE_CODE_POINT = "UnicodeCodePoint" + #: Returned offset and length values will correspond to UTF-16 code units. 
Use this option if your + #: application is written in a language that support Unicode, for example Java, JavaScript. + UTF16_CODE_UNIT = "Utf16CodeUnit" + +class TargetRelationType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The type related to the target. + """ + + ASSESSMENT = "assessment" + TARGET = "target" + +class TokenSentimentValue(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Targeted sentiment in the sentence. + """ + + POSITIVE = "positive" + MIXED = "mixed" + NEGATIVE = "negative" + +class WarningCodeValue(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Error code. + """ + + LONG_WORDS_IN_DOCUMENT = "LongWordsInDocument" + DOCUMENT_TRUNCATED = "DocumentTruncated" diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_4/operations/__init__.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_4/operations/__init__.py new file mode 100644 index 000000000000..4384511c0346 --- /dev/null +++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_4/operations/__init__.py @@ -0,0 +1,13 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from ._text_analytics_client_operations import TextAnalyticsClientOperationsMixin + +__all__ = [ + 'TextAnalyticsClientOperationsMixin', +] diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_4/operations/_text_analytics_client_operations.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_4/operations/_text_analytics_client_operations.py new file mode 100644 index 000000000000..58c4d755e5db --- /dev/null +++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_4/operations/_text_analytics_client_operations.py @@ -0,0 +1,1081 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from typing import TYPE_CHECKING +import warnings + +from ...._lro import AnalyzeBatchActionsLROPoller, AnalyzeBatchActionsLROPollingMethod, AnalyzeHealthcareEntitiesLROPoller, AnalyzeHealthcareEntitiesLROPollingMethod +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import HttpRequest, HttpResponse +from azure.core.polling import LROPoller, NoPolling, PollingMethod +from azure.core.polling.base_polling import LROBasePolling + +from .. 
import models as _models + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar, Union + + T = TypeVar('T') + ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +class TextAnalyticsClientOperationsMixin(object): + + def _analyze_initial( + self, + body=None, # type: Optional["_models.AnalyzeBatchInput"] + **kwargs # type: Any + ): + # type: (...) -> Optional["_models.AnalyzeJobState"] + cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.AnalyzeJobState"]] + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 400: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.ErrorResponse, response)), + 500: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.ErrorResponse, response)), + } + error_map.update(kwargs.pop('error_map', {})) + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json, text/json" + + # Construct URL + url = self._analyze_initial.metadata['url'] # type: ignore + path_format_arguments = { + 'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + if body is not None: + body_content = self._serialize.body(body, 'AnalyzeBatchInput') + else: + body_content = None + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + response_headers = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('AnalyzeJobState', pipeline_response) + + if response.status_code == 202: + response_headers['Operation-Location']=self._deserialize('str', response.headers.get('Operation-Location')) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + _analyze_initial.metadata = {'url': '/analyze'} # type: ignore + + def begin_analyze( + self, + body=None, # type: Optional["_models.AnalyzeBatchInput"] + **kwargs # type: Any + ): + # type: (...) -> AnalyzeBatchActionsLROPoller["_models.AnalyzeJobState"] + """Submit analysis job. + + Submit a collection of text documents for analysis. Specify one or more unique tasks to be + executed. + + :param body: Collection of documents to analyze and tasks to execute. + :type body: ~azure.ai.textanalytics.v3_1_preview_4.models.AnalyzeBatchInput + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. 
+ :keyword polling: Pass in True if you'd like the AnalyzeBatchActionsLROPollingMethod polling method, + False for no polling, or your own initialized polling object for a personal polling strategy. + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. + :return: An instance of AnalyzeBatchActionsLROPoller that returns either AnalyzeJobState or the result of cls(response) + :rtype: ~...._lro.AnalyzeBatchActionsLROPoller[~azure.ai.textanalytics.v3_1_preview_4.models.AnalyzeJobState] + :raises ~azure.core.exceptions.HttpResponseError: + """ + polling = kwargs.pop('polling', False) # type: Union[bool, PollingMethod] + cls = kwargs.pop('cls', None) # type: ClsType["_models.AnalyzeJobState"] + lro_delay = kwargs.pop( + 'polling_interval', + self._config.polling_interval + ) + cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] + if cont_token is None: + raw_result = self._analyze_initial( + body=body, + cls=lambda x,y,z: x, + **kwargs + ) + + kwargs.pop('error_map', None) + kwargs.pop('content_type', None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize('AnalyzeJobState', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + return deserialized + + path_format_arguments = { + 'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + } + + if polling is True: polling_method = AnalyzeBatchActionsLROPollingMethod(lro_delay, path_format_arguments=path_format_arguments, **kwargs) + elif polling is False: polling_method = NoPolling() + else: polling_method = polling + if cont_token: + return AnalyzeBatchActionsLROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output + ) + else: + return AnalyzeBatchActionsLROPoller(self._client, raw_result, get_long_running_output, polling_method) + begin_analyze.metadata = {'url': '/analyze'} # type: ignore + + def analyze_status( + self, + job_id, # type: str + show_stats=None, # type: Optional[bool] + top=20, # type: Optional[int] + skip=0, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> "_models.AnalyzeJobState" + """Get analysis status and results. + + Get the status of an analysis job. A job may consist of one or more tasks. Once all tasks are + completed, the job will transition to the completed state and results will be available for + each task. + + :param job_id: Job ID for Analyze. + :type job_id: str + :param show_stats: (Optional) if set to true, response will contain request and document level + statistics. + :type show_stats: bool + :param top: (Optional) Set the maximum number of results per task. When both $top and $skip are + specified, $skip is applied first. + :type top: int + :param skip: (Optional) Set the number of elements to offset in the response. When both $top + and $skip are specified, $skip is applied first. 
+ :type skip: int + :keyword callable cls: A custom type or function that will be passed the direct response + :return: AnalyzeJobState, or the result of cls(response) + :rtype: ~azure.ai.textanalytics.v3_1_preview_4.models.AnalyzeJobState + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.AnalyzeJobState"] + error_map = { + 401: ClientAuthenticationError, + 409: ResourceExistsError, + 404: lambda response: ResourceNotFoundError(response=response, model=self._deserialize(_models.ErrorResponse, response)), + 500: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.ErrorResponse, response)), + } + error_map.update(kwargs.pop('error_map', {})) + accept = "application/json, text/json" + + # Construct URL + url = self.analyze_status.metadata['url'] # type: ignore + path_format_arguments = { + 'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if show_stats is not None: + query_parameters['showStats'] = self._serialize.query("show_stats", show_stats, 'bool') + if top is not None: + query_parameters['$top'] = self._serialize.query("top", top, 'int', maximum=50, minimum=1) + if skip is not None: + query_parameters['$skip'] = self._serialize.query("skip", skip, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + deserialized = self._deserialize('AnalyzeJobState', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + analyze_status.metadata = {'url': '/analyze/jobs/{jobId}'} # type: ignore + + def health_status( + self, + job_id, # type: str + top=20, # type: Optional[int] + skip=0, # type: Optional[int] + show_stats=None, # type: Optional[bool] + **kwargs # type: Any + ): + # type: (...) -> "_models.HealthcareJobState" + """Get healthcare analysis job status and results. + + Get details of the healthcare prediction job specified by the jobId. + + :param job_id: Job ID. + :type job_id: str + :param top: (Optional) Set the maximum number of results per task. When both $top and $skip are + specified, $skip is applied first. + :type top: int + :param skip: (Optional) Set the number of elements to offset in the response. When both $top + and $skip are specified, $skip is applied first. + :type skip: int + :param show_stats: (Optional) if set to true, response will contain request and document level + statistics. 
+ :type show_stats: bool + :keyword callable cls: A custom type or function that will be passed the direct response + :return: HealthcareJobState, or the result of cls(response) + :rtype: ~azure.ai.textanalytics.v3_1_preview_4.models.HealthcareJobState + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.HealthcareJobState"] + error_map = { + 401: ClientAuthenticationError, + 409: ResourceExistsError, + 404: lambda response: ResourceNotFoundError(response=response, model=self._deserialize(_models.ErrorResponse, response)), + 500: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.ErrorResponse, response)), + } + error_map.update(kwargs.pop('error_map', {})) + accept = "application/json, text/json" + + # Construct URL + url = self.health_status.metadata['url'] # type: ignore + path_format_arguments = { + 'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if top is not None: + query_parameters['$top'] = self._serialize.query("top", top, 'int', maximum=50, minimum=1) + if skip is not None: + query_parameters['$skip'] = self._serialize.query("skip", skip, 'int', minimum=0) + if show_stats is not None: + query_parameters['showStats'] = self._serialize.query("show_stats", show_stats, 'bool') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + deserialized = self._deserialize('HealthcareJobState', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + health_status.metadata = {'url': '/entities/health/jobs/{jobId}'} # type: ignore + + def _cancel_health_job_initial( + self, + job_id, # type: str + **kwargs # type: Any + ): + # type: (...) 
-> None + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, + 409: ResourceExistsError, + 404: lambda response: ResourceNotFoundError(response=response, model=self._deserialize(_models.ErrorResponse, response)), + 500: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.ErrorResponse, response)), + } + error_map.update(kwargs.pop('error_map', {})) + accept = "application/json, text/json" + + # Construct URL + url = self._cancel_health_job_initial.metadata['url'] # type: ignore + path_format_arguments = { + 'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + response_headers = {} + response_headers['Operation-Location']=self._deserialize('str', response.headers.get('Operation-Location')) + + if cls: + return cls(pipeline_response, None, response_headers) + + _cancel_health_job_initial.metadata = {'url': '/entities/health/jobs/{jobId}'} # type: ignore + + def begin_cancel_health_job( + self, + job_id, # type: str + **kwargs # type: Any + ): + # type: (...) -> LROPoller[None] + """Cancel healthcare prediction job. + + Cancel healthcare prediction job. + + :param job_id: Job ID. + :type job_id: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: Pass in True if you'd like the LROBasePolling polling method, + False for no polling, or your own initialized polling object for a personal polling strategy. + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
+ :return: An instance of LROPoller that returns either None or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + polling = kwargs.pop('polling', False) # type: Union[bool, PollingMethod] + cls = kwargs.pop('cls', None) # type: ClsType[None] + lro_delay = kwargs.pop( + 'polling_interval', + self._config.polling_interval + ) + cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] + if cont_token is None: + raw_result = self._cancel_health_job_initial( + job_id=job_id, + cls=lambda x,y,z: x, + **kwargs + ) + + kwargs.pop('error_map', None) + kwargs.pop('content_type', None) + + def get_long_running_output(pipeline_response): + if cls: + return cls(pipeline_response, None, {}) + + path_format_arguments = { + 'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str'), + } + + if polling is True: polling_method = LROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) + elif polling is False: polling_method = NoPolling() + else: polling_method = polling + if cont_token: + return LROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output + ) + else: + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) + begin_cancel_health_job.metadata = {'url': '/entities/health/jobs/{jobId}'} # type: ignore + + def _health_initial( + self, + documents, # type: List["_models.MultiLanguageInput"] + model_version=None, # type: Optional[str] + string_index_type="TextElements_v8", # type: Optional[Union[str, "_models.StringIndexType"]] + **kwargs # type: Any + ): + # type: (...) 
-> Optional["_models.HealthcareJobState"] + cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.HealthcareJobState"]] + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 400: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.ErrorResponse, response)), + 500: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.ErrorResponse, response)), + } + error_map.update(kwargs.pop('error_map', {})) + + _input = _models.MultiLanguageBatchInput(documents=documents) + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json, text/json" + + # Construct URL + url = self._health_initial.metadata['url'] # type: ignore + path_format_arguments = { + 'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if model_version is not None: + query_parameters['model-version'] = self._serialize.query("model_version", model_version, 'str') + if string_index_type is not None: + query_parameters['stringIndexType'] = self._serialize.query("string_index_type", string_index_type, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(_input, 'MultiLanguageBatchInput') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + response_headers = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('HealthcareJobState', pipeline_response) + + if response.status_code == 202: + response_headers['Operation-Location']=self._deserialize('str', response.headers.get('Operation-Location')) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + _health_initial.metadata = {'url': '/entities/health/jobs'} # type: ignore + + def begin_health( + self, + documents, # type: List["_models.MultiLanguageInput"] + model_version=None, # type: Optional[str] + string_index_type="TextElements_v8", # type: Optional[Union[str, "_models.StringIndexType"]] + **kwargs # type: Any + ): + # type: (...) -> AnalyzeHealthcareEntitiesLROPoller["_models.HealthcareJobState"] + """Submit healthcare analysis job. + + Start a healthcare analysis job to recognize healthcare related entities (drugs, conditions, + symptoms, etc) and their relations. + + :param documents: The set of documents to process as part of this batch. + :type documents: list[~azure.ai.textanalytics.v3_1_preview_4.models.MultiLanguageInput] + :param model_version: (Optional) This value indicates which model will be used for scoring. If + a model-version is not specified, the API should default to the latest, non-preview version. 
+ :type model_version: str + :param string_index_type: (Optional) Specifies the method used to interpret string offsets. + Defaults to Text Elements (Graphemes) according to Unicode v8.0.0. For additional information + see https://aka.ms/text-analytics-offsets. + :type string_index_type: str or ~azure.ai.textanalytics.v3_1_preview_4.models.StringIndexType + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: Pass in True if you'd like the AnalyzeHealthcareEntitiesLROPollingMethod polling method, + False for no polling, or your own initialized polling object for a personal polling strategy. + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. + :return: An instance of AnalyzeHealthcareEntitiesLROPoller that returns either HealthcareJobState or the result of cls(response) + :rtype: ~...._lro.AnalyzeHealthcareEntitiesLROPoller[~azure.ai.textanalytics.v3_1_preview_4.models.HealthcareJobState] + :raises ~azure.core.exceptions.HttpResponseError: + """ + polling = kwargs.pop('polling', False) # type: Union[bool, PollingMethod] + cls = kwargs.pop('cls', None) # type: ClsType["_models.HealthcareJobState"] + lro_delay = kwargs.pop( + 'polling_interval', + self._config.polling_interval + ) + cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] + if cont_token is None: + raw_result = self._health_initial( + documents=documents, + model_version=model_version, + string_index_type=string_index_type, + cls=lambda x,y,z: x, + **kwargs + ) + + kwargs.pop('error_map', None) + kwargs.pop('content_type', None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize('HealthcareJobState', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + return deserialized + + path_format_arguments = { + 'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + } + + if polling is True: polling_method = AnalyzeHealthcareEntitiesLROPollingMethod(lro_delay, path_format_arguments=path_format_arguments, **kwargs) + elif polling is False: polling_method = NoPolling() + else: polling_method = polling + if cont_token: + return AnalyzeHealthcareEntitiesLROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output + ) + else: + return AnalyzeHealthcareEntitiesLROPoller(self._client, raw_result, get_long_running_output, polling_method) + begin_health.metadata = {'url': '/entities/health/jobs'} # type: ignore + + def entities_recognition_general( + self, + documents, # type: List["_models.MultiLanguageInput"] + model_version=None, # type: Optional[str] + show_stats=None, # type: Optional[bool] + string_index_type="TextElements_v8", # type: Optional[Union[str, "_models.StringIndexType"]] + **kwargs # type: Any + ): + # type: (...) -> "_models.EntitiesResult" + """Named Entity Recognition. + + The API returns a list of general named entities in a given document. For the list of supported + entity types, check :code:`Supported Entity Types in Text + Analytics API`. See the :code:`Supported languages in Text + Analytics API` for the list of enabled languages. 
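For illustration only (this sketch is not part of the diff), a minimal example of how the generated `entities_recognition_general` operation is typically reached through the public client rather than called directly, assuming `TextAnalyticsClient.recognize_entities` is its public wrapper; the endpoint, key, and sample document below are placeholders:

    from azure.core.credentials import AzureKeyCredential
    from azure.ai.textanalytics import TextAnalyticsClient

    # Placeholder endpoint and key; substitute your own resource values.
    client = TextAnalyticsClient(
        endpoint="https://<your-resource>.cognitiveservices.azure.com/",
        credential=AzureKeyCredential("<api-key>"),
    )

    documents = ["Microsoft was founded by Bill Gates and Paul Allen."]

    # recognize_entities is assumed to wrap the generated
    # entities_recognition_general call shown above.
    result = client.recognize_entities(documents)
    for doc in result:
        if not doc.is_error:
            for entity in doc.entities:
                print(entity.text, entity.category, entity.confidence_score)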
+ + :param documents: The set of documents to process as part of this batch. + :type documents: list[~azure.ai.textanalytics.v3_1_preview_4.models.MultiLanguageInput] + :param model_version: (Optional) This value indicates which model will be used for scoring. If + a model-version is not specified, the API should default to the latest, non-preview version. + :type model_version: str + :param show_stats: (Optional) if set to true, response will contain request and document level + statistics. + :type show_stats: bool + :param string_index_type: (Optional) Specifies the method used to interpret string offsets. + Defaults to Text Elements (Graphemes) according to Unicode v8.0.0. For additional information + see https://aka.ms/text-analytics-offsets. + :type string_index_type: str or ~azure.ai.textanalytics.v3_1_preview_4.models.StringIndexType + :keyword callable cls: A custom type or function that will be passed the direct response + :return: EntitiesResult, or the result of cls(response) + :rtype: ~azure.ai.textanalytics.v3_1_preview_4.models.EntitiesResult + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.EntitiesResult"] + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 400: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.ErrorResponse, response)), + 500: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.ErrorResponse, response)), + } + error_map.update(kwargs.pop('error_map', {})) + + _input = _models.MultiLanguageBatchInput(documents=documents) + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json, text/json" + + # Construct URL + url = self.entities_recognition_general.metadata['url'] # type: ignore + path_format_arguments = { + 'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if model_version is not None: + query_parameters['model-version'] = self._serialize.query("model_version", model_version, 'str') + if show_stats is not None: + query_parameters['showStats'] = self._serialize.query("show_stats", show_stats, 'bool') + if string_index_type is not None: + query_parameters['stringIndexType'] = self._serialize.query("string_index_type", string_index_type, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(_input, 'MultiLanguageBatchInput') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + deserialized = self._deserialize('EntitiesResult', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + 
entities_recognition_general.metadata = {'url': '/entities/recognition/general'} # type: ignore + + def entities_recognition_pii( + self, + documents, # type: List["_models.MultiLanguageInput"] + model_version=None, # type: Optional[str] + show_stats=None, # type: Optional[bool] + domain=None, # type: Optional[str] + string_index_type="TextElements_v8", # type: Optional[Union[str, "_models.StringIndexType"]] + pii_categories=None, # type: Optional[List[Union[str, "_models.PiiCategory"]]] + **kwargs # type: Any + ): + # type: (...) -> "_models.PiiResult" + """Entities containing personal information. + + The API returns a list of entities with personal information (\"SSN\", \"Bank Account\" etc) in + the document. For the list of supported entity types, check :code:`Supported Entity Types in Text Analytics API`. See the + :code:`Supported languages in Text Analytics API` for the + list of enabled languages. + + :param documents: The set of documents to process as part of this batch. + :type documents: list[~azure.ai.textanalytics.v3_1_preview_4.models.MultiLanguageInput] + :param model_version: (Optional) This value indicates which model will be used for scoring. If + a model-version is not specified, the API should default to the latest, non-preview version. + :type model_version: str + :param show_stats: (Optional) if set to true, response will contain request and document level + statistics. + :type show_stats: bool + :param domain: (Optional) if specified, will set the PII domain to include only a subset of the + entity categories. Possible values include: 'PHI', 'none'. + :type domain: str + :param string_index_type: (Optional) Specifies the method used to interpret string offsets. + Defaults to Text Elements (Graphemes) according to Unicode v8.0.0. For additional information + see https://aka.ms/text-analytics-offsets. + :type string_index_type: str or ~azure.ai.textanalytics.v3_1_preview_4.models.StringIndexType + :param pii_categories: (Optional) describes the PII categories to return. 
+ :type pii_categories: list[str or ~azure.ai.textanalytics.v3_1_preview_4.models.PiiCategory] + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PiiResult, or the result of cls(response) + :rtype: ~azure.ai.textanalytics.v3_1_preview_4.models.PiiResult + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.PiiResult"] + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 400: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.ErrorResponse, response)), + 500: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.ErrorResponse, response)), + } + error_map.update(kwargs.pop('error_map', {})) + + _input = _models.MultiLanguageBatchInput(documents=documents) + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json, text/json" + + # Construct URL + url = self.entities_recognition_pii.metadata['url'] # type: ignore + path_format_arguments = { + 'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if model_version is not None: + query_parameters['model-version'] = self._serialize.query("model_version", model_version, 'str') + if show_stats is not None: + query_parameters['showStats'] = self._serialize.query("show_stats", show_stats, 'bool') + if domain is not None: + query_parameters['domain'] = self._serialize.query("domain", domain, 'str') + if string_index_type is not None: + query_parameters['stringIndexType'] = self._serialize.query("string_index_type", string_index_type, 'str') + if pii_categories is not None: + query_parameters['piiCategories'] = self._serialize.query("pii_categories", pii_categories, '[str]', div=',') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(_input, 'MultiLanguageBatchInput') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + deserialized = self._deserialize('PiiResult', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + entities_recognition_pii.metadata = {'url': '/entities/recognition/pii'} # type: ignore + + def entities_linking( + self, + documents, # type: List["_models.MultiLanguageInput"] + model_version=None, # type: Optional[str] + show_stats=None, # type: Optional[bool] + string_index_type="TextElements_v8", # type: Optional[Union[str, "_models.StringIndexType"]] + **kwargs # type: Any + ): + # type: (...) -> "_models.EntityLinkingResult" + """Linked entities from a well known knowledge base. 
+ + The API returns a list of recognized entities with links to a well known knowledge base. See + the :code:`Supported languages in Text Analytics API` for + the list of enabled languages. + + :param documents: The set of documents to process as part of this batch. + :type documents: list[~azure.ai.textanalytics.v3_1_preview_4.models.MultiLanguageInput] + :param model_version: (Optional) This value indicates which model will be used for scoring. If + a model-version is not specified, the API should default to the latest, non-preview version. + :type model_version: str + :param show_stats: (Optional) if set to true, response will contain request and document level + statistics. + :type show_stats: bool + :param string_index_type: (Optional) Specifies the method used to interpret string offsets. + Defaults to Text Elements (Graphemes) according to Unicode v8.0.0. For additional information + see https://aka.ms/text-analytics-offsets. + :type string_index_type: str or ~azure.ai.textanalytics.v3_1_preview_4.models.StringIndexType + :keyword callable cls: A custom type or function that will be passed the direct response + :return: EntityLinkingResult, or the result of cls(response) + :rtype: ~azure.ai.textanalytics.v3_1_preview_4.models.EntityLinkingResult + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.EntityLinkingResult"] + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 400: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.ErrorResponse, response)), + 500: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.ErrorResponse, response)), + } + error_map.update(kwargs.pop('error_map', {})) + + _input = _models.MultiLanguageBatchInput(documents=documents) + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json, text/json" + + # Construct URL + url = self.entities_linking.metadata['url'] # type: ignore + path_format_arguments = { + 'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if model_version is not None: + query_parameters['model-version'] = self._serialize.query("model_version", model_version, 'str') + if show_stats is not None: + query_parameters['showStats'] = self._serialize.query("show_stats", show_stats, 'bool') + if string_index_type is not None: + query_parameters['stringIndexType'] = self._serialize.query("string_index_type", string_index_type, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(_input, 'MultiLanguageBatchInput') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + 
deserialized = self._deserialize('EntityLinkingResult', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + entities_linking.metadata = {'url': '/entities/linking'} # type: ignore + + def key_phrases( + self, + documents, # type: List["_models.MultiLanguageInput"] + model_version=None, # type: Optional[str] + show_stats=None, # type: Optional[bool] + **kwargs # type: Any + ): + # type: (...) -> "_models.KeyPhraseResult" + """Key Phrases. + + The API returns a list of strings denoting the key phrases in the input text. See the :code:`Supported languages in Text Analytics API` for the list of + enabled languages. + + :param documents: The set of documents to process as part of this batch. + :type documents: list[~azure.ai.textanalytics.v3_1_preview_4.models.MultiLanguageInput] + :param model_version: (Optional) This value indicates which model will be used for scoring. If + a model-version is not specified, the API should default to the latest, non-preview version. + :type model_version: str + :param show_stats: (Optional) if set to true, response will contain request and document level + statistics. + :type show_stats: bool + :keyword callable cls: A custom type or function that will be passed the direct response + :return: KeyPhraseResult, or the result of cls(response) + :rtype: ~azure.ai.textanalytics.v3_1_preview_4.models.KeyPhraseResult + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.KeyPhraseResult"] + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 400: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.ErrorResponse, response)), + 500: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.ErrorResponse, response)), + } + error_map.update(kwargs.pop('error_map', {})) + + _input = _models.MultiLanguageBatchInput(documents=documents) + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json, text/json" + + # Construct URL + url = self.key_phrases.metadata['url'] # type: ignore + path_format_arguments = { + 'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if model_version is not None: + query_parameters['model-version'] = self._serialize.query("model_version", model_version, 'str') + if show_stats is not None: + query_parameters['showStats'] = self._serialize.query("show_stats", show_stats, 'bool') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(_input, 'MultiLanguageBatchInput') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + 
deserialized = self._deserialize('KeyPhraseResult', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + key_phrases.metadata = {'url': '/keyPhrases'} # type: ignore + + def languages( + self, + documents, # type: List["_models.LanguageInput"] + model_version=None, # type: Optional[str] + show_stats=None, # type: Optional[bool] + **kwargs # type: Any + ): + # type: (...) -> "_models.LanguageResult" + """Detect Language. + + The API returns the detected language and a numeric score between 0 and 1. Scores close to 1 + indicate 100% certainty that the identified language is true. See the :code:`Supported languages in Text Analytics API` for the list of + enabled languages. + + :param documents: + :type documents: list[~azure.ai.textanalytics.v3_1_preview_4.models.LanguageInput] + :param model_version: (Optional) This value indicates which model will be used for scoring. If + a model-version is not specified, the API should default to the latest, non-preview version. + :type model_version: str + :param show_stats: (Optional) if set to true, response will contain request and document level + statistics. + :type show_stats: bool + :keyword callable cls: A custom type or function that will be passed the direct response + :return: LanguageResult, or the result of cls(response) + :rtype: ~azure.ai.textanalytics.v3_1_preview_4.models.LanguageResult + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.LanguageResult"] + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 400: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.ErrorResponse, response)), + 500: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.ErrorResponse, response)), + } + error_map.update(kwargs.pop('error_map', {})) + + _input = _models.LanguageBatchInput(documents=documents) + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json, text/json" + + # Construct URL + url = self.languages.metadata['url'] # type: ignore + path_format_arguments = { + 'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if model_version is not None: + query_parameters['model-version'] = self._serialize.query("model_version", model_version, 'str') + if show_stats is not None: + query_parameters['showStats'] = self._serialize.query("show_stats", show_stats, 'bool') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(_input, 'LanguageBatchInput') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + deserialized = 
self._deserialize('LanguageResult', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + languages.metadata = {'url': '/languages'} # type: ignore + + def sentiment( + self, + documents, # type: List["_models.MultiLanguageInput"] + model_version=None, # type: Optional[str] + show_stats=None, # type: Optional[bool] + opinion_mining=None, # type: Optional[bool] + string_index_type="TextElements_v8", # type: Optional[Union[str, "_models.StringIndexType"]] + **kwargs # type: Any + ): + # type: (...) -> "_models.SentimentResponse" + """Sentiment. + + The API returns a detailed sentiment analysis for the input text. The analysis is done in + multiple levels of granularity, starting from the document level, down to sentence and key terms + (targets and assessments). + + :param documents: The set of documents to process as part of this batch. + :type documents: list[~azure.ai.textanalytics.v3_1_preview_4.models.MultiLanguageInput] + :param model_version: (Optional) This value indicates which model will be used for scoring. If + a model-version is not specified, the API should default to the latest, non-preview version. + :type model_version: str + :param show_stats: (Optional) if set to true, response will contain request and document level + statistics. + :type show_stats: bool + :param opinion_mining: (Optional) if set to true, response will contain not only sentiment + prediction but also opinion mining (aspect-based sentiment analysis) results. + :type opinion_mining: bool + :param string_index_type: (Optional) Specifies the method used to interpret string offsets. + Defaults to Text Elements (Graphemes) according to Unicode v8.0.0. For additional information + see https://aka.ms/text-analytics-offsets.
+ :type string_index_type: str or ~azure.ai.textanalytics.v3_1_preview_4.models.StringIndexType + :keyword callable cls: A custom type or function that will be passed the direct response + :return: SentimentResponse, or the result of cls(response) + :rtype: ~azure.ai.textanalytics.v3_1_preview_4.models.SentimentResponse + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.SentimentResponse"] + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 400: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.ErrorResponse, response)), + 500: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.ErrorResponse, response)), + } + error_map.update(kwargs.pop('error_map', {})) + + _input = _models.MultiLanguageBatchInput(documents=documents) + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json, text/json" + + # Construct URL + url = self.sentiment.metadata['url'] # type: ignore + path_format_arguments = { + 'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if model_version is not None: + query_parameters['model-version'] = self._serialize.query("model_version", model_version, 'str') + if show_stats is not None: + query_parameters['showStats'] = self._serialize.query("show_stats", show_stats, 'bool') + if opinion_mining is not None: + query_parameters['opinionMining'] = self._serialize.query("opinion_mining", opinion_mining, 'bool') + if string_index_type is not None: + query_parameters['stringIndexType'] = self._serialize.query("string_index_type", string_index_type, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(_input, 'MultiLanguageBatchInput') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + deserialized = self._deserialize('SentimentResponse', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + sentiment.metadata = {'url': '/sentiment'} # type: ignore diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_4/py.typed b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_4/py.typed new file mode 100644 index 000000000000..e5aff4f83af8 --- /dev/null +++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_4/py.typed @@ -0,0 +1 @@ +# Marker file for PEP 561. 
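For illustration only (again, not part of the diff), a minimal sketch of driving the long-running healthcare job defined above through a poller, assuming `begin_analyze_healthcare_entities` is the public wrapper around the generated `begin_health`/`health_status` operations and that the poller's result iterates per-document results; the endpoint, key, and document are placeholders:

    from azure.core.credentials import AzureKeyCredential
    from azure.ai.textanalytics import TextAnalyticsClient

    client = TextAnalyticsClient(
        endpoint="https://<your-resource>.cognitiveservices.azure.com/",  # placeholder
        credential=AzureKeyCredential("<api-key>"),                       # placeholder
    )

    documents = ["Patient was prescribed 100mg ibuprofen twice daily."]

    # The poller submits the /entities/health/jobs job, polls the job status
    # endpoint, and exposes the final state once the job has succeeded.
    poller = client.begin_analyze_healthcare_entities(documents)
    for doc in poller.result():
        if not doc.is_error:
            for entity in doc.entities:
                print(entity.text, entity.category)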
\ No newline at end of file diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_lro.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_lro.py index bd7f33b54ec6..ceb2c66ee5e6 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_lro.py +++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_lro.py @@ -186,8 +186,8 @@ class AnalyzeBatchActionsLROPollingMethod(TextAnalyticsLROPollingMethod): @property def _current_body(self): - from ._generated.v3_1_preview_3.models import JobMetadata - return JobMetadata.deserialize(self._pipeline_response) + from ._generated.v3_1_preview_3.models import AnalyzeJobMetadata + return AnalyzeJobMetadata.deserialize(self._pipeline_response) @property def created_on(self): diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_models.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_models.py index 6cff2cd577ad..45329686fe30 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_models.py +++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_models.py @@ -677,8 +677,7 @@ class TextDocumentStatistics(DictMixin): :ivar character_count: Number of text elements recognized in the document. :vartype character_count: int - :ivar transaction_count: Number of transactions for the - document. + :ivar transaction_count: Number of transactions for the document. :vartype transaction_count: int """ @@ -1009,10 +1008,10 @@ def _from_generated(cls, sentence, results, sentiment): # the correct encoding was not introduced for v3.0 offset = None length = None - if hasattr(sentence, "aspects"): + if hasattr(sentence, "targets"): mined_opinions = ( - [MinedOpinion._from_generated(aspect, results, sentiment) for aspect in sentence.aspects] # pylint: disable=protected-access - if sentence.aspects else [] + [MinedOpinion._from_generated(target, results, sentiment) for target in sentence.targets] # pylint: disable=protected-access + if sentence.targets else [] ) else: mined_opinions = None @@ -1038,70 +1037,69 @@ def __repr__(self): class MinedOpinion(DictMixin): """A mined opinion object represents an opinion we've extracted from a sentence. - It consists of both an aspect that these opinions are about, and the actual - opinions themselves. + It consists of both a target that these opinions are about, and the assessments + representing the opinion. - :ivar aspect: The aspect of a product/service that this opinion is about - :vartype aspect: ~azure.ai.textanalytics.AspectSentiment - :ivar opinions: The actual opinions of the aspect - :vartype opinions: list[~azure.ai.textanalytics.OpinionSentiment] + :ivar target: The target of an opinion about a product/service. + :vartype target: ~azure.ai.textanalytics.TargetSentiment + :ivar assessments: The assessments representing the opinion of the target. 
+    :vartype assessments: list[~azure.ai.textanalytics.AssessmentSentiment]
     """

     def __init__(self, **kwargs):
-        self.aspect = kwargs.get("aspect", None)
-        self.opinions = kwargs.get("opinions", None)
+        self.target = kwargs.get("target", None)
+        self.assessments = kwargs.get("assessments", None)

     @staticmethod
-    def _get_opinions(relations, results, sentiment):  # pylint: disable=unused-argument
+    def _get_assessments(relations, results, sentiment):  # pylint: disable=unused-argument
         if not relations:
             return []
-        opinion_relations = [r.ref for r in relations if r.relation_type == "opinion"]
-        opinions = []
-        for opinion_relation in opinion_relations:
-            nums = _get_indices(opinion_relation)
+        assessment_relations = [r.ref for r in relations if r.relation_type == "assessment"]
+        assessments = []
+        for assessment_relation in assessment_relations:
+            nums = _get_indices(assessment_relation)
             sentence_index = nums[1]
-            opinion_index = nums[2]
-            opinions.append(
-                sentiment.sentences[sentence_index].opinions[opinion_index]
+            assessment_index = nums[2]
+            assessments.append(
+                sentiment.sentences[sentence_index].assessments[assessment_index]
             )
-        return opinions
+        return assessments

     @classmethod
-    def _from_generated(cls, aspect, results, sentiment):
+    def _from_generated(cls, target, results, sentiment):
         return cls(
-            aspect=AspectSentiment._from_generated(aspect),  # pylint: disable=protected-access
-            opinions=[
-                OpinionSentiment._from_generated(opinion)  # pylint: disable=protected-access
-                for opinion in cls._get_opinions(aspect.relations, results, sentiment)
+            target=TargetSentiment._from_generated(target),  # pylint: disable=protected-access
+            assessments=[
+                AssessmentSentiment._from_generated(assessment)  # pylint: disable=protected-access
+                for assessment in cls._get_assessments(target.relations, results, sentiment)
             ],
         )

     def __repr__(self):
-        return "MinedOpinion(aspect={}, opinions={})".format(
-            repr(self.aspect),
-            repr(self.opinions)
+        return "MinedOpinion(target={}, assessments={})".format(
+            repr(self.target),
+            repr(self.assessments)
         )[:1024]


-class AspectSentiment(DictMixin):
-    """AspectSentiment contains the related opinions, predicted sentiment,
-    confidence scores and other information about an aspect of a product.
-    An aspect of a product/service is a key component of that product/service.
-    For example in "The food at Hotel Foo is good", "food" is an aspect of
+class TargetSentiment(DictMixin):
+    """TargetSentiment contains the predicted sentiment,
+    confidence scores and other information about a key component of a product/service.
+    For example, in "The food at Hotel Foo is good", "food" is a key component of
     "Hotel Foo".

-    :ivar str text: The aspect text.
-    :ivar str sentiment: The predicted Sentiment for the aspect. Possible values
+    :ivar str text: The text value of the target.
+    :ivar str sentiment: The predicted Sentiment for the target. Possible values
         include 'positive', 'mixed', and 'negative'.
     :ivar confidence_scores: The sentiment confidence score between 0
-        and 1 for the aspect for 'positive' and 'negative' labels. It's score
+        and 1 for the target for 'positive' and 'negative' labels. It's score
         for 'neutral' will always be 0
     :vartype confidence_scores: ~azure.ai.textanalytics.SentimentConfidenceScores
-    :ivar int length: The aspect text length. This value depends on the value of the
+    :ivar int length: The target text length. This value depends on the value of the
        `string_index_type` parameter set in the original request, which is UnicodeCodePoints by default.
- :ivar int offset: The aspect text offset from the start of the document. + :ivar int offset: The target text offset from the start of the document. The value depends on the value of the `string_index_type` parameter set in the original request, which is UnicodeCodePoints by default. """ @@ -1114,18 +1112,18 @@ def __init__(self, **kwargs): self.offset = kwargs.get("offset", None) @classmethod - def _from_generated(cls, aspect): + def _from_generated(cls, target): return cls( - text=aspect.text, - sentiment=aspect.sentiment, - confidence_scores=SentimentConfidenceScores._from_generated(aspect.confidence_scores), # pylint: disable=protected-access - length=aspect.length, - offset=aspect.offset, + text=target.text, + sentiment=target.sentiment, + confidence_scores=SentimentConfidenceScores._from_generated(target.confidence_scores), # pylint: disable=protected-access + length=target.length, + offset=target.offset, ) def __repr__(self): - return "AspectSentiment(text={}, sentiment={}, confidence_scores={}, "\ - "length={}, offset={})".format( + return "TargetSentiment(text={}, sentiment={}, confidence_scores={}, "\ + "length={}, offset={})".format( self.text, self.sentiment, repr(self.confidence_scores), @@ -1134,28 +1132,28 @@ def __repr__(self): )[:1024] -class OpinionSentiment(DictMixin): - """OpinionSentiment contains the predicted sentiment, - confidence scores and other information about an opinion of an aspect. - For example, in the sentence "The food is good", the opinion of the - aspect 'food' is 'good'. +class AssessmentSentiment(DictMixin): + """AssessmentSentiment contains the predicted sentiment, + confidence scores and other information about an assessment given about + a particular target. For example, in the sentence "The food is good", the assessment + of the target 'food' is 'good'. - :ivar str text: The opinion text. - :ivar str sentiment: The predicted Sentiment for the opinion. Possible values + :ivar str text: The assessment text. + :ivar str sentiment: The predicted Sentiment for the assessment. Possible values include 'positive', 'mixed', and 'negative'. :ivar confidence_scores: The sentiment confidence score between 0 - and 1 for the opinion for 'positive' and 'negative' labels. It's score + and 1 for the assessment for 'positive' and 'negative' labels. It's score for 'neutral' will always be 0 :vartype confidence_scores: ~azure.ai.textanalytics.SentimentConfidenceScores - :ivar int length: The opinion text length. This value depends on the value of the + :ivar int length: The assessment text length. This value depends on the value of the `string_index_type` parameter set in the original request, which is UnicodeCodePoints by default. - :ivar int offset: The opinion text offset from the start of the document. + :ivar int offset: The assessment text offset from the start of the document. The value depends on the value of the `string_index_type` parameter set in the original request, which is UnicodeCodePoints by default. - :ivar bool is_negated: Whether the opinion is negated. For example, in - "The food is not good", the opinion "good" is negated. + :ivar bool is_negated: Whether the value of the assessment is negated. For example, in + "The food is not good", the assessment "good" is negated. 
""" def __init__(self, **kwargs): @@ -1167,19 +1165,19 @@ def __init__(self, **kwargs): self.is_negated = kwargs.get("is_negated", None) @classmethod - def _from_generated(cls, opinion): + def _from_generated(cls, assessment): return cls( - text=opinion.text, - sentiment=opinion.sentiment, - confidence_scores=SentimentConfidenceScores._from_generated(opinion.confidence_scores), # pylint: disable=protected-access - length=opinion.length, - offset=opinion.offset, - is_negated=opinion.is_negated + text=assessment.text, + sentiment=assessment.sentiment, + confidence_scores=SentimentConfidenceScores._from_generated(assessment.confidence_scores), # pylint: disable=protected-access + length=assessment.length, + offset=assessment.offset, + is_negated=assessment.is_negated ) def __repr__(self): return ( - "OpinionSentiment(text={}, sentiment={}, confidence_scores={}, length={}, offset={}, "\ + "AssessmentSentiment(text={}, sentiment={}, confidence_scores={}, length={}, offset={}, " \ "is_negated={})".format( self.text, self.sentiment, diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_text_analytics_client.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_text_analytics_client.py index b9b11f48d6c3..d49b070b9f98 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_text_analytics_client.py +++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_text_analytics_client.py @@ -690,6 +690,7 @@ def analyze_sentiment( # type: ignore if show_opinion_mining is not None: kwargs.update({"opinion_mining": show_opinion_mining}) + try: return self._client.sentiment( documents=docs, diff --git a/sdk/textanalytics/azure-ai-textanalytics/samples/async_samples/sample_analyze_sentiment_with_opinion_mining_async.py b/sdk/textanalytics/azure-ai-textanalytics/samples/async_samples/sample_analyze_sentiment_with_opinion_mining_async.py index 500db7f207d8..0c1c111aa6d8 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/samples/async_samples/sample_analyze_sentiment_with_opinion_mining_async.py +++ b/sdk/textanalytics/azure-ai-textanalytics/samples/async_samples/sample_analyze_sentiment_with_opinion_mining_async.py @@ -33,7 +33,7 @@ Since these reviews seem so mixed, and since I'm interested in finding exactly what it is about my hotel that should be improved, let's find the complaints users have about individual aspects of this hotel - In order to do that, I'm going to extract aspects that have a negative sentiment. I'm going to map aspect to the mined opinion object we get back to aggregate the reviews by aspect. + In order to do that, I'm going to extract the targets of a negative sentiment. I'm going to map each of these targets to the mined opinion object we get back to aggregate the reviews by target. 
     Let's now go through the aspects of our hotel people have complained about and see what users have specifically said
     Users have made 1 complaints about 'food', specifically saying that it's 'unacceptable'
@@ -102,27 +102,27 @@
         )

         print(
-            "\nIn order to do that, I'm going to extract aspects that have a negative sentiment. "
-            "I'm going to map aspect to the mined opinion object we get back to aggregate the reviews by aspect. "
+            "\nIn order to do that, I'm going to extract the targets of a negative sentiment. "
+            "I'm going to map each of these targets to the mined opinion object we get back to aggregate the reviews by target. "
         )

-        aspect_to_complaints = {}
+        target_to_complaints = {}
         for document in doc_result:
             for sentence in document.sentences:
                 for mined_opinion in sentence.mined_opinions:
-                    aspect = mined_opinion.aspect
-                    if aspect.sentiment == 'negative':
-                        aspect_to_complaints.setdefault(aspect.text, [])
-                        aspect_to_complaints[aspect.text].append(mined_opinion)
+                    target = mined_opinion.target
+                    if target.sentiment == 'negative':
+                        target_to_complaints.setdefault(target.text, [])
+                        target_to_complaints[target.text].append(mined_opinion)

         print("\nLet's now go through the aspects of our hotel people have complained about and see what users have specifically said")
-        for aspect, complaints in aspect_to_complaints.items():
+        for target, complaints in target_to_complaints.items():
             print("Users have made {} complaint(s) about '{}', specifically saying that it's '{}'".format(
                 len(complaints),
-                aspect,
+                target,
                 "', '".join(
-                    [opinion.text for complaint in complaints for opinion in complaint.opinions]
+                    [assessment.text for complaint in complaints for assessment in complaint.assessments]
                 )
             ))
diff --git a/sdk/textanalytics/azure-ai-textanalytics/samples/sample_analyze_sentiment.py b/sdk/textanalytics/azure-ai-textanalytics/samples/sample_analyze_sentiment.py
index 1ff95581e876..72440024bf70 100644
--- a/sdk/textanalytics/azure-ai-textanalytics/samples/sample_analyze_sentiment.py
+++ b/sdk/textanalytics/azure-ai-textanalytics/samples/sample_analyze_sentiment.py
@@ -61,7 +61,7 @@ def analyze_sentiment(self):
         ]

-        result = text_analytics_client.analyze_sentiment(documents)
+        result = text_analytics_client.analyze_sentiment(documents, show_opinion_mining=True)
         docs = [doc for doc in result if not doc.is_error]

         print("Let's visualize the sentiment of each of these documents")
diff --git a/sdk/textanalytics/azure-ai-textanalytics/samples/sample_analyze_sentiment_with_opinion_mining.py b/sdk/textanalytics/azure-ai-textanalytics/samples/sample_analyze_sentiment_with_opinion_mining.py
index e4c4b3c76dfe..634d45125993 100644
--- a/sdk/textanalytics/azure-ai-textanalytics/samples/sample_analyze_sentiment_with_opinion_mining.py
+++ b/sdk/textanalytics/azure-ai-textanalytics/samples/sample_analyze_sentiment_with_opinion_mining.py
@@ -33,7 +33,7 @@
     Since these reviews seem so mixed, and since I'm interested in finding exactly what it is about my hotel that should be improved, let's find the complaints users have about individual aspects of this hotel
-    In 
order to do that, I'm going to extract aspects that have a negative sentiment. I'm going to map aspect to the mined opinion object we get back to aggregate the reviews by aspect. + In order to do that, I'm going to extract the targets of a negative sentiment. I'm going to map each of these targets to the mined opinion object we get back to aggregate the reviews by target. Let's now go through the aspects of our hotel people have complained about and see what users have specifically said Users have made 1 complaints about 'food', specifically saying that it's 'unacceptable' @@ -100,27 +100,27 @@ def sample_analyze_sentiment_with_opinion_mining(self): ) print( - "\nIn order to do that, I'm going to extract aspects that have a negative sentiment. " - "I'm going to map aspect to the mined opinion object we get back to aggregate the reviews by aspect. " + "\nIn order to do that, I'm going to extract targets of a negative sentiment. " + "I'm going to map each of these targets to the mined opinion object we get back to aggregate the reviews by target. " ) - aspect_to_complaints = {} + target_to_complaints = {} for document in doc_result: for sentence in document.sentences: for mined_opinion in sentence.mined_opinions: - aspect = mined_opinion.aspect - if aspect.sentiment == 'negative': - aspect_to_complaints.setdefault(aspect.text, []) - aspect_to_complaints[aspect.text].append(mined_opinion) + target = mined_opinion.target + if target.sentiment == 'negative': + target_to_complaints.setdefault(target.text, []) + target_to_complaints[target.text].append(mined_opinion) print("\nLet's now go through the aspects of our hotel people have complained about and see what users have specifically said") - for aspect, complaints in aspect_to_complaints.items(): + for target, complaints in target_to_complaints.items(): print("Users have made {} complaint(s) about '{}', specifically saying that it's '{}'".format( len(complaints), - aspect, + target, "', '".join( - [opinion.text for complaint in complaints for opinion in complaint.opinions] + [assessment.text for complaint in complaints for assessment in complaint.assessments] ) )) diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_all_successful_passing_dict.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_all_successful_passing_dict.yaml index 8211773d5afc..05332401c6be 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_all_successful_passing_dict.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_all_successful_passing_dict.yaml @@ -17,7 +17,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=true&stringIndexType=UnicodeCodePoint response: @@ -30,13 +30,13 @@ interactions: recommend you try it."}],"warnings":[]}],"errors":[],"modelVersion":"2020-04-01"}' headers: apim-request-id: - - 14884da7-5594-4295-aa10-18dd7c15dc34 + - 9d32feee-f145-48af-88e1-a52179b6bf6b content-type: - application/json; charset=utf-8 csp-billing-usage: - CognitiveServices.TextAnalytics.BatchScoring=3 date: - - Wed, 27 Jan 2021 02:28:42 GMT + - Fri, 19 Feb 2021 23:10:38 GMT 
strict-transport-security: - max-age=31536000; includeSubDomains; preload transfer-encoding: @@ -44,7 +44,7 @@ interactions: x-content-type-options: - nosniff x-envoy-upstream-service-time: - - '96' + - '3772' status: code: 200 message: OK diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_all_successful_passing_text_document_input.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_all_successful_passing_text_document_input.yaml index 64fe711358c4..f9f24c07bc72 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_all_successful_passing_text_document_input.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_all_successful_passing_text_document_input.yaml @@ -17,7 +17,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: @@ -30,13 +30,13 @@ interactions: recommend you try it."}],"warnings":[]}],"errors":[],"modelVersion":"2020-04-01"}' headers: apim-request-id: - - 949d9879-7fe5-4a68-8e74-af1868cc7562 + - bedff57c-50ed-41cc-9364-a8c61f3ba2b0 content-type: - application/json; charset=utf-8 csp-billing-usage: - CognitiveServices.TextAnalytics.BatchScoring=3 date: - - Wed, 27 Jan 2021 02:28:43 GMT + - Fri, 19 Feb 2021 23:10:38 GMT strict-transport-security: - max-age=31536000; includeSubDomains; preload transfer-encoding: @@ -44,7 +44,7 @@ interactions: x-content-type-options: - nosniff x-envoy-upstream-service-time: - - '98' + - '110' status: code: 200 message: OK diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_bad_credentials.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_bad_credentials.yaml index e6b94f5eb1d9..ff6de1b313ec 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_bad_credentials.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_bad_credentials.yaml @@ -14,7 +14,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: @@ -26,7 +26,7 @@ interactions: content-length: - '224' date: - - Wed, 27 Jan 2021 02:24:36 GMT + - Fri, 19 Feb 2021 23:10:38 GMT status: code: 401 message: PermissionDenied diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_bad_model_version_error.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_bad_model_version_error.yaml index 500751133e21..45cb20919734 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_bad_model_version_error.yaml +++ 
b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_bad_model_version_error.yaml @@ -14,7 +14,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?model-version=bad&showStats=false&stringIndexType=UnicodeCodePoint response: @@ -23,11 +23,11 @@ interactions: model version. Possible values are: latest,2019-10-01,2020-04-01"}}}' headers: apim-request-id: - - fc86e6fb-2b50-4e85-9c04-b4befd5c1a61 + - f7f5419f-9d46-4caf-aab2-bdb63fc07c9d content-type: - application/json; charset=utf-8 date: - - Wed, 27 Jan 2021 02:29:07 GMT + - Fri, 19 Feb 2021 23:10:39 GMT strict-transport-security: - max-age=31536000; includeSubDomains; preload transfer-encoding: @@ -35,7 +35,7 @@ interactions: x-content-type-options: - nosniff x-envoy-upstream-service-time: - - '4' + - '8' status: code: 400 message: Bad Request diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_batch_size_over_limit.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_batch_size_over_limit.yaml index 39379b15ba33..f9c22552eff0 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_batch_size_over_limit.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_batch_size_over_limit.yaml @@ -758,7 +758,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: @@ -767,11 +767,11 @@ interactions: request contains too many records. 
Max 10 records are permitted."}}}' headers: apim-request-id: - - a598fb33-e124-4bc4-a16a-aa20f2da415c + - f4d5fca6-7a5a-413a-ae72-3e43600ae864 content-type: - application/json; charset=utf-8 date: - - Wed, 27 Jan 2021 02:23:37 GMT + - Fri, 19 Feb 2021 23:10:39 GMT strict-transport-security: - max-age=31536000; includeSubDomains; preload transfer-encoding: @@ -779,7 +779,7 @@ interactions: x-content-type-options: - nosniff x-envoy-upstream-service-time: - - '10' + - '11' status: code: 400 message: Bad Request diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_batch_size_over_limit_error.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_batch_size_over_limit_error.yaml index 54d446141bbc..c0bd2ecc4354 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_batch_size_over_limit_error.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_batch_size_over_limit_error.yaml @@ -723,7 +723,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: @@ -732,11 +732,11 @@ interactions: request contains too many records. Max 10 records are permitted."}}}' headers: apim-request-id: - - b993e6d9-6a8c-46bf-9c5f-8cb03429c3a8 + - 295169ce-6648-4912-be9b-5c51a1d4609b content-type: - application/json; charset=utf-8 date: - - Wed, 27 Jan 2021 02:28:42 GMT + - Fri, 19 Feb 2021 23:10:39 GMT strict-transport-security: - max-age=31536000; includeSubDomains; preload transfer-encoding: @@ -744,7 +744,7 @@ interactions: x-content-type-options: - nosniff x-envoy-upstream-service-time: - - '9' + - '11' status: code: 400 message: Bad Request diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_client_passed_default_language_hint.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_client_passed_default_language_hint.yaml index ac283b97fdcd..6488d2990e44 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_client_passed_default_language_hint.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_client_passed_default_language_hint.yaml @@ -16,7 +16,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: @@ -27,13 +27,13 @@ interactions: restaurant had really good food."}],"warnings":[]}],"errors":[],"modelVersion":"2020-04-01"}' headers: apim-request-id: - - b5715167-d74a-4fa8-ace6-b93867b4f800 + - b833f4cb-49c4-45aa-86c0-8b9ec2c9a221 content-type: - application/json; charset=utf-8 csp-billing-usage: - CognitiveServices.TextAnalytics.BatchScoring=3 date: - - Wed, 27 Jan 2021 02:28:43 GMT + - Fri, 19 Feb 2021 23:10:40 GMT 
strict-transport-security: - max-age=31536000; includeSubDomains; preload transfer-encoding: @@ -41,7 +41,7 @@ interactions: x-content-type-options: - nosniff x-envoy-upstream-service-time: - - '127' + - '253' status: code: 200 message: OK @@ -62,7 +62,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: @@ -73,13 +73,13 @@ interactions: restaurant had really good food."}],"warnings":[]}],"errors":[],"modelVersion":"2020-04-01"}' headers: apim-request-id: - - 672f1e3d-22c2-46e0-a390-ab8f108067e1 + - ff1877ce-4a41-4958-961a-2fef20f5a1d4 content-type: - application/json; charset=utf-8 csp-billing-usage: - CognitiveServices.TextAnalytics.BatchScoring=3 date: - - Wed, 27 Jan 2021 02:28:43 GMT + - Fri, 19 Feb 2021 23:10:40 GMT strict-transport-security: - max-age=31536000; includeSubDomains; preload transfer-encoding: @@ -87,7 +87,7 @@ interactions: x-content-type-options: - nosniff x-envoy-upstream-service-time: - - '160' + - '104' status: code: 200 message: OK @@ -108,7 +108,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: @@ -119,13 +119,13 @@ interactions: restaurant had really good food."}],"warnings":[]}],"errors":[],"modelVersion":"2020-04-01"}' headers: apim-request-id: - - 3ad2d4b5-0791-4e65-945c-f7c37caeeab9 + - e25c1ce7-9d2b-46da-a2a0-7fb687d606f2 content-type: - application/json; charset=utf-8 csp-billing-usage: - CognitiveServices.TextAnalytics.BatchScoring=3 date: - - Wed, 27 Jan 2021 02:28:44 GMT + - Fri, 19 Feb 2021 23:10:40 GMT strict-transport-security: - max-age=31536000; includeSubDomains; preload transfer-encoding: @@ -133,7 +133,7 @@ interactions: x-content-type-options: - nosniff x-envoy-upstream-service-time: - - '117' + - '123' status: code: 200 message: OK diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_default_string_index_type_is_UnicodeCodePoint.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_default_string_index_type_is_UnicodeCodePoint.yaml index 083d9d4191ac..5500a8c7505e 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_default_string_index_type_is_UnicodeCodePoint.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_default_string_index_type_is_UnicodeCodePoint.yaml @@ -1,7 +1,6 @@ interactions: - request: - body: '{"documents": [{"id": "0", "text": "Bill Gates is the CEO of Microsoft.", - "language": "en"}]}' + body: '{"documents": [{"id": "0", "text": "Hello world", "language": "en"}]}' headers: Accept: - application/json, text/json @@ -10,26 +9,26 @@ interactions: Connection: - keep-alive Content-Length: - - '93' + - '69' Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.7.5 
(Linux-4.4.0-19041-Microsoft-x86_64-with-debian-stretch-sid) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: body: - string: '{"documents":[{"id":"0","sentiment":"neutral","confidenceScores":{"positive":0.04,"neutral":0.95,"negative":0.01},"sentences":[{"sentiment":"neutral","confidenceScores":{"positive":0.04,"neutral":0.95,"negative":0.01},"offset":0,"length":35,"text":"Bill - Gates is the CEO of Microsoft."}],"warnings":[]}],"errors":[],"modelVersion":"2020-04-01"}' + string: '{"documents":[{"id":"0","sentiment":"neutral","confidenceScores":{"positive":0.02,"neutral":0.97,"negative":0.01},"sentences":[{"sentiment":"neutral","confidenceScores":{"positive":0.02,"neutral":0.97,"negative":0.01},"offset":0,"length":11,"text":"Hello + world"}],"warnings":[]}],"errors":[],"modelVersion":"2020-04-01"}' headers: apim-request-id: - - ca8efa72-dc14-4ec9-bad1-2a89313792cc + - 8b73df3d-2c2b-4da3-8b62-43bf2e984ff4 content-type: - application/json; charset=utf-8 csp-billing-usage: - CognitiveServices.TextAnalytics.BatchScoring=1 date: - - Fri, 29 Jan 2021 20:20:23 GMT + - Fri, 19 Feb 2021 23:10:40 GMT strict-transport-security: - max-age=31536000; includeSubDomains; preload transfer-encoding: @@ -37,7 +36,7 @@ interactions: x-content-type-options: - nosniff x-envoy-upstream-service-time: - - '116' + - '87' status: code: 200 message: OK diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_document_attribute_error_no_result_attribute.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_document_attribute_error_no_result_attribute.yaml index 3d459d4ea866..0421ab8c91ce 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_document_attribute_error_no_result_attribute.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_document_attribute_error_no_result_attribute.yaml @@ -13,7 +13,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: @@ -23,11 +23,11 @@ interactions: text is empty."}}}],"modelVersion":"2020-04-01"}' headers: apim-request-id: - - cfaf1490-f591-45df-877a-7727ced889b2 + - aaa1d214-17af-41b5-9769-24219364f0b5 content-type: - application/json; charset=utf-8 date: - - Wed, 27 Jan 2021 02:24:37 GMT + - Fri, 19 Feb 2021 23:10:40 GMT strict-transport-security: - max-age=31536000; includeSubDomains; preload transfer-encoding: @@ -35,7 +35,7 @@ interactions: x-content-type-options: - nosniff x-envoy-upstream-service-time: - - '2' + - '8' status: code: 200 message: OK diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_document_attribute_error_nonexistent_attribute.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_document_attribute_error_nonexistent_attribute.yaml index 610c35788a4c..a37968061e6b 100644 --- 
a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_document_attribute_error_nonexistent_attribute.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_document_attribute_error_nonexistent_attribute.yaml @@ -13,7 +13,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: @@ -23,11 +23,11 @@ interactions: text is empty."}}}],"modelVersion":"2020-04-01"}' headers: apim-request-id: - - 72cbb13f-fa1e-478c-b1ef-740732ba19bc + - 60f1b8b3-c178-4cc9-80ee-d86c22cee587 content-type: - application/json; charset=utf-8 date: - - Wed, 27 Jan 2021 02:21:16 GMT + - Fri, 19 Feb 2021 23:10:40 GMT strict-transport-security: - max-age=31536000; includeSubDomains; preload transfer-encoding: @@ -35,7 +35,7 @@ interactions: x-content-type-options: - nosniff x-envoy-upstream-service-time: - - '6' + - '13' status: code: 200 message: OK diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_document_errors.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_document_errors.yaml index 9c21d08e8db1..9bc159355f8b 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_document_errors.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_document_errors.yaml @@ -16,7 +16,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: @@ -32,11 +32,11 @@ interactions: see https://aka.ms/text-analytics-data-limits"}}}],"modelVersion":"2020-04-01"}' headers: apim-request-id: - - 5823b92a-741c-47fe-a617-ea136e6f081e + - 1b800e49-6d22-462e-bfeb-e8ee9c69513f content-type: - application/json; charset=utf-8 date: - - Wed, 27 Jan 2021 02:29:08 GMT + - Fri, 19 Feb 2021 23:10:40 GMT strict-transport-security: - max-age=31536000; includeSubDomains; preload transfer-encoding: @@ -44,7 +44,7 @@ interactions: x-content-type-options: - nosniff x-envoy-upstream-service-time: - - '2' + - '4' status: code: 200 message: OK diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_document_warnings.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_document_warnings.yaml index 026776c17156..328ee6c52b42 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_document_warnings.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_document_warnings.yaml @@ -14,7 +14,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 
(Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: @@ -23,13 +23,13 @@ interactions: won''t actually create a warning :''("}],"warnings":[]}],"errors":[],"modelVersion":"2020-04-01"}' headers: apim-request-id: - - e00da657-7aed-4090-812d-2f20a25d25da + - c7c7fe04-db0b-4ec6-b1dc-3d5fadad6806 content-type: - application/json; charset=utf-8 csp-billing-usage: - CognitiveServices.TextAnalytics.BatchScoring=1 date: - - Wed, 27 Jan 2021 02:23:38 GMT + - Fri, 19 Feb 2021 23:10:40 GMT strict-transport-security: - max-age=31536000; includeSubDomains; preload transfer-encoding: @@ -37,7 +37,7 @@ interactions: x-content-type-options: - nosniff x-envoy-upstream-service-time: - - '87' + - '85' status: code: 200 message: OK diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_duplicate_ids_error.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_duplicate_ids_error.yaml index 84b4e1e00d47..9931f0fd487b 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_duplicate_ids_error.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_duplicate_ids_error.yaml @@ -14,7 +14,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: @@ -23,11 +23,11 @@ interactions: contains duplicated Ids. 
Make sure each document has a unique Id."}}}' headers: apim-request-id: - - 4c72b1b7-409f-4801-9ed3-64e11f0bf881 + - d3764393-d306-48c4-b091-e7ef4ed5a634 content-type: - application/json; charset=utf-8 date: - - Wed, 27 Jan 2021 02:28:43 GMT + - Fri, 19 Feb 2021 23:10:40 GMT strict-transport-security: - max-age=31536000; includeSubDomains; preload transfer-encoding: @@ -35,7 +35,7 @@ interactions: x-content-type-options: - nosniff x-envoy-upstream-service-time: - - '7' + - '5' status: code: 400 message: Bad Request diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_empty_credential_class.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_empty_credential_class.yaml index a4dfc984e2e1..10862d2fa9bd 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_empty_credential_class.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_empty_credential_class.yaml @@ -14,7 +14,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: @@ -26,7 +26,7 @@ interactions: content-length: - '224' date: - - Wed, 27 Jan 2021 02:28:44 GMT + - Fri, 19 Feb 2021 23:10:41 GMT status: code: 401 message: PermissionDenied diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_explicit_set_string_index_type.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_explicit_set_string_index_type.yaml index 48c485df93a8..626bc99d940f 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_explicit_set_string_index_type.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_explicit_set_string_index_type.yaml @@ -1,7 +1,6 @@ interactions: - request: - body: '{"documents": [{"id": "0", "text": "Bill Gates is the CEO of Microsoft.", - "language": "en"}]}' + body: '{"documents": [{"id": "0", "text": "Hello world", "language": "en"}]}' headers: Accept: - application/json, text/json @@ -10,26 +9,26 @@ interactions: Connection: - keep-alive Content-Length: - - '93' + - '69' Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.7.5 (Linux-4.4.0-19041-Microsoft-x86_64-with-debian-stretch-sid) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=TextElements_v8 response: body: - string: '{"documents":[{"id":"0","sentiment":"neutral","confidenceScores":{"positive":0.04,"neutral":0.95,"negative":0.01},"sentences":[{"sentiment":"neutral","confidenceScores":{"positive":0.04,"neutral":0.95,"negative":0.01},"offset":0,"length":35,"text":"Bill - Gates is the CEO of Microsoft."}],"warnings":[]}],"errors":[],"modelVersion":"2020-04-01"}' + string: 
'{"documents":[{"id":"0","sentiment":"neutral","confidenceScores":{"positive":0.02,"neutral":0.97,"negative":0.01},"sentences":[{"sentiment":"neutral","confidenceScores":{"positive":0.02,"neutral":0.97,"negative":0.01},"offset":0,"length":11,"text":"Hello + world"}],"warnings":[]}],"errors":[],"modelVersion":"2020-04-01"}' headers: apim-request-id: - - b33e218c-dafa-4bab-b87a-85bde56f7cd1 + - 5e41f485-95f2-4df3-bbab-26ef8f0c0be3 content-type: - application/json; charset=utf-8 csp-billing-usage: - CognitiveServices.TextAnalytics.BatchScoring=1 date: - - Fri, 29 Jan 2021 20:32:05 GMT + - Fri, 19 Feb 2021 23:10:41 GMT strict-transport-security: - max-age=31536000; includeSubDomains; preload transfer-encoding: @@ -37,7 +36,7 @@ interactions: x-content-type-options: - nosniff x-envoy-upstream-service-time: - - '93' + - '113' status: code: 200 message: OK diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_input_with_all_errors.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_input_with_all_errors.yaml index b15822b455d9..d08d91ebcb13 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_input_with_all_errors.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_input_with_all_errors.yaml @@ -15,7 +15,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: @@ -29,11 +29,11 @@ interactions: text is empty."}}}],"modelVersion":"2020-04-01"}' headers: apim-request-id: - - 922e0dd7-aff6-48bb-bf64-2cb845487760 + - d54a3e39-b16e-47ba-9277-cdc222031539 content-type: - application/json; charset=utf-8 date: - - Wed, 27 Jan 2021 02:21:16 GMT + - Fri, 19 Feb 2021 23:10:41 GMT strict-transport-security: - max-age=31536000; includeSubDomains; preload transfer-encoding: diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_input_with_some_errors.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_input_with_some_errors.yaml index 1b086c72c49f..ace4c8212b38 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_input_with_some_errors.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_input_with_some_errors.yaml @@ -16,7 +16,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: @@ -30,13 +30,13 @@ interactions: language code. 
Supported languages: de,en,es,fr,hi,it,ja,ko,nl,no,pt-BR,pt-PT,tr,zh-Hans,zh-Hant"}}}],"modelVersion":"2020-04-01"}' headers: apim-request-id: - - f3c3ac37-3a37-4b75-ab58-e51ff97732da + - 0d145622-d885-45b8-80af-cb2ecc1da516 content-type: - application/json; charset=utf-8 csp-billing-usage: - CognitiveServices.TextAnalytics.BatchScoring=1 date: - - Wed, 27 Jan 2021 02:21:17 GMT + - Fri, 19 Feb 2021 23:10:41 GMT strict-transport-security: - max-age=31536000; includeSubDomains; preload transfer-encoding: @@ -44,7 +44,7 @@ interactions: x-content-type-options: - nosniff x-envoy-upstream-service-time: - - '99' + - '102' status: code: 200 message: OK diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_invalid_language_hint_docs.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_invalid_language_hint_docs.yaml index ebb59537e935..f5d653a6d977 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_invalid_language_hint_docs.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_invalid_language_hint_docs.yaml @@ -14,7 +14,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: @@ -24,11 +24,11 @@ interactions: language code. Supported languages: de,en,es,fr,hi,it,ja,ko,nl,no,pt-BR,pt-PT,tr,zh-Hans,zh-Hant"}}}],"modelVersion":"2020-04-01"}' headers: apim-request-id: - - eea4bd0c-a21b-41ce-a213-9ff97b5ff81b + - 65a125fc-838b-4523-93b5-99f7cda086f6 content-type: - application/json; charset=utf-8 date: - - Wed, 27 Jan 2021 02:21:18 GMT + - Fri, 19 Feb 2021 23:10:41 GMT strict-transport-security: - max-age=31536000; includeSubDomains; preload transfer-encoding: diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_invalid_language_hint_method.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_invalid_language_hint_method.yaml index 5a48b723afe0..30cd6bee489a 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_invalid_language_hint_method.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_invalid_language_hint_method.yaml @@ -14,7 +14,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: @@ -24,11 +24,11 @@ interactions: language code. 
Supported languages: de,en,es,fr,hi,it,ja,ko,nl,no,pt-BR,pt-PT,tr,zh-Hans,zh-Hant"}}}],"modelVersion":"2020-04-01"}' headers: apim-request-id: - - 7c780695-f308-4a25-b612-c2e97ea4e3fa + - d3361c3b-cb1f-4d5f-af3b-41e92040ffcd content-type: - application/json; charset=utf-8 date: - - Wed, 27 Jan 2021 02:21:17 GMT + - Fri, 19 Feb 2021 23:10:41 GMT strict-transport-security: - max-age=31536000; includeSubDomains; preload transfer-encoding: @@ -36,7 +36,7 @@ interactions: x-content-type-options: - nosniff x-envoy-upstream-service-time: - - '8' + - '2' status: code: 200 message: OK diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_language_kwarg_spanish.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_language_kwarg_spanish.yaml index 6eea81ddc2a6..932cad095623 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_language_kwarg_spanish.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_language_kwarg_spanish.yaml @@ -14,7 +14,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?model-version=latest&showStats=true&stringIndexType=UnicodeCodePoint response: @@ -23,13 +23,13 @@ interactions: Gates is the CEO of Microsoft."}],"warnings":[]}],"errors":[],"modelVersion":"2020-04-01"}' headers: apim-request-id: - - 1db2156a-8e52-49b2-b365-982dcb183fda + - 54e1bce6-8a84-4a6d-9949-bdbe75c0ec29 content-type: - application/json; charset=utf-8 csp-billing-usage: - CognitiveServices.TextAnalytics.BatchScoring=1 date: - - Wed, 27 Jan 2021 02:21:19 GMT + - Fri, 19 Feb 2021 23:10:41 GMT strict-transport-security: - max-age=31536000; includeSubDomains; preload transfer-encoding: @@ -37,7 +37,7 @@ interactions: x-content-type-options: - nosniff x-envoy-upstream-service-time: - - '481' + - '109' status: code: 200 message: OK diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_no_offset_v3_sentence_sentiment.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_no_offset_v3_sentence_sentiment.yaml index ad394aa082d2..a811e7c3a0ab 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_no_offset_v3_sentence_sentiment.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_no_offset_v3_sentence_sentiment.yaml @@ -14,7 +14,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.0/sentiment?showStats=false response: @@ -24,13 +24,13 @@ interactions: do not like being inside"}],"warnings":[]}],"errors":[],"modelVersion":"2020-04-01"}' headers: apim-request-id: - - c562acd8-aee8-4c78-9f60-25c630d483d1 + - c3dedbd7-6408-4c52-b726-9632d1e2a80a content-type: - application/json; charset=utf-8 csp-billing-usage: - CognitiveServices.TextAnalytics.BatchScoring=1 date: - - Wed, 
27 Jan 2021 02:21:19 GMT + - Fri, 19 Feb 2021 23:10:41 GMT strict-transport-security: - max-age=31536000; includeSubDomains; preload transfer-encoding: @@ -38,7 +38,7 @@ interactions: x-content-type-options: - nosniff x-envoy-upstream-service-time: - - '80' + - '103' status: code: 200 message: OK diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_offset.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_offset.yaml index 31ccc6210045..238d654a00d0 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_offset.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_offset.yaml @@ -14,7 +14,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: @@ -24,13 +24,13 @@ interactions: do not like being inside"}],"warnings":[]}],"errors":[],"modelVersion":"2020-04-01"}' headers: apim-request-id: - - d0204fa0-5729-433c-9126-02332b9098e4 + - efeb6cd4-e572-4329-b5e2-1201761a81e1 content-type: - application/json; charset=utf-8 csp-billing-usage: - CognitiveServices.TextAnalytics.BatchScoring=1 date: - - Wed, 27 Jan 2021 02:21:21 GMT + - Fri, 19 Feb 2021 23:10:42 GMT strict-transport-security: - max-age=31536000; includeSubDomains; preload transfer-encoding: @@ -38,7 +38,7 @@ interactions: x-content-type-options: - nosniff x-envoy-upstream-service-time: - - '810' + - '132' status: code: 200 message: OK diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_opinion_mining.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_opinion_mining.yaml index f7018300bc96..2ceb45afaea6 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_opinion_mining.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_opinion_mining.yaml @@ -14,22 +14,22 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST - uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&opinionMining=true&stringIndexType=UnicodeCodePoint + uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.4/sentiment?showStats=false&opinionMining=true&stringIndexType=UnicodeCodePoint response: body: string: '{"documents":[{"id":"0","sentiment":"positive","confidenceScores":{"positive":0.98,"neutral":0.02,"negative":0.0},"sentences":[{"sentiment":"positive","confidenceScores":{"positive":0.98,"neutral":0.02,"negative":0.0},"offset":0,"length":74,"text":"It - has a sleek premium aluminum design that makes it beautiful to look 
at.","aspects":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":32,"length":6,"text":"design","relations":[{"relationType":"opinion","ref":"#/documents/0/sentences/0/opinions/0"},{"relationType":"opinion","ref":"#/documents/0/sentences/0/opinions/1"}]}],"opinions":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":9,"length":5,"text":"sleek","isNegated":false},{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":15,"length":7,"text":"premium","isNegated":false}]}],"warnings":[]}],"errors":[],"modelVersion":"2020-04-01"}' + has a sleek premium aluminum design that makes it beautiful to look at.","targets":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":32,"length":6,"text":"design","relations":[{"relationType":"assessment","ref":"#/documents/0/sentences/0/assessments/0"},{"relationType":"assessment","ref":"#/documents/0/sentences/0/assessments/1"}]}],"assessments":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":9,"length":5,"text":"sleek","isNegated":false},{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":15,"length":7,"text":"premium","isNegated":false}]}],"warnings":[]}],"errors":[],"modelVersion":"2020-04-01"}' headers: apim-request-id: - - 36de0f2e-c1a3-4cfb-986d-36455482ee3e + - f893a757-152d-41d0-9021-62356ce0a962 content-type: - application/json; charset=utf-8 csp-billing-usage: - CognitiveServices.TextAnalytics.BatchScoring=1 date: - - Wed, 27 Jan 2021 02:21:21 GMT + - Sat, 20 Feb 2021 00:04:47 GMT strict-transport-security: - max-age=31536000; includeSubDomains; preload transfer-encoding: @@ -37,7 +37,7 @@ interactions: x-content-type-options: - nosniff x-envoy-upstream-service-time: - - '469' + - '4069' status: code: 200 message: OK diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_opinion_mining_more_than_5_documents.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_opinion_mining_more_than_5_documents.yaml index e4a3da43853d..dc20e59a3ab9 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_opinion_mining_more_than_5_documents.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_opinion_mining_more_than_5_documents.yaml @@ -21,30 +21,30 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST - uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&opinionMining=true&stringIndexType=UnicodeCodePoint + uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.4/sentiment?showStats=false&opinionMining=true&stringIndexType=UnicodeCodePoint response: body: string: '{"documents":[{"id":"0","sentiment":"negative","confidenceScores":{"positive":0.0,"neutral":0.0,"negative":1.0},"sentences":[{"sentiment":"negative","confidenceScores":{"positive":0.0,"neutral":0.0,"negative":1.0},"offset":0,"length":25,"text":"The - food was 
unacceptable","aspects":[{"sentiment":"negative","confidenceScores":{"positive":0.0,"negative":1.0},"offset":4,"length":4,"text":"food","relations":[{"relationType":"opinion","ref":"#/documents/0/sentences/0/opinions/0"}]}],"opinions":[{"sentiment":"negative","confidenceScores":{"positive":0.0,"negative":1.0},"offset":13,"length":12,"text":"unacceptable","isNegated":false}]}],"warnings":[]},{"id":"1","sentiment":"positive","confidenceScores":{"positive":1.0,"neutral":0.0,"negative":0.0},"sentences":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"neutral":0.0,"negative":0.0},"offset":0,"length":25,"text":"The - rooms were beautiful.","aspects":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":4,"length":5,"text":"rooms","relations":[{"relationType":"opinion","ref":"#/documents/1/sentences/0/opinions/0"}]}],"opinions":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":15,"length":9,"text":"beautiful","isNegated":false}]},{"sentiment":"positive","confidenceScores":{"positive":1.0,"neutral":0.0,"negative":0.0},"offset":26,"length":26,"text":"The - AC was good and quiet.","aspects":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":30,"length":2,"text":"AC","relations":[{"relationType":"opinion","ref":"#/documents/1/sentences/1/opinions/0"},{"relationType":"opinion","ref":"#/documents/1/sentences/1/opinions/1"}]}],"opinions":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":37,"length":4,"text":"good","isNegated":false},{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":46,"length":5,"text":"quiet","isNegated":false}]}],"warnings":[]},{"id":"2","sentiment":"negative","confidenceScores":{"positive":0.01,"neutral":0.0,"negative":0.99},"sentences":[{"sentiment":"negative","confidenceScores":{"positive":0.01,"neutral":0.0,"negative":0.99},"offset":0,"length":50,"text":"The - breakfast was good, but the toilet was smelly.","aspects":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":4,"length":9,"text":"breakfast","relations":[{"relationType":"opinion","ref":"#/documents/2/sentences/0/opinions/0"}]},{"sentiment":"negative","confidenceScores":{"positive":0.0,"negative":1.0},"offset":32,"length":6,"text":"toilet","relations":[{"relationType":"opinion","ref":"#/documents/2/sentences/0/opinions/1"}]}],"opinions":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":18,"length":4,"text":"good","isNegated":false},{"sentiment":"negative","confidenceScores":{"positive":0.0,"negative":1.0},"offset":43,"length":6,"text":"smelly","isNegated":false}]}],"warnings":[]},{"id":"3","sentiment":"positive","confidenceScores":{"positive":1.0,"neutral":0.0,"negative":0.0},"sentences":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"neutral":0.0,"negative":0.0},"offset":0,"length":71,"text":"Loved - this hotel - good breakfast - nice shuttle service - clean 
rooms.","aspects":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":11,"length":5,"text":"hotel","relations":[{"relationType":"opinion","ref":"#/documents/3/sentences/0/opinions/0"}]},{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":24,"length":9,"text":"breakfast","relations":[{"relationType":"opinion","ref":"#/documents/3/sentences/0/opinions/1"}]},{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":41,"length":15,"text":"shuttle - service","relations":[{"relationType":"opinion","ref":"#/documents/3/sentences/0/opinions/2"}]},{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":65,"length":5,"text":"rooms","relations":[{"relationType":"opinion","ref":"#/documents/3/sentences/0/opinions/0"},{"relationType":"opinion","ref":"#/documents/3/sentences/0/opinions/1"},{"relationType":"opinion","ref":"#/documents/3/sentences/0/opinions/3"},{"relationType":"opinion","ref":"#/documents/3/sentences/0/opinions/2"}]}],"opinions":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":19,"length":4,"text":"good","isNegated":false},{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":36,"length":4,"text":"nice","isNegated":false},{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":0,"length":5,"text":"loved","isNegated":false},{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":59,"length":5,"text":"clean","isNegated":false}]}],"warnings":[]},{"id":"4","sentiment":"positive","confidenceScores":{"positive":1.0,"neutral":0.0,"negative":0.0},"sentences":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"neutral":0.0,"negative":0.0},"offset":0,"length":56,"text":"I - had a great unobstructed view of the Microsoft campus.","aspects":[{"sentiment":"positive","confidenceScores":{"positive":0.97,"negative":0.03},"offset":27,"length":4,"text":"view","relations":[{"relationType":"opinion","ref":"#/documents/4/sentences/0/opinions/0"},{"relationType":"opinion","ref":"#/documents/4/sentences/0/opinions/1"}]}],"opinions":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":8,"length":5,"text":"great","isNegated":false},{"sentiment":"positive","confidenceScores":{"positive":0.93,"negative":0.07},"offset":14,"length":12,"text":"unobstructed","isNegated":false}]}],"warnings":[]},{"id":"5","sentiment":"negative","confidenceScores":{"positive":0.0,"neutral":0.0,"negative":1.0},"sentences":[{"sentiment":"negative","confidenceScores":{"positive":0.0,"neutral":0.0,"negative":1.0},"offset":0,"length":75,"text":"Nice - rooms but bathrooms were old and the toilet was dirty when we 
arrived.","aspects":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":5,"length":5,"text":"rooms","relations":[{"relationType":"opinion","ref":"#/documents/5/sentences/0/opinions/0"}]},{"sentiment":"negative","confidenceScores":{"positive":0.0,"negative":1.0},"offset":15,"length":9,"text":"bathrooms","relations":[{"relationType":"opinion","ref":"#/documents/5/sentences/0/opinions/1"}]},{"sentiment":"negative","confidenceScores":{"positive":0.0,"negative":1.0},"offset":42,"length":6,"text":"toilet","relations":[{"relationType":"opinion","ref":"#/documents/5/sentences/0/opinions/2"}]}],"opinions":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":0,"length":4,"text":"nice","isNegated":false},{"sentiment":"negative","confidenceScores":{"positive":0.0,"negative":1.0},"offset":30,"length":3,"text":"old","isNegated":false},{"sentiment":"negative","confidenceScores":{"positive":0.0,"negative":1.0},"offset":53,"length":5,"text":"dirty","isNegated":false}]}],"warnings":[]},{"id":"6","sentiment":"neutral","confidenceScores":{"positive":0.03,"neutral":0.63,"negative":0.34},"sentences":[{"sentiment":"neutral","confidenceScores":{"positive":0.03,"neutral":0.63,"negative":0.34},"offset":0,"length":19,"text":"The - toilet smelled.","aspects":[{"sentiment":"negative","confidenceScores":{"positive":0.01,"negative":0.99},"offset":4,"length":6,"text":"toilet","relations":[{"relationType":"opinion","ref":"#/documents/6/sentences/0/opinions/0"}]}],"opinions":[{"sentiment":"negative","confidenceScores":{"positive":0.01,"negative":0.99},"offset":11,"length":7,"text":"smelled","isNegated":false}]}],"warnings":[]}],"errors":[],"modelVersion":"2020-04-01"}' + food was unacceptable","targets":[{"sentiment":"negative","confidenceScores":{"positive":0.0,"negative":1.0},"offset":4,"length":4,"text":"food","relations":[{"relationType":"assessment","ref":"#/documents/0/sentences/0/assessments/0"}]}],"assessments":[{"sentiment":"negative","confidenceScores":{"positive":0.0,"negative":1.0},"offset":13,"length":12,"text":"unacceptable","isNegated":false}]}],"warnings":[]},{"id":"1","sentiment":"positive","confidenceScores":{"positive":1.0,"neutral":0.0,"negative":0.0},"sentences":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"neutral":0.0,"negative":0.0},"offset":0,"length":25,"text":"The + rooms were beautiful.","targets":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":4,"length":5,"text":"rooms","relations":[{"relationType":"assessment","ref":"#/documents/1/sentences/0/assessments/0"}]}],"assessments":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":15,"length":9,"text":"beautiful","isNegated":false}]},{"sentiment":"positive","confidenceScores":{"positive":1.0,"neutral":0.0,"negative":0.0},"offset":26,"length":26,"text":"The + AC was good and 
quiet.","targets":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":30,"length":2,"text":"AC","relations":[{"relationType":"assessment","ref":"#/documents/1/sentences/1/assessments/0"},{"relationType":"assessment","ref":"#/documents/1/sentences/1/assessments/1"}]}],"assessments":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":37,"length":4,"text":"good","isNegated":false},{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":46,"length":5,"text":"quiet","isNegated":false}]}],"warnings":[]},{"id":"2","sentiment":"negative","confidenceScores":{"positive":0.01,"neutral":0.0,"negative":0.99},"sentences":[{"sentiment":"negative","confidenceScores":{"positive":0.01,"neutral":0.0,"negative":0.99},"offset":0,"length":50,"text":"The + breakfast was good, but the toilet was smelly.","targets":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":4,"length":9,"text":"breakfast","relations":[{"relationType":"assessment","ref":"#/documents/2/sentences/0/assessments/0"}]},{"sentiment":"negative","confidenceScores":{"positive":0.0,"negative":1.0},"offset":32,"length":6,"text":"toilet","relations":[{"relationType":"assessment","ref":"#/documents/2/sentences/0/assessments/1"}]}],"assessments":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":18,"length":4,"text":"good","isNegated":false},{"sentiment":"negative","confidenceScores":{"positive":0.0,"negative":1.0},"offset":43,"length":6,"text":"smelly","isNegated":false}]}],"warnings":[]},{"id":"3","sentiment":"positive","confidenceScores":{"positive":1.0,"neutral":0.0,"negative":0.0},"sentences":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"neutral":0.0,"negative":0.0},"offset":0,"length":71,"text":"Loved + this hotel - good breakfast - nice shuttle service - clean rooms.","targets":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":11,"length":5,"text":"hotel","relations":[{"relationType":"assessment","ref":"#/documents/3/sentences/0/assessments/0"}]},{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":24,"length":9,"text":"breakfast","relations":[{"relationType":"assessment","ref":"#/documents/3/sentences/0/assessments/1"}]},{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":41,"length":15,"text":"shuttle + 
service","relations":[{"relationType":"assessment","ref":"#/documents/3/sentences/0/assessments/2"}]},{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":65,"length":5,"text":"rooms","relations":[{"relationType":"assessment","ref":"#/documents/3/sentences/0/assessments/0"},{"relationType":"assessment","ref":"#/documents/3/sentences/0/assessments/1"},{"relationType":"assessment","ref":"#/documents/3/sentences/0/assessments/3"},{"relationType":"assessment","ref":"#/documents/3/sentences/0/assessments/2"}]}],"assessments":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":19,"length":4,"text":"good","isNegated":false},{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":36,"length":4,"text":"nice","isNegated":false},{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":0,"length":5,"text":"loved","isNegated":false},{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":59,"length":5,"text":"clean","isNegated":false}]}],"warnings":[]},{"id":"4","sentiment":"positive","confidenceScores":{"positive":1.0,"neutral":0.0,"negative":0.0},"sentences":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"neutral":0.0,"negative":0.0},"offset":0,"length":56,"text":"I + had a great unobstructed view of the Microsoft campus.","targets":[{"sentiment":"positive","confidenceScores":{"positive":0.97,"negative":0.03},"offset":27,"length":4,"text":"view","relations":[{"relationType":"assessment","ref":"#/documents/4/sentences/0/assessments/0"},{"relationType":"assessment","ref":"#/documents/4/sentences/0/assessments/1"}]}],"assessments":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":8,"length":5,"text":"great","isNegated":false},{"sentiment":"positive","confidenceScores":{"positive":0.93,"negative":0.07},"offset":14,"length":12,"text":"unobstructed","isNegated":false}]}],"warnings":[]},{"id":"5","sentiment":"negative","confidenceScores":{"positive":0.0,"neutral":0.0,"negative":1.0},"sentences":[{"sentiment":"negative","confidenceScores":{"positive":0.0,"neutral":0.0,"negative":1.0},"offset":0,"length":75,"text":"Nice + rooms but bathrooms were old and the toilet was dirty when we 
arrived.","targets":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":5,"length":5,"text":"rooms","relations":[{"relationType":"assessment","ref":"#/documents/5/sentences/0/assessments/0"}]},{"sentiment":"negative","confidenceScores":{"positive":0.0,"negative":1.0},"offset":15,"length":9,"text":"bathrooms","relations":[{"relationType":"assessment","ref":"#/documents/5/sentences/0/assessments/1"}]},{"sentiment":"negative","confidenceScores":{"positive":0.0,"negative":1.0},"offset":42,"length":6,"text":"toilet","relations":[{"relationType":"assessment","ref":"#/documents/5/sentences/0/assessments/2"}]}],"assessments":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":0,"length":4,"text":"nice","isNegated":false},{"sentiment":"negative","confidenceScores":{"positive":0.0,"negative":1.0},"offset":30,"length":3,"text":"old","isNegated":false},{"sentiment":"negative","confidenceScores":{"positive":0.0,"negative":1.0},"offset":53,"length":5,"text":"dirty","isNegated":false}]}],"warnings":[]},{"id":"6","sentiment":"neutral","confidenceScores":{"positive":0.03,"neutral":0.63,"negative":0.34},"sentences":[{"sentiment":"neutral","confidenceScores":{"positive":0.03,"neutral":0.63,"negative":0.34},"offset":0,"length":19,"text":"The + toilet smelled.","targets":[{"sentiment":"negative","confidenceScores":{"positive":0.01,"negative":0.99},"offset":4,"length":6,"text":"toilet","relations":[{"relationType":"assessment","ref":"#/documents/6/sentences/0/assessments/0"}]}],"assessments":[{"sentiment":"negative","confidenceScores":{"positive":0.01,"negative":0.99},"offset":11,"length":7,"text":"smelled","isNegated":false}]}],"warnings":[]}],"errors":[],"modelVersion":"2020-04-01"}' headers: apim-request-id: - - 781559a5-11db-4c93-bba4-73c132956314 + - 6e6d400d-958e-4c79-ba1d-13246d011e10 content-type: - application/json; charset=utf-8 csp-billing-usage: - CognitiveServices.TextAnalytics.BatchScoring=7 date: - - Wed, 27 Jan 2021 02:21:22 GMT + - Sat, 20 Feb 2021 00:06:10 GMT strict-transport-security: - max-age=31536000; includeSubDomains; preload transfer-encoding: @@ -52,7 +52,7 @@ interactions: x-content-type-options: - nosniff x-envoy-upstream-service-time: - - '563' + - '4309' status: code: 200 message: OK diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_opinion_mining_no_mined_opinions.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_opinion_mining_no_mined_opinions.yaml index 07ef0eecdf33..b17ce7ca1211 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_opinion_mining_no_mined_opinions.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_opinion_mining_no_mined_opinions.yaml @@ -13,7 +13,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&opinionMining=true&stringIndexType=UnicodeCodePoint response: @@ -22,13 +22,13 @@ interactions: is a hot day","aspects":[],"opinions":[]}],"warnings":[]}],"errors":[],"modelVersion":"2020-04-01"}' headers: apim-request-id: - - 06cb795b-402f-40a8-b622-c119da8ed9dd + - 
582583ba-edd9-4c01-bf43-0139d3533ae3 content-type: - application/json; charset=utf-8 csp-billing-usage: - CognitiveServices.TextAnalytics.BatchScoring=1 date: - - Wed, 27 Jan 2021 02:21:23 GMT + - Tue, 23 Feb 2021 01:41:41 GMT strict-transport-security: - max-age=31536000; includeSubDomains; preload transfer-encoding: @@ -36,7 +36,7 @@ interactions: x-content-type-options: - nosniff x-envoy-upstream-service-time: - - '214' + - '77' status: code: 200 message: OK diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_opinion_mining_with_negated_opinion.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_opinion_mining_with_negated_opinion.yaml index b639dc48536a..1fc436c3dc0a 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_opinion_mining_with_negated_opinion.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_opinion_mining_with_negated_opinion.yaml @@ -14,22 +14,22 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST - uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&opinionMining=true&stringIndexType=UnicodeCodePoint + uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.4/sentiment?showStats=false&opinionMining=true&stringIndexType=UnicodeCodePoint response: body: string: '{"documents":[{"id":"0","sentiment":"negative","confidenceScores":{"positive":0.0,"neutral":0.0,"negative":1.0},"sentences":[{"sentiment":"negative","confidenceScores":{"positive":0.0,"neutral":0.0,"negative":1.0},"offset":0,"length":32,"text":"The - food and service is not good","aspects":[{"sentiment":"negative","confidenceScores":{"positive":0.01,"negative":0.99},"offset":4,"length":4,"text":"food","relations":[{"relationType":"opinion","ref":"#/documents/0/sentences/0/opinions/0"}]},{"sentiment":"negative","confidenceScores":{"positive":0.01,"negative":0.99},"offset":13,"length":7,"text":"service","relations":[{"relationType":"opinion","ref":"#/documents/0/sentences/0/opinions/0"}]}],"opinions":[{"sentiment":"negative","confidenceScores":{"positive":0.01,"negative":0.99},"offset":28,"length":4,"text":"good","isNegated":true}]}],"warnings":[]}],"errors":[],"modelVersion":"2020-04-01"}' + food and service is not good","targets":[{"sentiment":"negative","confidenceScores":{"positive":0.01,"negative":0.99},"offset":4,"length":4,"text":"food","relations":[{"relationType":"assessment","ref":"#/documents/0/sentences/0/assessments/0"}]},{"sentiment":"negative","confidenceScores":{"positive":0.01,"negative":0.99},"offset":13,"length":7,"text":"service","relations":[{"relationType":"assessment","ref":"#/documents/0/sentences/0/assessments/0"}]}],"assessments":[{"sentiment":"negative","confidenceScores":{"positive":0.01,"negative":0.99},"offset":28,"length":4,"text":"good","isNegated":true}]}],"warnings":[]}],"errors":[],"modelVersion":"2020-04-01"}' headers: apim-request-id: - - 97dd2611-c43b-4b70-aa99-9e767186ec44 + - 3e85ab3d-b006-4fd5-bbd2-c4949fa23f57 content-type: - application/json; charset=utf-8 csp-billing-usage: - CognitiveServices.TextAnalytics.BatchScoring=1 date: - - Wed, 27 Jan 2021 02:21:24 GMT + - Sat, 20 Feb 2021 00:07:37 GMT 
strict-transport-security: - max-age=31536000; includeSubDomains; preload transfer-encoding: @@ -37,7 +37,7 @@ interactions: x-content-type-options: - nosniff x-envoy-upstream-service-time: - - '121' + - '3615' status: code: 200 message: OK diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_out_of_order_ids.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_out_of_order_ids.yaml index 1174abaa2652..02478c2c27c1 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_out_of_order_ids.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_out_of_order_ids.yaml @@ -16,7 +16,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: @@ -26,13 +26,13 @@ interactions: text is empty."}}}],"modelVersion":"2020-04-01"}' headers: apim-request-id: - - cc6c590e-fe30-4fbf-9f84-8c631ada7e1e + - 29fac6ee-6398-46d5-ab0b-f79ac59c9986 content-type: - application/json; charset=utf-8 csp-billing-usage: - CognitiveServices.TextAnalytics.BatchScoring=4 date: - - Wed, 27 Jan 2021 02:21:24 GMT + - Fri, 19 Feb 2021 23:10:43 GMT strict-transport-security: - max-age=31536000; includeSubDomains; preload transfer-encoding: @@ -40,7 +40,7 @@ interactions: x-content-type-options: - nosniff x-envoy-upstream-service-time: - - '116' + - '85' status: code: 200 message: OK diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_output_same_order_as_input.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_output_same_order_as_input.yaml index ba4c7867cc01..e742aa000066 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_output_same_order_as_input.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_output_same_order_as_input.yaml @@ -16,7 +16,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: @@ -24,13 +24,13 @@ interactions: string: 
'{"documents":[{"id":"1","sentiment":"neutral","confidenceScores":{"positive":0.06,"neutral":0.9,"negative":0.04},"sentences":[{"sentiment":"neutral","confidenceScores":{"positive":0.06,"neutral":0.9,"negative":0.04},"offset":0,"length":3,"text":"one"}],"warnings":[]},{"id":"2","sentiment":"neutral","confidenceScores":{"positive":0.01,"neutral":0.97,"negative":0.02},"sentences":[{"sentiment":"neutral","confidenceScores":{"positive":0.01,"neutral":0.97,"negative":0.02},"offset":0,"length":3,"text":"two"}],"warnings":[]},{"id":"3","sentiment":"neutral","confidenceScores":{"positive":0.05,"neutral":0.93,"negative":0.02},"sentences":[{"sentiment":"neutral","confidenceScores":{"positive":0.05,"neutral":0.93,"negative":0.02},"offset":0,"length":5,"text":"three"}],"warnings":[]},{"id":"4","sentiment":"neutral","confidenceScores":{"positive":0.03,"neutral":0.96,"negative":0.01},"sentences":[{"sentiment":"neutral","confidenceScores":{"positive":0.03,"neutral":0.96,"negative":0.01},"offset":0,"length":4,"text":"four"}],"warnings":[]},{"id":"5","sentiment":"neutral","confidenceScores":{"positive":0.05,"neutral":0.93,"negative":0.02},"sentences":[{"sentiment":"neutral","confidenceScores":{"positive":0.05,"neutral":0.93,"negative":0.02},"offset":0,"length":4,"text":"five"}],"warnings":[]}],"errors":[],"modelVersion":"2020-04-01"}' headers: apim-request-id: - - 453d8671-d6ae-4ac8-8625-55699d01cb69 + - e74fc7f3-2bb1-4ac1-9a57-f5a4b53a04ad content-type: - application/json; charset=utf-8 csp-billing-usage: - CognitiveServices.TextAnalytics.BatchScoring=5 date: - - Wed, 27 Jan 2021 02:21:25 GMT + - Fri, 19 Feb 2021 23:10:44 GMT strict-transport-security: - max-age=31536000; includeSubDomains; preload transfer-encoding: @@ -38,7 +38,7 @@ interactions: x-content-type-options: - nosniff x-envoy-upstream-service-time: - - '86' + - '101' status: code: 200 message: OK diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_pass_cls.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_pass_cls.yaml index 29b25a8c963e..b0386ad8cfc1 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_pass_cls.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_pass_cls.yaml @@ -14,7 +14,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: @@ -23,13 +23,13 @@ interactions: passing cls to endpoint"}],"warnings":[]}],"errors":[],"modelVersion":"2020-04-01"}' headers: apim-request-id: - - 5aba728e-262a-49aa-8f03-5b9e47307cee + - 88629d79-8943-47cc-b490-16a98a069b48 content-type: - application/json; charset=utf-8 csp-billing-usage: - CognitiveServices.TextAnalytics.BatchScoring=1 date: - - Wed, 27 Jan 2021 02:21:25 GMT + - Fri, 19 Feb 2021 23:10:44 GMT strict-transport-security: - max-age=31536000; includeSubDomains; preload transfer-encoding: @@ -37,7 +37,7 @@ interactions: x-content-type-options: - nosniff x-envoy-upstream-service-time: - - '102' + - '82' status: code: 200 message: OK diff --git 
a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_passing_only_string.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_passing_only_string.yaml index 1620b5307d97..623256605dde 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_passing_only_string.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_passing_only_string.yaml @@ -17,7 +17,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: @@ -32,13 +32,13 @@ interactions: text is empty."}}}],"modelVersion":"2020-04-01"}' headers: apim-request-id: - - efaca92c-453d-44f4-86c3-2054eb2b517e + - 156f6be5-d00d-4e34-84e1-a58b4e3a3ea6 content-type: - application/json; charset=utf-8 csp-billing-usage: - CognitiveServices.TextAnalytics.BatchScoring=3 date: - - Wed, 27 Jan 2021 02:21:26 GMT + - Fri, 19 Feb 2021 23:10:44 GMT strict-transport-security: - max-age=31536000; includeSubDomains; preload transfer-encoding: @@ -46,7 +46,7 @@ interactions: x-content-type-options: - nosniff x-envoy-upstream-service-time: - - '84' + - '107' status: code: 200 message: OK diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_per_item_dont_use_language_hint.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_per_item_dont_use_language_hint.yaml index 09c586668eb3..c8ab7d62e132 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_per_item_dont_use_language_hint.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_per_item_dont_use_language_hint.yaml @@ -16,7 +16,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: @@ -27,13 +27,13 @@ interactions: restaurant had really good food."}],"warnings":[]}],"errors":[],"modelVersion":"2020-04-01"}' headers: apim-request-id: - - 520fc929-3505-44d3-88b7-a25771591db3 + - 4166f948-75c8-4161-a242-708db156e411 content-type: - application/json; charset=utf-8 csp-billing-usage: - CognitiveServices.TextAnalytics.BatchScoring=3 date: - - Wed, 27 Jan 2021 02:21:26 GMT + - Fri, 19 Feb 2021 23:10:44 GMT strict-transport-security: - max-age=31536000; includeSubDomains; preload transfer-encoding: @@ -41,7 +41,7 @@ interactions: x-content-type-options: - nosniff x-envoy-upstream-service-time: - - '99' + - '101' status: code: 200 message: OK diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_rotate_subscription_key.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_rotate_subscription_key.yaml index 2de9fca2457e..fe5233e2d57f 100644 --- 
a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_rotate_subscription_key.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_rotate_subscription_key.yaml @@ -16,7 +16,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: @@ -27,13 +27,13 @@ interactions: restaurant had really good food."}],"warnings":[]}],"errors":[],"modelVersion":"2020-04-01"}' headers: apim-request-id: - - f084250d-a138-4e57-9333-231acadc6b15 + - adc5b0b3-39e3-4a87-952c-1d9a6de7e4c1 content-type: - application/json; charset=utf-8 csp-billing-usage: - CognitiveServices.TextAnalytics.BatchScoring=3 date: - - Wed, 27 Jan 2021 02:21:26 GMT + - Fri, 19 Feb 2021 23:10:44 GMT strict-transport-security: - max-age=31536000; includeSubDomains; preload transfer-encoding: @@ -62,7 +62,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: @@ -74,7 +74,7 @@ interactions: content-length: - '224' date: - - Wed, 27 Jan 2021 02:21:26 GMT + - Fri, 19 Feb 2021 23:10:44 GMT status: code: 401 message: PermissionDenied @@ -95,7 +95,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: @@ -106,13 +106,13 @@ interactions: restaurant had really good food."}],"warnings":[]}],"errors":[],"modelVersion":"2020-04-01"}' headers: apim-request-id: - - 0e372875-a390-4b6f-8a15-f89258735c30 + - 9996125d-59bf-47d9-aca0-0094e37077d9 content-type: - application/json; charset=utf-8 csp-billing-usage: - CognitiveServices.TextAnalytics.BatchScoring=3 date: - - Wed, 27 Jan 2021 02:21:28 GMT + - Fri, 19 Feb 2021 23:10:44 GMT strict-transport-security: - max-age=31536000; includeSubDomains; preload transfer-encoding: @@ -120,7 +120,7 @@ interactions: x-content-type-options: - nosniff x-envoy-upstream-service-time: - - '112' + - '124' status: code: 200 message: OK diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_show_stats_and_model_version.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_show_stats_and_model_version.yaml index 7a34bd9a706b..4ff8d627192a 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_show_stats_and_model_version.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_show_stats_and_model_version.yaml @@ -16,7 +16,7 @@ interactions: Content-Type: - application/json 
User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?model-version=latest&showStats=true&stringIndexType=UnicodeCodePoint response: @@ -26,13 +26,13 @@ interactions: text is empty."}}}],"modelVersion":"2020-04-01"}' headers: apim-request-id: - - 1c6e567a-cd92-47c2-b6ae-9dac540acf25 + - 0a453b2f-9458-4d31-8638-5c6229d79f84 content-type: - application/json; charset=utf-8 csp-billing-usage: - CognitiveServices.TextAnalytics.BatchScoring=4 date: - - Wed, 27 Jan 2021 02:21:28 GMT + - Fri, 19 Feb 2021 23:10:45 GMT strict-transport-security: - max-age=31536000; includeSubDomains; preload transfer-encoding: @@ -40,7 +40,7 @@ interactions: x-content-type-options: - nosniff x-envoy-upstream-service-time: - - '82' + - '103' status: code: 200 message: OK diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_string_index_type_not_fail_v3.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_string_index_type_not_fail_v3.yaml index a4cf8550b90b..66e3eccb5b0e 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_string_index_type_not_fail_v3.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_string_index_type_not_fail_v3.yaml @@ -13,7 +13,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.0/sentiment?showStats=false response: @@ -22,13 +22,13 @@ interactions: don''t fail"}],"warnings":[]}],"errors":[],"modelVersion":"2020-04-01"}' headers: apim-request-id: - - ebbc94b3-a35a-46ee-943c-85fc35e93c98 + - c608617b-6ed1-4466-924f-7376989b047f content-type: - application/json; charset=utf-8 csp-billing-usage: - CognitiveServices.TextAnalytics.BatchScoring=1 date: - - Wed, 27 Jan 2021 02:21:28 GMT + - Fri, 19 Feb 2021 23:10:44 GMT strict-transport-security: - max-age=31536000; includeSubDomains; preload transfer-encoding: @@ -36,7 +36,7 @@ interactions: x-content-type-options: - nosniff x-envoy-upstream-service-time: - - '92' + - '112' status: code: 200 message: OK diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_too_many_documents.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_too_many_documents.yaml index 8b7476bf1170..ae42f27d99a0 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_too_many_documents.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_too_many_documents.yaml @@ -19,7 +19,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: 
https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: @@ -28,11 +28,11 @@ interactions: request contains too many records. Max 10 records are permitted."}}}' headers: apim-request-id: - - 2e7593f8-56f9-4a85-925f-a8f70292c4f0 + - 6a2cbe28-8012-4c9b-8706-0f11c827f019 content-type: - application/json; charset=utf-8 date: - - Wed, 27 Jan 2021 02:21:29 GMT + - Fri, 19 Feb 2021 23:10:45 GMT strict-transport-security: - max-age=31536000; includeSubDomains; preload transfer-encoding: @@ -40,7 +40,7 @@ interactions: x-content-type-options: - nosniff x-envoy-upstream-service-time: - - '5' + - '4' status: code: 400 message: Bad Request diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_user_agent.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_user_agent.yaml index 369b716c9623..c592d9c4ea2c 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_user_agent.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_user_agent.yaml @@ -16,7 +16,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: @@ -27,13 +27,13 @@ interactions: restaurant had really good food."}],"warnings":[]}],"errors":[],"modelVersion":"2020-04-01"}' headers: apim-request-id: - - 2d9f6a4b-1063-44ed-a119-92e313f3d01a + - feb0dcf6-3278-4b38-b658-d57cfa58f2f1 content-type: - application/json; charset=utf-8 csp-billing-usage: - CognitiveServices.TextAnalytics.BatchScoring=3 date: - - Wed, 27 Jan 2021 02:23:38 GMT + - Fri, 19 Feb 2021 23:10:45 GMT strict-transport-security: - max-age=31536000; includeSubDomains; preload transfer-encoding: @@ -41,7 +41,7 @@ interactions: x-content-type-options: - nosniff x-envoy-upstream-service-time: - - '96' + - '95' status: code: 200 message: OK diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_whole_batch_dont_use_language_hint.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_whole_batch_dont_use_language_hint.yaml index afd650b8ba60..754c904aa9b0 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_whole_batch_dont_use_language_hint.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_whole_batch_dont_use_language_hint.yaml @@ -16,7 +16,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: @@ -28,13 +28,13 @@ interactions: restaurant was not as good as I hoped."}],"warnings":[]}],"errors":[],"modelVersion":"2020-04-01"}' headers: apim-request-id: - - 5677cc00-de63-4660-8d4c-f21f02124e38 + - 
894f04fc-620a-4fc6-ab16-d96601cf164f content-type: - application/json; charset=utf-8 csp-billing-usage: - CognitiveServices.TextAnalytics.BatchScoring=3 date: - - Wed, 27 Jan 2021 02:23:39 GMT + - Fri, 19 Feb 2021 23:10:45 GMT strict-transport-security: - max-age=31536000; includeSubDomains; preload transfer-encoding: @@ -42,7 +42,7 @@ interactions: x-content-type-options: - nosniff x-envoy-upstream-service-time: - - '96' + - '98' status: code: 200 message: OK diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_whole_batch_language_hint.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_whole_batch_language_hint.yaml index 9b752a68718e..1a14b3c3e840 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_whole_batch_language_hint.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_whole_batch_language_hint.yaml @@ -16,7 +16,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: @@ -28,13 +28,13 @@ interactions: restaurant was not as good as I hoped."}],"warnings":[]}],"errors":[],"modelVersion":"2020-04-01"}' headers: apim-request-id: - - 0bb0fbfa-709b-4945-86a5-fc794c5324e4 + - 4d6244b4-55c6-4a16-8af5-e1af67e540bc content-type: - application/json; charset=utf-8 csp-billing-usage: - CognitiveServices.TextAnalytics.BatchScoring=3 date: - - Wed, 27 Jan 2021 02:23:40 GMT + - Fri, 19 Feb 2021 23:10:45 GMT strict-transport-security: - max-age=31536000; includeSubDomains; preload transfer-encoding: @@ -42,7 +42,7 @@ interactions: x-content-type-options: - nosniff x-envoy-upstream-service-time: - - '848' + - '101' status: code: 200 message: OK diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_whole_batch_language_hint_and_dict_input.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_whole_batch_language_hint_and_dict_input.yaml index 4801862ee19c..a74ae8de6087 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_whole_batch_language_hint_and_dict_input.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_whole_batch_language_hint_and_dict_input.yaml @@ -16,7 +16,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: @@ -27,13 +27,13 @@ interactions: restaurant had really good food."}],"warnings":[]}],"errors":[],"modelVersion":"2020-04-01"}' headers: apim-request-id: - - cbe28a3e-b29e-4381-a453-a14604ba8687 + - a5d0eba9-66f0-499d-9812-a9d35a82a082 content-type: - application/json; charset=utf-8 csp-billing-usage: - CognitiveServices.TextAnalytics.BatchScoring=3 date: - - Wed, 27 Jan 2021 02:23:40 GMT + - 
Fri, 19 Feb 2021 23:10:46 GMT strict-transport-security: - max-age=31536000; includeSubDomains; preload transfer-encoding: @@ -41,7 +41,7 @@ interactions: x-content-type-options: - nosniff x-envoy-upstream-service-time: - - '123' + - '121' status: code: 200 message: OK diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_whole_batch_language_hint_and_dict_per_item_hints.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_whole_batch_language_hint_and_dict_per_item_hints.yaml index bcfaf2b396db..1d8fcc7fc516 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_whole_batch_language_hint_and_dict_per_item_hints.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_whole_batch_language_hint_and_dict_per_item_hints.yaml @@ -16,7 +16,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: @@ -27,13 +27,13 @@ interactions: restaurant had really good food."}],"warnings":[]}],"errors":[],"modelVersion":"2020-04-01"}' headers: apim-request-id: - - dcffaaa2-e284-45e4-8da2-928a4dbbef9f + - 7dcbcde2-9cdd-4381-a916-b205544d3946 content-type: - application/json; charset=utf-8 csp-billing-usage: - CognitiveServices.TextAnalytics.BatchScoring=3 date: - - Wed, 27 Jan 2021 02:23:41 GMT + - Fri, 19 Feb 2021 23:10:46 GMT strict-transport-security: - max-age=31536000; includeSubDomains; preload transfer-encoding: @@ -41,7 +41,7 @@ interactions: x-content-type-options: - nosniff x-envoy-upstream-service-time: - - '104' + - '205' status: code: 200 message: OK diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_whole_batch_language_hint_and_obj_input.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_whole_batch_language_hint_and_obj_input.yaml index 0df697281469..38dffc43a66a 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_whole_batch_language_hint_and_obj_input.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_whole_batch_language_hint_and_obj_input.yaml @@ -16,34 +16,23 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: body: - string: "{\"documents\":[{\"id\":\"1\",\"sentiment\":\"neutral\",\"confidenceScores\"\ - :{\"positive\":0.15,\"neutral\":0.81,\"negative\":0.04},\"sentences\":[{\"\ - sentiment\":\"neutral\",\"confidenceScores\":{\"positive\":0.15,\"neutral\"\ - :0.81,\"negative\":0.04},\"offset\":0,\"length\":41,\"text\":\"I should take\ - \ my cat to the veterinarian.\"}],\"warnings\":[]},{\"id\":\"4\",\"sentiment\"\ - :\"neutral\",\"confidenceScores\":{\"positive\":0.38,\"neutral\":0.48,\"negative\"\ - 
:0.14},\"sentences\":[{\"sentiment\":\"neutral\",\"confidenceScores\":{\"\ - positive\":0.38,\"neutral\":0.48,\"negative\":0.14},\"offset\":0,\"length\"\ - :39,\"text\":\"Este es un document escrito en Espa\xF1ol.\"}],\"warnings\"\ - :[]},{\"id\":\"3\",\"sentiment\":\"neutral\",\"confidenceScores\":{\"positive\"\ - :0.15,\"neutral\":0.79,\"negative\":0.06},\"sentences\":[{\"sentiment\":\"\ - neutral\",\"confidenceScores\":{\"positive\":0.15,\"neutral\":0.79,\"negative\"\ - :0.06},\"offset\":0,\"length\":4,\"text\":\"\u732B\u306F\u5E78\u305B\"}],\"\ - warnings\":[]}],\"errors\":[],\"modelVersion\":\"2020-04-01\"}" + string: "{\"documents\":[{\"id\":\"1\",\"sentiment\":\"neutral\",\"confidenceScores\":{\"positive\":0.15,\"neutral\":0.81,\"negative\":0.04},\"sentences\":[{\"sentiment\":\"neutral\",\"confidenceScores\":{\"positive\":0.15,\"neutral\":0.81,\"negative\":0.04},\"offset\":0,\"length\":41,\"text\":\"I + should take my cat to the veterinarian.\"}],\"warnings\":[]},{\"id\":\"4\",\"sentiment\":\"neutral\",\"confidenceScores\":{\"positive\":0.38,\"neutral\":0.48,\"negative\":0.14},\"sentences\":[{\"sentiment\":\"neutral\",\"confidenceScores\":{\"positive\":0.38,\"neutral\":0.48,\"negative\":0.14},\"offset\":0,\"length\":39,\"text\":\"Este + es un document escrito en Espa\xF1ol.\"}],\"warnings\":[]},{\"id\":\"3\",\"sentiment\":\"neutral\",\"confidenceScores\":{\"positive\":0.15,\"neutral\":0.79,\"negative\":0.06},\"sentences\":[{\"sentiment\":\"neutral\",\"confidenceScores\":{\"positive\":0.15,\"neutral\":0.79,\"negative\":0.06},\"offset\":0,\"length\":4,\"text\":\"\u732B\u306F\u5E78\u305B\"}],\"warnings\":[]}],\"errors\":[],\"modelVersion\":\"2020-04-01\"}" headers: apim-request-id: - - cff09115-e5e3-4b9c-94af-b3e55c3828ed + - 56928b68-c9e9-4f98-9183-374c8dc787ac content-type: - application/json; charset=utf-8 csp-billing-usage: - CognitiveServices.TextAnalytics.BatchScoring=3 date: - - Wed, 27 Jan 2021 02:23:41 GMT + - Fri, 19 Feb 2021 23:10:46 GMT strict-transport-security: - max-age=31536000; includeSubDomains; preload transfer-encoding: @@ -51,7 +40,7 @@ interactions: x-content-type-options: - nosniff x-envoy-upstream-service-time: - - '127' + - '115' status: code: 200 message: OK diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_whole_batch_language_hint_and_obj_per_item_hints.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_whole_batch_language_hint_and_obj_per_item_hints.yaml index 065afa381521..fe8aac2d3ff4 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_whole_batch_language_hint_and_obj_per_item_hints.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment.test_whole_batch_language_hint_and_obj_per_item_hints.yaml @@ -16,34 +16,23 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: body: - string: "{\"documents\":[{\"id\":\"1\",\"sentiment\":\"neutral\",\"confidenceScores\"\ - :{\"positive\":0.05,\"neutral\":0.94,\"negative\":0.01},\"sentences\":[{\"\ - sentiment\":\"neutral\",\"confidenceScores\":{\"positive\":0.05,\"neutral\"\ - 
:0.94,\"negative\":0.01},\"offset\":0,\"length\":41,\"text\":\"I should take\ - \ my cat to the veterinarian.\"}],\"warnings\":[]},{\"id\":\"2\",\"sentiment\"\ - :\"neutral\",\"confidenceScores\":{\"positive\":0.03,\"neutral\":0.92,\"negative\"\ - :0.05},\"sentences\":[{\"sentiment\":\"neutral\",\"confidenceScores\":{\"\ - positive\":0.03,\"neutral\":0.92,\"negative\":0.05},\"offset\":0,\"length\"\ - :39,\"text\":\"Este es un document escrito en Espa\xF1ol.\"}],\"warnings\"\ - :[]},{\"id\":\"3\",\"sentiment\":\"neutral\",\"confidenceScores\":{\"positive\"\ - :0.13,\"neutral\":0.81,\"negative\":0.06},\"sentences\":[{\"sentiment\":\"\ - neutral\",\"confidenceScores\":{\"positive\":0.13,\"neutral\":0.81,\"negative\"\ - :0.06},\"offset\":0,\"length\":4,\"text\":\"\u732B\u306F\u5E78\u305B\"}],\"\ - warnings\":[]}],\"errors\":[],\"modelVersion\":\"2020-04-01\"}" + string: "{\"documents\":[{\"id\":\"1\",\"sentiment\":\"neutral\",\"confidenceScores\":{\"positive\":0.05,\"neutral\":0.94,\"negative\":0.01},\"sentences\":[{\"sentiment\":\"neutral\",\"confidenceScores\":{\"positive\":0.05,\"neutral\":0.94,\"negative\":0.01},\"offset\":0,\"length\":41,\"text\":\"I + should take my cat to the veterinarian.\"}],\"warnings\":[]},{\"id\":\"2\",\"sentiment\":\"neutral\",\"confidenceScores\":{\"positive\":0.03,\"neutral\":0.92,\"negative\":0.05},\"sentences\":[{\"sentiment\":\"neutral\",\"confidenceScores\":{\"positive\":0.03,\"neutral\":0.92,\"negative\":0.05},\"offset\":0,\"length\":39,\"text\":\"Este + es un document escrito en Espa\xF1ol.\"}],\"warnings\":[]},{\"id\":\"3\",\"sentiment\":\"neutral\",\"confidenceScores\":{\"positive\":0.13,\"neutral\":0.81,\"negative\":0.06},\"sentences\":[{\"sentiment\":\"neutral\",\"confidenceScores\":{\"positive\":0.13,\"neutral\":0.81,\"negative\":0.06},\"offset\":0,\"length\":4,\"text\":\"\u732B\u306F\u5E78\u305B\"}],\"warnings\":[]}],\"errors\":[],\"modelVersion\":\"2020-04-01\"}" headers: apim-request-id: - - bd2425a0-6fc5-4c14-acda-0a0a132973fa + - 0ab17618-1867-488d-a19e-9ab07d39c1de content-type: - application/json; charset=utf-8 csp-billing-usage: - CognitiveServices.TextAnalytics.BatchScoring=3 date: - - Wed, 27 Jan 2021 02:23:42 GMT + - Fri, 19 Feb 2021 23:10:46 GMT strict-transport-security: - max-age=31536000; includeSubDomains; preload transfer-encoding: @@ -51,7 +40,7 @@ interactions: x-content-type-options: - nosniff x-envoy-upstream-service-time: - - '132' + - '97' status: code: 200 message: OK diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_all_successful_passing_dict.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_all_successful_passing_dict.yaml index 2d05359b9036..e9acca34d2e9 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_all_successful_passing_dict.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_all_successful_passing_dict.yaml @@ -13,7 +13,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=true&stringIndexType=UnicodeCodePoint response: @@ -25,14 +25,14 @@ interactions: restaurant had really good 
food."},{"sentiment":"positive","confidenceScores":{"positive":0.96,"neutral":0.03,"negative":0.01},"offset":37,"length":23,"text":"I recommend you try it."}],"warnings":[]}],"errors":[],"modelVersion":"2020-04-01"}' headers: - apim-request-id: 3693f306-41de-40c0-b2ba-ecfc62d5a2b3 + apim-request-id: 1d5deca6-9b0b-40ae-a062-b47e506bdf54 content-type: application/json; charset=utf-8 csp-billing-usage: CognitiveServices.TextAnalytics.BatchScoring=3 - date: Wed, 27 Jan 2021 02:23:43 GMT + date: Sat, 20 Feb 2021 00:08:47 GMT strict-transport-security: max-age=31536000; includeSubDomains; preload transfer-encoding: chunked x-content-type-options: nosniff - x-envoy-upstream-service-time: '102' + x-envoy-upstream-service-time: '119' status: code: 200 message: OK diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_all_successful_passing_text_document_input.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_all_successful_passing_text_document_input.yaml index d7773badea49..dad71d7525f6 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_all_successful_passing_text_document_input.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_all_successful_passing_text_document_input.yaml @@ -13,7 +13,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: @@ -25,14 +25,14 @@ interactions: restaurant had really good food."},{"sentiment":"positive","confidenceScores":{"positive":0.96,"neutral":0.03,"negative":0.01},"offset":37,"length":23,"text":"I recommend you try it."}],"warnings":[]}],"errors":[],"modelVersion":"2020-04-01"}' headers: - apim-request-id: 2bb2a8cd-4766-43d2-94f4-1d935c8fd76e + apim-request-id: 1024a163-32ff-4e94-976d-43e2f0772b93 content-type: application/json; charset=utf-8 csp-billing-usage: CognitiveServices.TextAnalytics.BatchScoring=3 - date: Wed, 27 Jan 2021 02:23:43 GMT + date: Sat, 20 Feb 2021 00:08:48 GMT strict-transport-security: max-age=31536000; includeSubDomains; preload transfer-encoding: chunked x-content-type-options: nosniff - x-envoy-upstream-service-time: '100' + x-envoy-upstream-service-time: '116' status: code: 200 message: OK diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_bad_credentials.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_bad_credentials.yaml index c58a07b63d92..881d24ca9541 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_bad_credentials.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_bad_credentials.yaml @@ -10,7 +10,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: 
https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: @@ -20,7 +20,7 @@ interactions: subscription and use a correct regional API endpoint for your resource."}}' headers: content-length: '224' - date: Wed, 27 Jan 2021 02:23:44 GMT + date: Sat, 20 Feb 2021 00:08:48 GMT status: code: 401 message: PermissionDenied diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_bad_model_version_error.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_bad_model_version_error.yaml index 36043376c6ae..2e7c1afab2ba 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_bad_model_version_error.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_bad_model_version_error.yaml @@ -10,7 +10,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?model-version=bad&showStats=false&stringIndexType=UnicodeCodePoint response: @@ -18,9 +18,9 @@ interactions: string: '{"error":{"code":"InvalidRequest","message":"Invalid Request.","innererror":{"code":"ModelVersionIncorrect","message":"Invalid model version. Possible values are: latest,2019-10-01,2020-04-01"}}}' headers: - apim-request-id: a32d5345-7be5-400d-9817-27a1bedda7d6 + apim-request-id: 4f723371-9027-41f7-8e5f-918ad531c550 content-type: application/json; charset=utf-8 - date: Wed, 27 Jan 2021 02:23:44 GMT + date: Sat, 20 Feb 2021 00:08:48 GMT strict-transport-security: max-age=31536000; includeSubDomains; preload transfer-encoding: chunked x-content-type-options: nosniff diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_batch_size_over_limit.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_batch_size_over_limit.yaml index 6b8e39c0a1ec..d0d23a38ce81 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_batch_size_over_limit.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_batch_size_over_limit.yaml @@ -754,7 +754,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: @@ -762,13 +762,13 @@ interactions: string: '{"error":{"code":"InvalidRequest","message":"Invalid document in request.","innererror":{"code":"InvalidDocumentBatch","message":"Batch request contains too many records. 
Max 10 records are permitted."}}}' headers: - apim-request-id: 44fbff05-8344-4d5f-aed3-f770dc63fe94 + apim-request-id: 825b5f50-13df-4f5a-aee4-3e6d0c156407 content-type: application/json; charset=utf-8 - date: Wed, 27 Jan 2021 02:23:45 GMT + date: Sat, 20 Feb 2021 00:08:49 GMT strict-transport-security: max-age=31536000; includeSubDomains; preload transfer-encoding: chunked x-content-type-options: nosniff - x-envoy-upstream-service-time: '12' + x-envoy-upstream-service-time: '9' status: code: 400 message: Bad Request diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_batch_size_over_limit_error.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_batch_size_over_limit_error.yaml index 83d3443ed9b5..bba89410ffe6 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_batch_size_over_limit_error.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_batch_size_over_limit_error.yaml @@ -719,7 +719,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: @@ -727,13 +727,13 @@ interactions: string: '{"error":{"code":"InvalidRequest","message":"Invalid document in request.","innererror":{"code":"InvalidDocumentBatch","message":"Batch request contains too many records. Max 10 records are permitted."}}}' headers: - apim-request-id: fac28608-292c-4c4f-b6b0-e113565151e3 + apim-request-id: f179592f-cbbd-4443-8e4a-f2c21a6da410 content-type: application/json; charset=utf-8 - date: Wed, 27 Jan 2021 02:23:46 GMT + date: Sat, 20 Feb 2021 00:08:48 GMT strict-transport-security: max-age=31536000; includeSubDomains; preload transfer-encoding: chunked x-content-type-options: nosniff - x-envoy-upstream-service-time: '10' + x-envoy-upstream-service-time: '16' status: code: 400 message: Bad Request diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_client_passed_default_language_hint.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_client_passed_default_language_hint.yaml index 10eea0c53437..6deed85d69bc 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_client_passed_default_language_hint.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_client_passed_default_language_hint.yaml @@ -12,7 +12,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: @@ -22,14 +22,14 @@ interactions: did not like the hotel we stayed 
at."}],"warnings":[]},{"id":"3","sentiment":"positive","confidenceScores":{"positive":0.97,"neutral":0.02,"negative":0.01},"sentences":[{"sentiment":"positive","confidenceScores":{"positive":0.97,"neutral":0.02,"negative":0.01},"offset":0,"length":36,"text":"The restaurant had really good food."}],"warnings":[]}],"errors":[],"modelVersion":"2020-04-01"}' headers: - apim-request-id: 776138ff-29e7-4922-8361-ee58456a88c2 + apim-request-id: a3ee04f0-9213-4a9f-82e9-79f34a09aa62 content-type: application/json; charset=utf-8 csp-billing-usage: CognitiveServices.TextAnalytics.BatchScoring=3 - date: Wed, 27 Jan 2021 02:23:46 GMT + date: Sat, 20 Feb 2021 00:08:48 GMT strict-transport-security: max-age=31536000; includeSubDomains; preload transfer-encoding: chunked x-content-type-options: nosniff - x-envoy-upstream-service-time: '91' + x-envoy-upstream-service-time: '107' status: code: 200 message: OK @@ -47,7 +47,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: @@ -57,14 +57,14 @@ interactions: did not like the hotel we stayed at."}],"warnings":[]},{"id":"3","sentiment":"positive","confidenceScores":{"positive":1.0,"neutral":0.0,"negative":0.0},"sentences":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"neutral":0.0,"negative":0.0},"offset":0,"length":36,"text":"The restaurant had really good food."}],"warnings":[]}],"errors":[],"modelVersion":"2020-04-01"}' headers: - apim-request-id: 2ed384d4-8ebf-4d4c-88fb-10052d587d0c + apim-request-id: c276419e-e1be-4701-b3e8-609f0ea06246 content-type: application/json; charset=utf-8 csp-billing-usage: CognitiveServices.TextAnalytics.BatchScoring=3 - date: Wed, 27 Jan 2021 02:23:47 GMT + date: Sat, 20 Feb 2021 00:08:48 GMT strict-transport-security: max-age=31536000; includeSubDomains; preload transfer-encoding: chunked x-content-type-options: nosniff - x-envoy-upstream-service-time: '97' + x-envoy-upstream-service-time: '123' status: code: 200 message: OK @@ -82,7 +82,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: @@ -92,14 +92,14 @@ interactions: did not like the hotel we stayed at."}],"warnings":[]},{"id":"3","sentiment":"positive","confidenceScores":{"positive":0.97,"neutral":0.02,"negative":0.01},"sentences":[{"sentiment":"positive","confidenceScores":{"positive":0.97,"neutral":0.02,"negative":0.01},"offset":0,"length":36,"text":"The restaurant had really good food."}],"warnings":[]}],"errors":[],"modelVersion":"2020-04-01"}' headers: - apim-request-id: 898e2cfa-8d7b-4ddd-9112-3ad0c3bdd6b8 + apim-request-id: 4f677591-f504-4684-a704-aa0a0f675bf7 content-type: application/json; charset=utf-8 csp-billing-usage: CognitiveServices.TextAnalytics.BatchScoring=3 - date: Wed, 27 Jan 2021 02:23:47 GMT + date: Sat, 20 Feb 2021 00:08:49 GMT strict-transport-security: max-age=31536000; includeSubDomains; 
preload transfer-encoding: chunked x-content-type-options: nosniff - x-envoy-upstream-service-time: '118' + x-envoy-upstream-service-time: '109' status: code: 200 message: OK diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_default_string_index_type_is_UnicodeCodePoint.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_default_string_index_type_is_UnicodeCodePoint.yaml index 0095925971fa..e65eaf628885 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_default_string_index_type_is_UnicodeCodePoint.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_default_string_index_type_is_UnicodeCodePoint.yaml @@ -1,33 +1,32 @@ interactions: - request: - body: '{"documents": [{"id": "0", "text": "Bill Gates is the CEO of Microsoft.", - "language": "en"}]}' + body: '{"documents": [{"id": "0", "text": "Hello world", "language": "en"}]}' headers: Accept: - application/json, text/json Content-Length: - - '93' + - '69' Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.7.5 (Linux-4.4.0-19041-Microsoft-x86_64-with-debian-stretch-sid) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: body: - string: '{"documents":[{"id":"0","sentiment":"neutral","confidenceScores":{"positive":0.04,"neutral":0.95,"negative":0.01},"sentences":[{"sentiment":"neutral","confidenceScores":{"positive":0.04,"neutral":0.95,"negative":0.01},"offset":0,"length":35,"text":"Bill - Gates is the CEO of Microsoft."}],"warnings":[]}],"errors":[],"modelVersion":"2020-04-01"}' + string: '{"documents":[{"id":"0","sentiment":"neutral","confidenceScores":{"positive":0.02,"neutral":0.97,"negative":0.01},"sentences":[{"sentiment":"neutral","confidenceScores":{"positive":0.02,"neutral":0.97,"negative":0.01},"offset":0,"length":11,"text":"Hello + world"}],"warnings":[]}],"errors":[],"modelVersion":"2020-04-01"}' headers: - apim-request-id: 93998cf6-ea51-4e0e-bf47-efbaf7680e2d + apim-request-id: c75cf1d5-5226-42d8-a9eb-d9e639422f07 content-type: application/json; charset=utf-8 csp-billing-usage: CognitiveServices.TextAnalytics.BatchScoring=1 - date: Fri, 29 Jan 2021 20:35:22 GMT + date: Sat, 20 Feb 2021 00:08:49 GMT strict-transport-security: max-age=31536000; includeSubDomains; preload transfer-encoding: chunked x-content-type-options: nosniff - x-envoy-upstream-service-time: '430' + x-envoy-upstream-service-time: '104' status: code: 200 message: OK - url: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint + url: https://westus2.api.cognitive.microsoft.com//text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint version: 1 diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_document_attribute_error_no_result_attribute.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_document_attribute_error_no_result_attribute.yaml index 33de5db6d67b..a9f635c7ff8d 100644 --- 
a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_document_attribute_error_no_result_attribute.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_document_attribute_error_no_result_attribute.yaml @@ -9,7 +9,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: @@ -18,9 +18,9 @@ interactions: document in request.","innererror":{"code":"InvalidDocument","message":"Document text is empty."}}}],"modelVersion":"2020-04-01"}' headers: - apim-request-id: 8cbdf9fc-08de-4829-87e3-0e93c9a91eb0 + apim-request-id: cc086c7f-d33b-42af-8cd6-f568d5e040c4 content-type: application/json; charset=utf-8 - date: Wed, 27 Jan 2021 02:23:47 GMT + date: Sat, 20 Feb 2021 00:08:49 GMT strict-transport-security: max-age=31536000; includeSubDomains; preload transfer-encoding: chunked x-content-type-options: nosniff diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_document_attribute_error_nonexistent_attribute.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_document_attribute_error_nonexistent_attribute.yaml index 58536e661276..8e1931f8c261 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_document_attribute_error_nonexistent_attribute.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_document_attribute_error_nonexistent_attribute.yaml @@ -9,7 +9,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: @@ -18,13 +18,13 @@ interactions: document in request.","innererror":{"code":"InvalidDocument","message":"Document text is empty."}}}],"modelVersion":"2020-04-01"}' headers: - apim-request-id: 4cda61df-a75c-4eac-8b6f-caa2d7998385 + apim-request-id: 57f52af8-0645-4a6a-aada-23aab45800a3 content-type: application/json; charset=utf-8 - date: Wed, 27 Jan 2021 02:23:47 GMT + date: Sat, 20 Feb 2021 00:08:49 GMT strict-transport-security: max-age=31536000; includeSubDomains; preload transfer-encoding: chunked x-content-type-options: nosniff - x-envoy-upstream-service-time: '1' + x-envoy-upstream-service-time: '2' status: code: 200 message: OK diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_document_errors.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_document_errors.yaml index 3e7fdb95ce3f..fccf82b42de7 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_document_errors.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_document_errors.yaml @@ -12,7 +12,7 @@ interactions: 
Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: @@ -27,13 +27,13 @@ interactions: size to: 5120 text elements. For additional details on the data limitations see https://aka.ms/text-analytics-data-limits"}}}],"modelVersion":"2020-04-01"}' headers: - apim-request-id: d264d9ac-af0a-4b70-8ae6-d9edf1995d36 + apim-request-id: 5b88884c-2b45-4938-84f0-d8e7eee08078 content-type: application/json; charset=utf-8 - date: Wed, 27 Jan 2021 02:23:48 GMT + date: Sat, 20 Feb 2021 00:08:49 GMT strict-transport-security: max-age=31536000; includeSubDomains; preload transfer-encoding: chunked x-content-type-options: nosniff - x-envoy-upstream-service-time: '3' + x-envoy-upstream-service-time: '2' status: code: 200 message: OK diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_document_warnings.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_document_warnings.yaml index af205c40bd84..e6f412be13fb 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_document_warnings.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_document_warnings.yaml @@ -10,7 +10,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: @@ -18,14 +18,14 @@ interactions: string: '{"documents":[{"id":"1","sentiment":"negative","confidenceScores":{"positive":0.0,"neutral":0.02,"negative":0.98},"sentences":[{"sentiment":"negative","confidenceScores":{"positive":0.0,"neutral":0.02,"negative":0.98},"offset":0,"length":40,"text":"This won''t actually create a warning :''("}],"warnings":[]}],"errors":[],"modelVersion":"2020-04-01"}' headers: - apim-request-id: 17e5ad23-85e7-4297-ae1d-2ea4772985b0 + apim-request-id: f6f79805-a05e-42f3-b096-763ff75e1760 content-type: application/json; charset=utf-8 csp-billing-usage: CognitiveServices.TextAnalytics.BatchScoring=1 - date: Wed, 27 Jan 2021 02:23:48 GMT + date: Sat, 20 Feb 2021 00:08:50 GMT strict-transport-security: max-age=31536000; includeSubDomains; preload transfer-encoding: chunked x-content-type-options: nosniff - x-envoy-upstream-service-time: '82' + x-envoy-upstream-service-time: '115' status: code: 200 message: OK diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_duplicate_ids_error.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_duplicate_ids_error.yaml index b84e7aef84d6..b77850a1497d 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_duplicate_ids_error.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_duplicate_ids_error.yaml @@ -10,7 +10,7 @@ 
interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: @@ -18,13 +18,13 @@ interactions: string: '{"error":{"code":"InvalidRequest","message":"Invalid document in request.","innererror":{"code":"InvalidDocument","message":"Request contains duplicated Ids. Make sure each document has a unique Id."}}}' headers: - apim-request-id: 995c42de-8ace-49ba-bab8-0ae9dfdf5b1e + apim-request-id: 9bb7f17b-3cd5-4a04-baf9-83d418264d5d content-type: application/json; charset=utf-8 - date: Wed, 27 Jan 2021 02:23:49 GMT + date: Sat, 20 Feb 2021 00:08:50 GMT strict-transport-security: max-age=31536000; includeSubDomains; preload transfer-encoding: chunked x-content-type-options: nosniff - x-envoy-upstream-service-time: '4' + x-envoy-upstream-service-time: '6' status: code: 400 message: Bad Request diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_empty_credential_class.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_empty_credential_class.yaml index 287144467856..f39bc61cd88f 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_empty_credential_class.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_empty_credential_class.yaml @@ -10,7 +10,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: @@ -20,7 +20,7 @@ interactions: subscription and use a correct regional API endpoint for your resource."}}' headers: content-length: '224' - date: Wed, 27 Jan 2021 02:23:49 GMT + date: Sat, 20 Feb 2021 00:08:50 GMT status: code: 401 message: PermissionDenied diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_explicit_set_string_index_type.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_explicit_set_string_index_type.yaml index 5572f8cde729..6d69487b2e9c 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_explicit_set_string_index_type.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_explicit_set_string_index_type.yaml @@ -1,33 +1,32 @@ interactions: - request: - body: '{"documents": [{"id": "0", "text": "Bill Gates is the CEO of Microsoft.", - "language": "en"}]}' + body: '{"documents": [{"id": "0", "text": "Hello world", "language": "en"}]}' headers: Accept: - application/json, text/json Content-Length: - - '93' + - '69' Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.7.5 (Linux-4.4.0-19041-Microsoft-x86_64-with-debian-stretch-sid) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 
(Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=TextElements_v8 response: body: - string: '{"documents":[{"id":"0","sentiment":"neutral","confidenceScores":{"positive":0.04,"neutral":0.95,"negative":0.01},"sentences":[{"sentiment":"neutral","confidenceScores":{"positive":0.04,"neutral":0.95,"negative":0.01},"offset":0,"length":35,"text":"Bill - Gates is the CEO of Microsoft."}],"warnings":[]}],"errors":[],"modelVersion":"2020-04-01"}' + string: '{"documents":[{"id":"0","sentiment":"neutral","confidenceScores":{"positive":0.02,"neutral":0.97,"negative":0.01},"sentences":[{"sentiment":"neutral","confidenceScores":{"positive":0.02,"neutral":0.97,"negative":0.01},"offset":0,"length":11,"text":"Hello + world"}],"warnings":[]}],"errors":[],"modelVersion":"2020-04-01"}' headers: - apim-request-id: 759544b0-17ac-40aa-83bd-97c591e1c9e6 + apim-request-id: c8628594-ed7b-4f3f-b2c5-26e262d3ab96 content-type: application/json; charset=utf-8 csp-billing-usage: CognitiveServices.TextAnalytics.BatchScoring=1 - date: Fri, 29 Jan 2021 20:35:26 GMT + date: Sat, 20 Feb 2021 00:08:50 GMT strict-transport-security: max-age=31536000; includeSubDomains; preload transfer-encoding: chunked x-content-type-options: nosniff - x-envoy-upstream-service-time: '635' + x-envoy-upstream-service-time: '85' status: code: 200 message: OK - url: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=TextElements_v8 + url: https://westus2.api.cognitive.microsoft.com//text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=TextElements_v8 version: 1 diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_input_with_all_errors.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_input_with_all_errors.yaml index 80468ae67b80..7d847cfbb1e1 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_input_with_all_errors.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_input_with_all_errors.yaml @@ -11,7 +11,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: @@ -24,9 +24,9 @@ interactions: document in request.","innererror":{"code":"InvalidDocument","message":"Document text is empty."}}}],"modelVersion":"2020-04-01"}' headers: - apim-request-id: 14840f37-574a-4dd8-949d-9f64379463c8 + apim-request-id: b87253af-807c-4973-9db3-6d4225e1c206 content-type: application/json; charset=utf-8 - date: Wed, 27 Jan 2021 02:23:49 GMT + date: Sat, 20 Feb 2021 00:08:49 GMT strict-transport-security: max-age=31536000; includeSubDomains; preload transfer-encoding: chunked x-content-type-options: nosniff diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_input_with_some_errors.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_input_with_some_errors.yaml index 
a8a802d9c3cb..defb0c2b78f7 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_input_with_some_errors.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_input_with_some_errors.yaml @@ -12,7 +12,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: @@ -25,14 +25,14 @@ interactions: Language Code.","innererror":{"code":"UnsupportedLanguageCode","message":"Invalid language code. Supported languages: de,en,es,fr,hi,it,ja,ko,nl,no,pt-BR,pt-PT,tr,zh-Hans,zh-Hant"}}}],"modelVersion":"2020-04-01"}' headers: - apim-request-id: 0ce3f6b9-b508-45b2-9386-d7296674b58d + apim-request-id: 0d7deaae-8363-4b66-952d-65a9903221e1 content-type: application/json; charset=utf-8 csp-billing-usage: CognitiveServices.TextAnalytics.BatchScoring=1 - date: Wed, 27 Jan 2021 02:23:50 GMT + date: Sat, 20 Feb 2021 00:08:50 GMT strict-transport-security: max-age=31536000; includeSubDomains; preload transfer-encoding: chunked x-content-type-options: nosniff - x-envoy-upstream-service-time: '122' + x-envoy-upstream-service-time: '139' status: code: 200 message: OK diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_invalid_language_hint_docs.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_invalid_language_hint_docs.yaml index 113b558aa799..188821709f09 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_invalid_language_hint_docs.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_invalid_language_hint_docs.yaml @@ -10,7 +10,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: @@ -19,13 +19,13 @@ interactions: Language Code.","innererror":{"code":"UnsupportedLanguageCode","message":"Invalid language code. 
Supported languages: de,en,es,fr,hi,it,ja,ko,nl,no,pt-BR,pt-PT,tr,zh-Hans,zh-Hant"}}}],"modelVersion":"2020-04-01"}' headers: - apim-request-id: cd402741-fdff-4ee9-9441-2428a0cec86d + apim-request-id: 7b30a272-b519-4649-b998-c4966e285494 content-type: application/json; charset=utf-8 - date: Wed, 27 Jan 2021 02:24:37 GMT + date: Sat, 20 Feb 2021 00:08:50 GMT strict-transport-security: max-age=31536000; includeSubDomains; preload transfer-encoding: chunked x-content-type-options: nosniff - x-envoy-upstream-service-time: '1' + x-envoy-upstream-service-time: '2' status: code: 200 message: OK diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_invalid_language_hint_method.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_invalid_language_hint_method.yaml index abc239724121..b47161354bff 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_invalid_language_hint_method.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_invalid_language_hint_method.yaml @@ -10,7 +10,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: @@ -19,13 +19,13 @@ interactions: Language Code.","innererror":{"code":"UnsupportedLanguageCode","message":"Invalid language code. Supported languages: de,en,es,fr,hi,it,ja,ko,nl,no,pt-BR,pt-PT,tr,zh-Hans,zh-Hant"}}}],"modelVersion":"2020-04-01"}' headers: - apim-request-id: 87e46a89-a9b4-4968-b07b-717201865208 + apim-request-id: 5b36b7ee-660a-40a2-8b4e-2c3ba1b394e7 content-type: application/json; charset=utf-8 - date: Wed, 27 Jan 2021 02:24:37 GMT + date: Sat, 20 Feb 2021 00:08:50 GMT strict-transport-security: max-age=31536000; includeSubDomains; preload transfer-encoding: chunked x-content-type-options: nosniff - x-envoy-upstream-service-time: '2' + x-envoy-upstream-service-time: '3' status: code: 200 message: OK diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_language_kwarg_spanish.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_language_kwarg_spanish.yaml index eed10c118274..1d7ab80ff427 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_language_kwarg_spanish.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_language_kwarg_spanish.yaml @@ -10,7 +10,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?model-version=latest&showStats=true&stringIndexType=UnicodeCodePoint response: @@ -18,14 +18,14 @@ interactions: string: 
'{"statistics":{"documentsCount":1,"validDocumentsCount":1,"erroneousDocumentsCount":0,"transactionsCount":1},"documents":[{"id":"0","sentiment":"neutral","statistics":{"charactersCount":35,"transactionsCount":1},"confidenceScores":{"positive":0.01,"neutral":0.98,"negative":0.01},"sentences":[{"sentiment":"neutral","confidenceScores":{"positive":0.01,"neutral":0.98,"negative":0.01},"offset":0,"length":35,"text":"Bill Gates is the CEO of Microsoft."}],"warnings":[]}],"errors":[],"modelVersion":"2020-04-01"}' headers: - apim-request-id: cc24252b-3c93-47c9-88f0-82f749289f0e + apim-request-id: 6acc8c27-7a9f-463d-a366-9149a110e6c9 content-type: application/json; charset=utf-8 csp-billing-usage: CognitiveServices.TextAnalytics.BatchScoring=1 - date: Wed, 27 Jan 2021 02:24:38 GMT + date: Sat, 20 Feb 2021 00:08:50 GMT strict-transport-security: max-age=31536000; includeSubDomains; preload transfer-encoding: chunked x-content-type-options: nosniff - x-envoy-upstream-service-time: '129' + x-envoy-upstream-service-time: '120' status: code: 200 message: OK diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_no_offset_v3_sentence_sentiment.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_no_offset_v3_sentence_sentiment.yaml index e87918590a52..4a2db75393dd 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_no_offset_v3_sentence_sentiment.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_no_offset_v3_sentence_sentiment.yaml @@ -10,7 +10,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.0/sentiment?showStats=false response: @@ -19,14 +19,14 @@ interactions: like nature."},{"sentiment":"negative","confidenceScores":{"positive":0.01,"neutral":0.43,"negative":0.56},"offset":15,"length":26,"text":"I do not like being inside"}],"warnings":[]}],"errors":[],"modelVersion":"2020-04-01"}' headers: - apim-request-id: 276bd220-a216-4cfa-af81-c4d27beea04d + apim-request-id: 9ef7e543-2856-4fae-b20c-e0c8c97fde88 content-type: application/json; charset=utf-8 csp-billing-usage: CognitiveServices.TextAnalytics.BatchScoring=1 - date: Wed, 27 Jan 2021 02:24:39 GMT + date: Sat, 20 Feb 2021 00:08:50 GMT strict-transport-security: max-age=31536000; includeSubDomains; preload transfer-encoding: chunked x-content-type-options: nosniff - x-envoy-upstream-service-time: '90' + x-envoy-upstream-service-time: '106' status: code: 200 message: OK diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_offset.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_offset.yaml index 421a00135c17..e31f4963f092 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_offset.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_offset.yaml @@ -10,7 +10,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 
Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: @@ -19,14 +19,14 @@ interactions: like nature."},{"sentiment":"negative","confidenceScores":{"positive":0.01,"neutral":0.43,"negative":0.56},"offset":15,"length":26,"text":"I do not like being inside"}],"warnings":[]}],"errors":[],"modelVersion":"2020-04-01"}' headers: - apim-request-id: 9fc520bd-21c3-40a8-bd96-78678e719205 + apim-request-id: 356e383e-b9ec-4c03-b285-f803ba2d8aad content-type: application/json; charset=utf-8 csp-billing-usage: CognitiveServices.TextAnalytics.BatchScoring=1 - date: Wed, 27 Jan 2021 02:24:39 GMT + date: Sat, 20 Feb 2021 00:08:51 GMT strict-transport-security: max-age=31536000; includeSubDomains; preload transfer-encoding: chunked x-content-type-options: nosniff - x-envoy-upstream-service-time: '93' + x-envoy-upstream-service-time: '100' status: code: 200 message: OK diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_opinion_mining.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_opinion_mining.yaml index fc37ef0fd524..430cce04da7a 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_opinion_mining.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_opinion_mining.yaml @@ -10,24 +10,24 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST - uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&opinionMining=true&stringIndexType=UnicodeCodePoint + uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.4/sentiment?showStats=false&opinionMining=true&stringIndexType=UnicodeCodePoint response: body: string: '{"documents":[{"id":"0","sentiment":"positive","confidenceScores":{"positive":0.98,"neutral":0.02,"negative":0.0},"sentences":[{"sentiment":"positive","confidenceScores":{"positive":0.98,"neutral":0.02,"negative":0.0},"offset":0,"length":74,"text":"It - has a sleek premium aluminum design that makes it beautiful to look at.","aspects":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":32,"length":6,"text":"design","relations":[{"relationType":"opinion","ref":"#/documents/0/sentences/0/opinions/0"},{"relationType":"opinion","ref":"#/documents/0/sentences/0/opinions/1"}]}],"opinions":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":9,"length":5,"text":"sleek","isNegated":false},{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":15,"length":7,"text":"premium","isNegated":false}]}],"warnings":[]}],"errors":[],"modelVersion":"2020-04-01"}' + has a sleek premium aluminum design that makes it beautiful to look 
at.","targets":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":32,"length":6,"text":"design","relations":[{"relationType":"assessment","ref":"#/documents/0/sentences/0/assessments/0"},{"relationType":"assessment","ref":"#/documents/0/sentences/0/assessments/1"}]}],"assessments":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":9,"length":5,"text":"sleek","isNegated":false},{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":15,"length":7,"text":"premium","isNegated":false}]}],"warnings":[]}],"errors":[],"modelVersion":"2020-04-01"}' headers: - apim-request-id: 534667a1-cc6b-45b9-8a75-0c105f600be0 + apim-request-id: b6540355-ce68-4187-92f7-69ee77a0c098 content-type: application/json; charset=utf-8 csp-billing-usage: CognitiveServices.TextAnalytics.BatchScoring=1 - date: Wed, 27 Jan 2021 02:24:40 GMT + date: Sat, 20 Feb 2021 00:14:43 GMT strict-transport-security: max-age=31536000; includeSubDomains; preload transfer-encoding: chunked x-content-type-options: nosniff - x-envoy-upstream-service-time: '100' + x-envoy-upstream-service-time: '129' status: code: 200 message: OK - url: https://westus2.api.cognitive.microsoft.com//text/analytics/v3.1-preview.3/sentiment?showStats=false&opinionMining=true&stringIndexType=UnicodeCodePoint + url: https://westus2.api.cognitive.microsoft.com//text/analytics/v3.1-preview.4/sentiment?showStats=false&opinionMining=true&stringIndexType=UnicodeCodePoint version: 1 diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_opinion_mining_more_than_5_documents.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_opinion_mining_more_than_5_documents.yaml index 3288a21206f1..945859ffecb4 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_opinion_mining_more_than_5_documents.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_opinion_mining_more_than_5_documents.yaml @@ -17,32 +17,32 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST - uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&opinionMining=true&stringIndexType=UnicodeCodePoint + uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.4/sentiment?showStats=false&opinionMining=true&stringIndexType=UnicodeCodePoint response: body: string: '{"documents":[{"id":"0","sentiment":"negative","confidenceScores":{"positive":0.0,"neutral":0.0,"negative":1.0},"sentences":[{"sentiment":"negative","confidenceScores":{"positive":0.0,"neutral":0.0,"negative":1.0},"offset":0,"length":25,"text":"The - food was 
unacceptable","aspects":[{"sentiment":"negative","confidenceScores":{"positive":0.0,"negative":1.0},"offset":4,"length":4,"text":"food","relations":[{"relationType":"opinion","ref":"#/documents/0/sentences/0/opinions/0"}]}],"opinions":[{"sentiment":"negative","confidenceScores":{"positive":0.0,"negative":1.0},"offset":13,"length":12,"text":"unacceptable","isNegated":false}]}],"warnings":[]},{"id":"1","sentiment":"positive","confidenceScores":{"positive":1.0,"neutral":0.0,"negative":0.0},"sentences":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"neutral":0.0,"negative":0.0},"offset":0,"length":25,"text":"The - rooms were beautiful.","aspects":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":4,"length":5,"text":"rooms","relations":[{"relationType":"opinion","ref":"#/documents/1/sentences/0/opinions/0"}]}],"opinions":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":15,"length":9,"text":"beautiful","isNegated":false}]},{"sentiment":"positive","confidenceScores":{"positive":1.0,"neutral":0.0,"negative":0.0},"offset":26,"length":26,"text":"The - AC was good and quiet.","aspects":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":30,"length":2,"text":"AC","relations":[{"relationType":"opinion","ref":"#/documents/1/sentences/1/opinions/0"},{"relationType":"opinion","ref":"#/documents/1/sentences/1/opinions/1"}]}],"opinions":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":37,"length":4,"text":"good","isNegated":false},{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":46,"length":5,"text":"quiet","isNegated":false}]}],"warnings":[]},{"id":"2","sentiment":"negative","confidenceScores":{"positive":0.01,"neutral":0.0,"negative":0.99},"sentences":[{"sentiment":"negative","confidenceScores":{"positive":0.01,"neutral":0.0,"negative":0.99},"offset":0,"length":50,"text":"The - breakfast was good, but the toilet was smelly.","aspects":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":4,"length":9,"text":"breakfast","relations":[{"relationType":"opinion","ref":"#/documents/2/sentences/0/opinions/0"}]},{"sentiment":"negative","confidenceScores":{"positive":0.0,"negative":1.0},"offset":32,"length":6,"text":"toilet","relations":[{"relationType":"opinion","ref":"#/documents/2/sentences/0/opinions/1"}]}],"opinions":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":18,"length":4,"text":"good","isNegated":false},{"sentiment":"negative","confidenceScores":{"positive":0.0,"negative":1.0},"offset":43,"length":6,"text":"smelly","isNegated":false}]}],"warnings":[]},{"id":"3","sentiment":"positive","confidenceScores":{"positive":1.0,"neutral":0.0,"negative":0.0},"sentences":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"neutral":0.0,"negative":0.0},"offset":0,"length":71,"text":"Loved - this hotel - good breakfast - nice shuttle service - clean 
rooms.","aspects":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":11,"length":5,"text":"hotel","relations":[{"relationType":"opinion","ref":"#/documents/3/sentences/0/opinions/0"}]},{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":24,"length":9,"text":"breakfast","relations":[{"relationType":"opinion","ref":"#/documents/3/sentences/0/opinions/1"}]},{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":41,"length":15,"text":"shuttle - service","relations":[{"relationType":"opinion","ref":"#/documents/3/sentences/0/opinions/2"}]},{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":65,"length":5,"text":"rooms","relations":[{"relationType":"opinion","ref":"#/documents/3/sentences/0/opinions/0"},{"relationType":"opinion","ref":"#/documents/3/sentences/0/opinions/1"},{"relationType":"opinion","ref":"#/documents/3/sentences/0/opinions/3"},{"relationType":"opinion","ref":"#/documents/3/sentences/0/opinions/2"}]}],"opinions":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":19,"length":4,"text":"good","isNegated":false},{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":36,"length":4,"text":"nice","isNegated":false},{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":0,"length":5,"text":"loved","isNegated":false},{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":59,"length":5,"text":"clean","isNegated":false}]}],"warnings":[]},{"id":"4","sentiment":"positive","confidenceScores":{"positive":1.0,"neutral":0.0,"negative":0.0},"sentences":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"neutral":0.0,"negative":0.0},"offset":0,"length":56,"text":"I - had a great unobstructed view of the Microsoft campus.","aspects":[{"sentiment":"positive","confidenceScores":{"positive":0.97,"negative":0.03},"offset":27,"length":4,"text":"view","relations":[{"relationType":"opinion","ref":"#/documents/4/sentences/0/opinions/0"},{"relationType":"opinion","ref":"#/documents/4/sentences/0/opinions/1"}]}],"opinions":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":8,"length":5,"text":"great","isNegated":false},{"sentiment":"positive","confidenceScores":{"positive":0.93,"negative":0.07},"offset":14,"length":12,"text":"unobstructed","isNegated":false}]}],"warnings":[]},{"id":"5","sentiment":"negative","confidenceScores":{"positive":0.0,"neutral":0.0,"negative":1.0},"sentences":[{"sentiment":"negative","confidenceScores":{"positive":0.0,"neutral":0.0,"negative":1.0},"offset":0,"length":75,"text":"Nice - rooms but bathrooms were old and the toilet was dirty when we 
arrived.","aspects":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":5,"length":5,"text":"rooms","relations":[{"relationType":"opinion","ref":"#/documents/5/sentences/0/opinions/0"}]},{"sentiment":"negative","confidenceScores":{"positive":0.0,"negative":1.0},"offset":15,"length":9,"text":"bathrooms","relations":[{"relationType":"opinion","ref":"#/documents/5/sentences/0/opinions/1"}]},{"sentiment":"negative","confidenceScores":{"positive":0.0,"negative":1.0},"offset":42,"length":6,"text":"toilet","relations":[{"relationType":"opinion","ref":"#/documents/5/sentences/0/opinions/2"}]}],"opinions":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":0,"length":4,"text":"nice","isNegated":false},{"sentiment":"negative","confidenceScores":{"positive":0.0,"negative":1.0},"offset":30,"length":3,"text":"old","isNegated":false},{"sentiment":"negative","confidenceScores":{"positive":0.0,"negative":1.0},"offset":53,"length":5,"text":"dirty","isNegated":false}]}],"warnings":[]},{"id":"6","sentiment":"neutral","confidenceScores":{"positive":0.03,"neutral":0.63,"negative":0.34},"sentences":[{"sentiment":"neutral","confidenceScores":{"positive":0.03,"neutral":0.63,"negative":0.34},"offset":0,"length":19,"text":"The - toilet smelled.","aspects":[{"sentiment":"negative","confidenceScores":{"positive":0.01,"negative":0.99},"offset":4,"length":6,"text":"toilet","relations":[{"relationType":"opinion","ref":"#/documents/6/sentences/0/opinions/0"}]}],"opinions":[{"sentiment":"negative","confidenceScores":{"positive":0.01,"negative":0.99},"offset":11,"length":7,"text":"smelled","isNegated":false}]}],"warnings":[]}],"errors":[],"modelVersion":"2020-04-01"}' + food was unacceptable","targets":[{"sentiment":"negative","confidenceScores":{"positive":0.0,"negative":1.0},"offset":4,"length":4,"text":"food","relations":[{"relationType":"assessment","ref":"#/documents/0/sentences/0/assessments/0"}]}],"assessments":[{"sentiment":"negative","confidenceScores":{"positive":0.0,"negative":1.0},"offset":13,"length":12,"text":"unacceptable","isNegated":false}]}],"warnings":[]},{"id":"1","sentiment":"positive","confidenceScores":{"positive":1.0,"neutral":0.0,"negative":0.0},"sentences":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"neutral":0.0,"negative":0.0},"offset":0,"length":25,"text":"The + rooms were beautiful.","targets":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":4,"length":5,"text":"rooms","relations":[{"relationType":"assessment","ref":"#/documents/1/sentences/0/assessments/0"}]}],"assessments":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":15,"length":9,"text":"beautiful","isNegated":false}]},{"sentiment":"positive","confidenceScores":{"positive":1.0,"neutral":0.0,"negative":0.0},"offset":26,"length":26,"text":"The + AC was good and 
quiet.","targets":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":30,"length":2,"text":"AC","relations":[{"relationType":"assessment","ref":"#/documents/1/sentences/1/assessments/0"},{"relationType":"assessment","ref":"#/documents/1/sentences/1/assessments/1"}]}],"assessments":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":37,"length":4,"text":"good","isNegated":false},{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":46,"length":5,"text":"quiet","isNegated":false}]}],"warnings":[]},{"id":"2","sentiment":"negative","confidenceScores":{"positive":0.01,"neutral":0.0,"negative":0.99},"sentences":[{"sentiment":"negative","confidenceScores":{"positive":0.01,"neutral":0.0,"negative":0.99},"offset":0,"length":50,"text":"The + breakfast was good, but the toilet was smelly.","targets":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":4,"length":9,"text":"breakfast","relations":[{"relationType":"assessment","ref":"#/documents/2/sentences/0/assessments/0"}]},{"sentiment":"negative","confidenceScores":{"positive":0.0,"negative":1.0},"offset":32,"length":6,"text":"toilet","relations":[{"relationType":"assessment","ref":"#/documents/2/sentences/0/assessments/1"}]}],"assessments":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":18,"length":4,"text":"good","isNegated":false},{"sentiment":"negative","confidenceScores":{"positive":0.0,"negative":1.0},"offset":43,"length":6,"text":"smelly","isNegated":false}]}],"warnings":[]},{"id":"3","sentiment":"positive","confidenceScores":{"positive":1.0,"neutral":0.0,"negative":0.0},"sentences":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"neutral":0.0,"negative":0.0},"offset":0,"length":71,"text":"Loved + this hotel - good breakfast - nice shuttle service - clean rooms.","targets":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":11,"length":5,"text":"hotel","relations":[{"relationType":"assessment","ref":"#/documents/3/sentences/0/assessments/0"}]},{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":24,"length":9,"text":"breakfast","relations":[{"relationType":"assessment","ref":"#/documents/3/sentences/0/assessments/1"}]},{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":41,"length":15,"text":"shuttle + 
service","relations":[{"relationType":"assessment","ref":"#/documents/3/sentences/0/assessments/2"}]},{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":65,"length":5,"text":"rooms","relations":[{"relationType":"assessment","ref":"#/documents/3/sentences/0/assessments/0"},{"relationType":"assessment","ref":"#/documents/3/sentences/0/assessments/1"},{"relationType":"assessment","ref":"#/documents/3/sentences/0/assessments/3"},{"relationType":"assessment","ref":"#/documents/3/sentences/0/assessments/2"}]}],"assessments":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":19,"length":4,"text":"good","isNegated":false},{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":36,"length":4,"text":"nice","isNegated":false},{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":0,"length":5,"text":"loved","isNegated":false},{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":59,"length":5,"text":"clean","isNegated":false}]}],"warnings":[]},{"id":"4","sentiment":"positive","confidenceScores":{"positive":1.0,"neutral":0.0,"negative":0.0},"sentences":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"neutral":0.0,"negative":0.0},"offset":0,"length":56,"text":"I + had a great unobstructed view of the Microsoft campus.","targets":[{"sentiment":"positive","confidenceScores":{"positive":0.97,"negative":0.03},"offset":27,"length":4,"text":"view","relations":[{"relationType":"assessment","ref":"#/documents/4/sentences/0/assessments/0"},{"relationType":"assessment","ref":"#/documents/4/sentences/0/assessments/1"}]}],"assessments":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":8,"length":5,"text":"great","isNegated":false},{"sentiment":"positive","confidenceScores":{"positive":0.93,"negative":0.07},"offset":14,"length":12,"text":"unobstructed","isNegated":false}]}],"warnings":[]},{"id":"5","sentiment":"negative","confidenceScores":{"positive":0.0,"neutral":0.0,"negative":1.0},"sentences":[{"sentiment":"negative","confidenceScores":{"positive":0.0,"neutral":0.0,"negative":1.0},"offset":0,"length":75,"text":"Nice + rooms but bathrooms were old and the toilet was dirty when we 
arrived.","targets":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":5,"length":5,"text":"rooms","relations":[{"relationType":"assessment","ref":"#/documents/5/sentences/0/assessments/0"}]},{"sentiment":"negative","confidenceScores":{"positive":0.0,"negative":1.0},"offset":15,"length":9,"text":"bathrooms","relations":[{"relationType":"assessment","ref":"#/documents/5/sentences/0/assessments/1"}]},{"sentiment":"negative","confidenceScores":{"positive":0.0,"negative":1.0},"offset":42,"length":6,"text":"toilet","relations":[{"relationType":"assessment","ref":"#/documents/5/sentences/0/assessments/2"}]}],"assessments":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"negative":0.0},"offset":0,"length":4,"text":"nice","isNegated":false},{"sentiment":"negative","confidenceScores":{"positive":0.0,"negative":1.0},"offset":30,"length":3,"text":"old","isNegated":false},{"sentiment":"negative","confidenceScores":{"positive":0.0,"negative":1.0},"offset":53,"length":5,"text":"dirty","isNegated":false}]}],"warnings":[]},{"id":"6","sentiment":"neutral","confidenceScores":{"positive":0.03,"neutral":0.63,"negative":0.34},"sentences":[{"sentiment":"neutral","confidenceScores":{"positive":0.03,"neutral":0.63,"negative":0.34},"offset":0,"length":19,"text":"The + toilet smelled.","targets":[{"sentiment":"negative","confidenceScores":{"positive":0.01,"negative":0.99},"offset":4,"length":6,"text":"toilet","relations":[{"relationType":"assessment","ref":"#/documents/6/sentences/0/assessments/0"}]}],"assessments":[{"sentiment":"negative","confidenceScores":{"positive":0.01,"negative":0.99},"offset":11,"length":7,"text":"smelled","isNegated":false}]}],"warnings":[]}],"errors":[],"modelVersion":"2020-04-01"}' headers: - apim-request-id: 389ae37e-fcc6-41d0-8a03-956d557a1b3a + apim-request-id: d387f3ba-5af2-4512-83a1-850e1639e9c7 content-type: application/json; charset=utf-8 csp-billing-usage: CognitiveServices.TextAnalytics.BatchScoring=7 - date: Wed, 27 Jan 2021 02:24:41 GMT + date: Sat, 20 Feb 2021 00:16:29 GMT strict-transport-security: max-age=31536000; includeSubDomains; preload transfer-encoding: chunked x-content-type-options: nosniff - x-envoy-upstream-service-time: '1039' + x-envoy-upstream-service-time: '4121' status: code: 200 message: OK - url: https://westus2.api.cognitive.microsoft.com//text/analytics/v3.1-preview.3/sentiment?showStats=false&opinionMining=true&stringIndexType=UnicodeCodePoint + url: https://westus2.api.cognitive.microsoft.com//text/analytics/v3.1-preview.4/sentiment?showStats=false&opinionMining=true&stringIndexType=UnicodeCodePoint version: 1 diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_opinion_mining_no_mined_opinions.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_opinion_mining_no_mined_opinions.yaml index 8bcfd65b468d..66db4eecfb20 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_opinion_mining_no_mined_opinions.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_opinion_mining_no_mined_opinions.yaml @@ -9,7 +9,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: 
https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&opinionMining=true&stringIndexType=UnicodeCodePoint response: @@ -17,14 +17,14 @@ interactions: string: '{"documents":[{"id":"0","sentiment":"neutral","confidenceScores":{"positive":0.1,"neutral":0.88,"negative":0.02},"sentences":[{"sentiment":"neutral","confidenceScores":{"positive":0.1,"neutral":0.88,"negative":0.02},"offset":0,"length":18,"text":"today is a hot day","aspects":[],"opinions":[]}],"warnings":[]}],"errors":[],"modelVersion":"2020-04-01"}' headers: - apim-request-id: 48a34c20-099d-4e57-bdfc-e6c07114c5d3 + apim-request-id: 79655b71-b162-464c-9b92-555dd732cec6 content-type: application/json; charset=utf-8 csp-billing-usage: CognitiveServices.TextAnalytics.BatchScoring=1 - date: Wed, 27 Jan 2021 02:24:42 GMT + date: Tue, 23 Feb 2021 01:41:11 GMT strict-transport-security: max-age=31536000; includeSubDomains; preload transfer-encoding: chunked x-content-type-options: nosniff - x-envoy-upstream-service-time: '1167' + x-envoy-upstream-service-time: '170' status: code: 200 message: OK diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_opinion_mining_with_negated_opinion.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_opinion_mining_with_negated_opinion.yaml index d0ef85f3652d..048ffeaac22c 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_opinion_mining_with_negated_opinion.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_opinion_mining_with_negated_opinion.yaml @@ -10,24 +10,24 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST - uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&opinionMining=true&stringIndexType=UnicodeCodePoint + uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.4/sentiment?showStats=false&opinionMining=true&stringIndexType=UnicodeCodePoint response: body: string: '{"documents":[{"id":"0","sentiment":"negative","confidenceScores":{"positive":0.0,"neutral":0.0,"negative":1.0},"sentences":[{"sentiment":"negative","confidenceScores":{"positive":0.0,"neutral":0.0,"negative":1.0},"offset":0,"length":32,"text":"The - food and service is not good","aspects":[{"sentiment":"negative","confidenceScores":{"positive":0.01,"negative":0.99},"offset":4,"length":4,"text":"food","relations":[{"relationType":"opinion","ref":"#/documents/0/sentences/0/opinions/0"}]},{"sentiment":"negative","confidenceScores":{"positive":0.01,"negative":0.99},"offset":13,"length":7,"text":"service","relations":[{"relationType":"opinion","ref":"#/documents/0/sentences/0/opinions/0"}]}],"opinions":[{"sentiment":"negative","confidenceScores":{"positive":0.01,"negative":0.99},"offset":28,"length":4,"text":"good","isNegated":true}]}],"warnings":[]}],"errors":[],"modelVersion":"2020-04-01"}' + food and service is not 
good","targets":[{"sentiment":"negative","confidenceScores":{"positive":0.01,"negative":0.99},"offset":4,"length":4,"text":"food","relations":[{"relationType":"assessment","ref":"#/documents/0/sentences/0/assessments/0"}]},{"sentiment":"negative","confidenceScores":{"positive":0.01,"negative":0.99},"offset":13,"length":7,"text":"service","relations":[{"relationType":"assessment","ref":"#/documents/0/sentences/0/assessments/0"}]}],"assessments":[{"sentiment":"negative","confidenceScores":{"positive":0.01,"negative":0.99},"offset":28,"length":4,"text":"good","isNegated":true}]}],"warnings":[]}],"errors":[],"modelVersion":"2020-04-01"}' headers: - apim-request-id: 3ac192b2-7335-4e02-a0dd-780a0468264c + apim-request-id: e170debb-6f75-4585-9432-ff7076914729 content-type: application/json; charset=utf-8 csp-billing-usage: CognitiveServices.TextAnalytics.BatchScoring=1 - date: Wed, 27 Jan 2021 02:24:44 GMT + date: Sat, 20 Feb 2021 00:20:42 GMT strict-transport-security: max-age=31536000; includeSubDomains; preload transfer-encoding: chunked x-content-type-options: nosniff - x-envoy-upstream-service-time: '663' + x-envoy-upstream-service-time: '131' status: code: 200 message: OK - url: https://westus2.api.cognitive.microsoft.com//text/analytics/v3.1-preview.3/sentiment?showStats=false&opinionMining=true&stringIndexType=UnicodeCodePoint + url: https://westus2.api.cognitive.microsoft.com//text/analytics/v3.1-preview.4/sentiment?showStats=false&opinionMining=true&stringIndexType=UnicodeCodePoint version: 1 diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_out_of_order_ids.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_out_of_order_ids.yaml index f38730587fe2..201b0400ad51 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_out_of_order_ids.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_out_of_order_ids.yaml @@ -12,7 +12,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: @@ -21,14 +21,14 @@ interactions: document in request.","innererror":{"code":"InvalidDocument","message":"Document text is empty."}}}],"modelVersion":"2020-04-01"}' headers: - apim-request-id: e14dd7db-6edd-4d0a-b803-7e12243a5513 + apim-request-id: 42d9ed0a-692b-4128-8aef-e281526b8c8f content-type: application/json; charset=utf-8 csp-billing-usage: CognitiveServices.TextAnalytics.BatchScoring=4 - date: Wed, 27 Jan 2021 02:24:45 GMT + date: Sat, 20 Feb 2021 00:08:53 GMT strict-transport-security: max-age=31536000; includeSubDomains; preload transfer-encoding: chunked x-content-type-options: nosniff - x-envoy-upstream-service-time: '101' + x-envoy-upstream-service-time: '96' status: code: 200 message: OK diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_output_same_order_as_input.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_output_same_order_as_input.yaml index b525a73878da..aa2644ce2cc9 100644 --- 
a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_output_same_order_as_input.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_output_same_order_as_input.yaml @@ -12,21 +12,21 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: body: string: '{"documents":[{"id":"1","sentiment":"neutral","confidenceScores":{"positive":0.06,"neutral":0.9,"negative":0.04},"sentences":[{"sentiment":"neutral","confidenceScores":{"positive":0.06,"neutral":0.9,"negative":0.04},"offset":0,"length":3,"text":"one"}],"warnings":[]},{"id":"2","sentiment":"neutral","confidenceScores":{"positive":0.01,"neutral":0.97,"negative":0.02},"sentences":[{"sentiment":"neutral","confidenceScores":{"positive":0.01,"neutral":0.97,"negative":0.02},"offset":0,"length":3,"text":"two"}],"warnings":[]},{"id":"3","sentiment":"neutral","confidenceScores":{"positive":0.05,"neutral":0.93,"negative":0.02},"sentences":[{"sentiment":"neutral","confidenceScores":{"positive":0.05,"neutral":0.93,"negative":0.02},"offset":0,"length":5,"text":"three"}],"warnings":[]},{"id":"4","sentiment":"neutral","confidenceScores":{"positive":0.03,"neutral":0.96,"negative":0.01},"sentences":[{"sentiment":"neutral","confidenceScores":{"positive":0.03,"neutral":0.96,"negative":0.01},"offset":0,"length":4,"text":"four"}],"warnings":[]},{"id":"5","sentiment":"neutral","confidenceScores":{"positive":0.05,"neutral":0.93,"negative":0.02},"sentences":[{"sentiment":"neutral","confidenceScores":{"positive":0.05,"neutral":0.93,"negative":0.02},"offset":0,"length":4,"text":"five"}],"warnings":[]}],"errors":[],"modelVersion":"2020-04-01"}' headers: - apim-request-id: 23dd485e-bf28-4f0d-97e1-c03932e186ac + apim-request-id: d5a6409d-bcc4-40f6-a28f-ccc9cf77f2ae content-type: application/json; charset=utf-8 csp-billing-usage: CognitiveServices.TextAnalytics.BatchScoring=5 - date: Wed, 27 Jan 2021 02:24:45 GMT + date: Sat, 20 Feb 2021 00:08:52 GMT strict-transport-security: max-age=31536000; includeSubDomains; preload transfer-encoding: chunked x-content-type-options: nosniff - x-envoy-upstream-service-time: '86' + x-envoy-upstream-service-time: '108' status: code: 200 message: OK diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_pass_cls.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_pass_cls.yaml index 50fcec899764..5896b287433a 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_pass_cls.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_pass_cls.yaml @@ -10,7 +10,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: @@ -18,14 +18,14 
@@ interactions: string: '{"documents":[{"id":"0","sentiment":"neutral","confidenceScores":{"positive":0.32,"neutral":0.65,"negative":0.03},"sentences":[{"sentiment":"neutral","confidenceScores":{"positive":0.32,"neutral":0.65,"negative":0.03},"offset":0,"length":28,"text":"Test passing cls to endpoint"}],"warnings":[]}],"errors":[],"modelVersion":"2020-04-01"}' headers: - apim-request-id: c050f104-16d5-4f2b-a8b7-1df8621d3292 + apim-request-id: e9899321-6f3d-4b0f-a767-bf1e7cbe61ee content-type: application/json; charset=utf-8 csp-billing-usage: CognitiveServices.TextAnalytics.BatchScoring=1 - date: Wed, 27 Jan 2021 02:24:45 GMT + date: Sat, 20 Feb 2021 00:08:52 GMT strict-transport-security: max-age=31536000; includeSubDomains; preload transfer-encoding: chunked x-content-type-options: nosniff - x-envoy-upstream-service-time: '85' + x-envoy-upstream-service-time: '76' status: code: 200 message: OK diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_passing_only_string.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_passing_only_string.yaml index d4806329e495..3bc6bbcc53f1 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_passing_only_string.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_passing_only_string.yaml @@ -13,7 +13,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: @@ -27,14 +27,14 @@ interactions: document in request.","innererror":{"code":"InvalidDocument","message":"Document text is empty."}}}],"modelVersion":"2020-04-01"}' headers: - apim-request-id: c7547646-a7da-4b4f-b081-0d153f56dfc3 + apim-request-id: c73895a0-acdb-4dfd-9c37-e7026d98d541 content-type: application/json; charset=utf-8 csp-billing-usage: CognitiveServices.TextAnalytics.BatchScoring=3 - date: Wed, 27 Jan 2021 02:24:45 GMT + date: Sat, 20 Feb 2021 00:08:52 GMT strict-transport-security: max-age=31536000; includeSubDomains; preload transfer-encoding: chunked x-content-type-options: nosniff - x-envoy-upstream-service-time: '97' + x-envoy-upstream-service-time: '106' status: code: 200 message: OK diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_per_item_dont_use_language_hint.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_per_item_dont_use_language_hint.yaml index eb8fd979871e..8da59da05536 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_per_item_dont_use_language_hint.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_per_item_dont_use_language_hint.yaml @@ -12,7 +12,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: 
https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: @@ -22,14 +22,14 @@ interactions: did not like the hotel we stayed at."}],"warnings":[]},{"id":"3","sentiment":"positive","confidenceScores":{"positive":1.0,"neutral":0.0,"negative":0.0},"sentences":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"neutral":0.0,"negative":0.0},"offset":0,"length":36,"text":"The restaurant had really good food."}],"warnings":[]}],"errors":[],"modelVersion":"2020-04-01"}' headers: - apim-request-id: 855916c5-8cda-4392-bb77-06f171980ceb + apim-request-id: 0859a02b-9e2e-4a5b-9df8-f509595fec8c content-type: application/json; charset=utf-8 csp-billing-usage: CognitiveServices.TextAnalytics.BatchScoring=3 - date: Wed, 27 Jan 2021 02:24:46 GMT + date: Sat, 20 Feb 2021 00:08:53 GMT strict-transport-security: max-age=31536000; includeSubDomains; preload transfer-encoding: chunked x-content-type-options: nosniff - x-envoy-upstream-service-time: '101' + x-envoy-upstream-service-time: '85' status: code: 200 message: OK diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_rotate_subscription_key.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_rotate_subscription_key.yaml index 18281c7a0efd..d9fbbeb529c2 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_rotate_subscription_key.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_rotate_subscription_key.yaml @@ -12,7 +12,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: @@ -22,14 +22,14 @@ interactions: did not like the hotel we stayed at."}],"warnings":[]},{"id":"3","sentiment":"positive","confidenceScores":{"positive":1.0,"neutral":0.0,"negative":0.0},"sentences":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"neutral":0.0,"negative":0.0},"offset":0,"length":36,"text":"The restaurant had really good food."}],"warnings":[]}],"errors":[],"modelVersion":"2020-04-01"}' headers: - apim-request-id: 6bd77804-bf79-4e91-acc7-aae48b877eca + apim-request-id: 049e2706-936e-440e-8172-fc3a63075a98 content-type: application/json; charset=utf-8 csp-billing-usage: CognitiveServices.TextAnalytics.BatchScoring=3 - date: Wed, 27 Jan 2021 02:24:46 GMT + date: Sat, 20 Feb 2021 00:08:53 GMT strict-transport-security: max-age=31536000; includeSubDomains; preload transfer-encoding: chunked x-content-type-options: nosniff - x-envoy-upstream-service-time: '94' + x-envoy-upstream-service-time: '615' status: code: 200 message: OK @@ -47,7 +47,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: @@ -57,7 +57,7 @@ interactions: 
subscription and use a correct regional API endpoint for your resource."}}' headers: content-length: '224' - date: Wed, 27 Jan 2021 02:24:46 GMT + date: Sat, 20 Feb 2021 00:08:53 GMT status: code: 401 message: PermissionDenied @@ -75,7 +75,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: @@ -85,14 +85,14 @@ interactions: did not like the hotel we stayed at."}],"warnings":[]},{"id":"3","sentiment":"positive","confidenceScores":{"positive":1.0,"neutral":0.0,"negative":0.0},"sentences":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"neutral":0.0,"negative":0.0},"offset":0,"length":36,"text":"The restaurant had really good food."}],"warnings":[]}],"errors":[],"modelVersion":"2020-04-01"}' headers: - apim-request-id: f8ba60fe-b8e1-409d-a41f-c3086fd5796f + apim-request-id: 3d3bdd7b-b46a-4ca6-8873-d6ffeab9afa9 content-type: application/json; charset=utf-8 csp-billing-usage: CognitiveServices.TextAnalytics.BatchScoring=3 - date: Wed, 27 Jan 2021 02:24:47 GMT + date: Sat, 20 Feb 2021 00:08:53 GMT strict-transport-security: max-age=31536000; includeSubDomains; preload transfer-encoding: chunked x-content-type-options: nosniff - x-envoy-upstream-service-time: '106' + x-envoy-upstream-service-time: '97' status: code: 200 message: OK diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_show_stats_and_model_version.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_show_stats_and_model_version.yaml index b83677b5f883..bd72b8b96db7 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_show_stats_and_model_version.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_show_stats_and_model_version.yaml @@ -12,7 +12,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?model-version=latest&showStats=true&stringIndexType=UnicodeCodePoint response: @@ -21,14 +21,14 @@ interactions: document in request.","innererror":{"code":"InvalidDocument","message":"Document text is empty."}}}],"modelVersion":"2020-04-01"}' headers: - apim-request-id: d26ed04e-419b-410f-8d8b-87874f816faa + apim-request-id: 0fbd652d-ddf8-4d6d-a6b3-70be3a490b1e content-type: application/json; charset=utf-8 csp-billing-usage: CognitiveServices.TextAnalytics.BatchScoring=4 - date: Wed, 27 Jan 2021 02:24:47 GMT + date: Sat, 20 Feb 2021 00:08:54 GMT strict-transport-security: max-age=31536000; includeSubDomains; preload transfer-encoding: chunked x-content-type-options: nosniff - x-envoy-upstream-service-time: '94' + x-envoy-upstream-service-time: '167' status: code: 200 message: OK diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_string_index_type_not_fail_v3.yaml 
b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_string_index_type_not_fail_v3.yaml index e8cc454fd055..9fd0ee3de19e 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_string_index_type_not_fail_v3.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_string_index_type_not_fail_v3.yaml @@ -9,7 +9,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.0/sentiment?showStats=false response: @@ -17,14 +17,14 @@ interactions: string: '{"documents":[{"id":"0","sentiment":"positive","confidenceScores":{"positive":0.99,"neutral":0.0,"negative":0.01},"sentences":[{"sentiment":"positive","confidenceScores":{"positive":0.99,"neutral":0.0,"negative":0.01},"offset":0,"length":17,"text":"please don''t fail"}],"warnings":[]}],"errors":[],"modelVersion":"2020-04-01"}' headers: - apim-request-id: ec7b2370-c0a7-400c-a764-938115698de8 + apim-request-id: 7eea460b-4749-475b-a8a8-c17c8588a290 content-type: application/json; charset=utf-8 csp-billing-usage: CognitiveServices.TextAnalytics.BatchScoring=1 - date: Wed, 27 Jan 2021 02:21:28 GMT + date: Sat, 20 Feb 2021 00:08:54 GMT strict-transport-security: max-age=31536000; includeSubDomains; preload transfer-encoding: chunked x-content-type-options: nosniff - x-envoy-upstream-service-time: '87' + x-envoy-upstream-service-time: '108' status: code: 200 message: OK diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_too_many_documents.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_too_many_documents.yaml index 6a277ae12d93..1ebea46adb55 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_too_many_documents.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_too_many_documents.yaml @@ -15,7 +15,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: @@ -23,13 +23,13 @@ interactions: string: '{"error":{"code":"InvalidRequest","message":"Invalid document in request.","innererror":{"code":"InvalidDocumentBatch","message":"Batch request contains too many records. 
Max 10 records are permitted."}}}' headers: - apim-request-id: 6643eaae-f9cb-4355-9a94-d57ec27ba64f + apim-request-id: f54d235c-3c9a-4f7a-b0e4-8aeb710ce307 content-type: application/json; charset=utf-8 - date: Wed, 27 Jan 2021 02:21:29 GMT + date: Sat, 20 Feb 2021 00:08:54 GMT strict-transport-security: max-age=31536000; includeSubDomains; preload transfer-encoding: chunked x-content-type-options: nosniff - x-envoy-upstream-service-time: '4' + x-envoy-upstream-service-time: '5' status: code: 400 message: Bad Request diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_user_agent.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_user_agent.yaml index 1ae76b206a1e..5b6b29a3a3e4 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_user_agent.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_user_agent.yaml @@ -12,7 +12,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: @@ -22,14 +22,14 @@ interactions: did not like the hotel we stayed at."}],"warnings":[]},{"id":"3","sentiment":"positive","confidenceScores":{"positive":1.0,"neutral":0.0,"negative":0.0},"sentences":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"neutral":0.0,"negative":0.0},"offset":0,"length":36,"text":"The restaurant had really good food."}],"warnings":[]}],"errors":[],"modelVersion":"2020-04-01"}' headers: - apim-request-id: 0c1a7a34-a070-4069-979b-2092fcd192f3 + apim-request-id: da773e5c-6f91-4133-801f-a221bc5ff3ef content-type: application/json; charset=utf-8 csp-billing-usage: CognitiveServices.TextAnalytics.BatchScoring=3 - date: Wed, 27 Jan 2021 02:21:29 GMT + date: Sat, 20 Feb 2021 00:08:54 GMT strict-transport-security: max-age=31536000; includeSubDomains; preload transfer-encoding: chunked x-content-type-options: nosniff - x-envoy-upstream-service-time: '104' + x-envoy-upstream-service-time: '88' status: code: 200 message: OK diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_whole_batch_dont_use_language_hint.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_whole_batch_dont_use_language_hint.yaml index 33641c0e8c0c..32ff8c1c4de2 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_whole_batch_dont_use_language_hint.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_whole_batch_dont_use_language_hint.yaml @@ -12,7 +12,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: @@ -23,14 +23,14 @@ interactions: was too 
expensive."}],"warnings":[]},{"id":"2","sentiment":"negative","confidenceScores":{"positive":0.01,"neutral":0.0,"negative":0.99},"sentences":[{"sentiment":"negative","confidenceScores":{"positive":0.01,"neutral":0.0,"negative":0.99},"offset":0,"length":42,"text":"The restaurant was not as good as I hoped."}],"warnings":[]}],"errors":[],"modelVersion":"2020-04-01"}' headers: - apim-request-id: ce904ddb-a1af-4b8f-852f-0645a5e9ec00 + apim-request-id: 1b802c28-098b-4a3d-a912-925728f66433 content-type: application/json; charset=utf-8 csp-billing-usage: CognitiveServices.TextAnalytics.BatchScoring=3 - date: Wed, 27 Jan 2021 02:21:31 GMT + date: Sat, 20 Feb 2021 00:08:55 GMT strict-transport-security: max-age=31536000; includeSubDomains; preload transfer-encoding: chunked x-content-type-options: nosniff - x-envoy-upstream-service-time: '103' + x-envoy-upstream-service-time: '112' status: code: 200 message: OK diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_whole_batch_language_hint.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_whole_batch_language_hint.yaml index 46c8c3f67bc9..f233d2eb59ec 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_whole_batch_language_hint.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_whole_batch_language_hint.yaml @@ -12,7 +12,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: @@ -23,14 +23,14 @@ interactions: was too expensive."}],"warnings":[]},{"id":"2","sentiment":"negative","confidenceScores":{"positive":0.01,"neutral":0.32,"negative":0.67},"sentences":[{"sentiment":"negative","confidenceScores":{"positive":0.01,"neutral":0.32,"negative":0.67},"offset":0,"length":42,"text":"The restaurant was not as good as I hoped."}],"warnings":[]}],"errors":[],"modelVersion":"2020-04-01"}' headers: - apim-request-id: 7bdce1c2-ff3c-42bf-834b-c831df53230e + apim-request-id: 614b9993-3c8b-4586-8b5c-a10f9076f8f2 content-type: application/json; charset=utf-8 csp-billing-usage: CognitiveServices.TextAnalytics.BatchScoring=3 - date: Wed, 27 Jan 2021 02:21:31 GMT + date: Sat, 20 Feb 2021 00:08:55 GMT strict-transport-security: max-age=31536000; includeSubDomains; preload transfer-encoding: chunked x-content-type-options: nosniff - x-envoy-upstream-service-time: '117' + x-envoy-upstream-service-time: '116' status: code: 200 message: OK diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_whole_batch_language_hint_and_dict_input.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_whole_batch_language_hint_and_dict_input.yaml index 6093173c8db1..5a5bc5e7019b 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_whole_batch_language_hint_and_dict_input.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_whole_batch_language_hint_and_dict_input.yaml @@ -12,7 +12,7 @@ interactions: Content-Type: - 
application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: @@ -22,14 +22,14 @@ interactions: did not like the hotel we stayed at."}],"warnings":[]},{"id":"3","sentiment":"positive","confidenceScores":{"positive":0.97,"neutral":0.02,"negative":0.01},"sentences":[{"sentiment":"positive","confidenceScores":{"positive":0.97,"neutral":0.02,"negative":0.01},"offset":0,"length":36,"text":"The restaurant had really good food."}],"warnings":[]}],"errors":[],"modelVersion":"2020-04-01"}' headers: - apim-request-id: da241cdf-1975-4c1d-ab4a-38558c2c346a + apim-request-id: e8b9cdb6-1be8-426d-8b88-59e87b713b2c content-type: application/json; charset=utf-8 csp-billing-usage: CognitiveServices.TextAnalytics.BatchScoring=3 - date: Wed, 27 Jan 2021 02:21:32 GMT + date: Sat, 20 Feb 2021 00:08:55 GMT strict-transport-security: max-age=31536000; includeSubDomains; preload transfer-encoding: chunked x-content-type-options: nosniff - x-envoy-upstream-service-time: '879' + x-envoy-upstream-service-time: '101' status: code: 200 message: OK diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_whole_batch_language_hint_and_dict_per_item_hints.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_whole_batch_language_hint_and_dict_per_item_hints.yaml index 1aa26064e724..933d680cb772 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_whole_batch_language_hint_and_dict_per_item_hints.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_whole_batch_language_hint_and_dict_per_item_hints.yaml @@ -12,7 +12,7 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: @@ -22,14 +22,14 @@ interactions: did not like the hotel we stayed at."}],"warnings":[]},{"id":"3","sentiment":"positive","confidenceScores":{"positive":1.0,"neutral":0.0,"negative":0.0},"sentences":[{"sentiment":"positive","confidenceScores":{"positive":1.0,"neutral":0.0,"negative":0.0},"offset":0,"length":36,"text":"The restaurant had really good food."}],"warnings":[]}],"errors":[],"modelVersion":"2020-04-01"}' headers: - apim-request-id: aea6e3f8-400e-427c-bf2e-71494e958fbd + apim-request-id: 977c863f-c5c3-469b-ad62-982174c10e5d content-type: application/json; charset=utf-8 csp-billing-usage: CognitiveServices.TextAnalytics.BatchScoring=3 - date: Wed, 27 Jan 2021 02:21:33 GMT + date: Sat, 20 Feb 2021 00:08:55 GMT strict-transport-security: max-age=31536000; includeSubDomains; preload transfer-encoding: chunked x-content-type-options: nosniff - x-envoy-upstream-service-time: '283' + x-envoy-upstream-service-time: '131' status: code: 200 message: OK diff --git 
a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_whole_batch_language_hint_and_obj_input.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_whole_batch_language_hint_and_obj_input.yaml index 82cbbab3a6b0..240fdb378127 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_whole_batch_language_hint_and_obj_input.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_whole_batch_language_hint_and_obj_input.yaml @@ -12,34 +12,23 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: body: - string: "{\"documents\":[{\"id\":\"1\",\"sentiment\":\"neutral\",\"confidenceScores\"\ - :{\"positive\":0.15,\"neutral\":0.81,\"negative\":0.04},\"sentences\":[{\"\ - sentiment\":\"neutral\",\"confidenceScores\":{\"positive\":0.15,\"neutral\"\ - :0.81,\"negative\":0.04},\"offset\":0,\"length\":41,\"text\":\"I should take\ - \ my cat to the veterinarian.\"}],\"warnings\":[]},{\"id\":\"4\",\"sentiment\"\ - :\"neutral\",\"confidenceScores\":{\"positive\":0.38,\"neutral\":0.48,\"negative\"\ - :0.14},\"sentences\":[{\"sentiment\":\"neutral\",\"confidenceScores\":{\"\ - positive\":0.38,\"neutral\":0.48,\"negative\":0.14},\"offset\":0,\"length\"\ - :39,\"text\":\"Este es un document escrito en Espa\xF1ol.\"}],\"warnings\"\ - :[]},{\"id\":\"3\",\"sentiment\":\"neutral\",\"confidenceScores\":{\"positive\"\ - :0.15,\"neutral\":0.79,\"negative\":0.06},\"sentences\":[{\"sentiment\":\"\ - neutral\",\"confidenceScores\":{\"positive\":0.15,\"neutral\":0.79,\"negative\"\ - :0.06},\"offset\":0,\"length\":4,\"text\":\"\u732B\u306F\u5E78\u305B\"}],\"\ - warnings\":[]}],\"errors\":[],\"modelVersion\":\"2020-04-01\"}" + string: "{\"documents\":[{\"id\":\"1\",\"sentiment\":\"neutral\",\"confidenceScores\":{\"positive\":0.15,\"neutral\":0.81,\"negative\":0.04},\"sentences\":[{\"sentiment\":\"neutral\",\"confidenceScores\":{\"positive\":0.15,\"neutral\":0.81,\"negative\":0.04},\"offset\":0,\"length\":41,\"text\":\"I + should take my cat to the veterinarian.\"}],\"warnings\":[]},{\"id\":\"4\",\"sentiment\":\"neutral\",\"confidenceScores\":{\"positive\":0.38,\"neutral\":0.48,\"negative\":0.14},\"sentences\":[{\"sentiment\":\"neutral\",\"confidenceScores\":{\"positive\":0.38,\"neutral\":0.48,\"negative\":0.14},\"offset\":0,\"length\":39,\"text\":\"Este + es un document escrito en Espa\xF1ol.\"}],\"warnings\":[]},{\"id\":\"3\",\"sentiment\":\"neutral\",\"confidenceScores\":{\"positive\":0.15,\"neutral\":0.79,\"negative\":0.06},\"sentences\":[{\"sentiment\":\"neutral\",\"confidenceScores\":{\"positive\":0.15,\"neutral\":0.79,\"negative\":0.06},\"offset\":0,\"length\":4,\"text\":\"\u732B\u306F\u5E78\u305B\"}],\"warnings\":[]}],\"errors\":[],\"modelVersion\":\"2020-04-01\"}" headers: - apim-request-id: 2f8adbe8-04b4-4bbb-8f8e-a64dcec195cc + apim-request-id: 0ad29037-341f-4e3d-9e19-1eed62b062d7 content-type: application/json; charset=utf-8 csp-billing-usage: CognitiveServices.TextAnalytics.BatchScoring=3 - date: Wed, 27 Jan 2021 02:21:34 GMT + date: Sat, 20 Feb 2021 00:08:55 GMT 
strict-transport-security: max-age=31536000; includeSubDomains; preload transfer-encoding: chunked x-content-type-options: nosniff - x-envoy-upstream-service-time: '121' + x-envoy-upstream-service-time: '94' status: code: 200 message: OK diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_whole_batch_language_hint_and_obj_per_item_hints.yaml b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_whole_batch_language_hint_and_obj_per_item_hints.yaml index 352620525348..d83f84da8791 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_whole_batch_language_hint_and_obj_per_item_hints.yaml +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/recordings/test_analyze_sentiment_async.test_whole_batch_language_hint_and_obj_per_item_hints.yaml @@ -12,34 +12,23 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-textanalytics/5.1.0b5 Python/3.8.5 (macOS-10.13.6-x86_64-i386-64bit) + - azsdk-python-ai-textanalytics/5.1.0b6 Python/3.7.9 (Linux-4.4.0-19041-Microsoft-x86_64-with-Ubuntu-16.04-xenial) method: POST uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.3/sentiment?showStats=false&stringIndexType=UnicodeCodePoint response: body: - string: "{\"documents\":[{\"id\":\"1\",\"sentiment\":\"neutral\",\"confidenceScores\"\ - :{\"positive\":0.05,\"neutral\":0.94,\"negative\":0.01},\"sentences\":[{\"\ - sentiment\":\"neutral\",\"confidenceScores\":{\"positive\":0.05,\"neutral\"\ - :0.94,\"negative\":0.01},\"offset\":0,\"length\":41,\"text\":\"I should take\ - \ my cat to the veterinarian.\"}],\"warnings\":[]},{\"id\":\"2\",\"sentiment\"\ - :\"neutral\",\"confidenceScores\":{\"positive\":0.03,\"neutral\":0.92,\"negative\"\ - :0.05},\"sentences\":[{\"sentiment\":\"neutral\",\"confidenceScores\":{\"\ - positive\":0.03,\"neutral\":0.92,\"negative\":0.05},\"offset\":0,\"length\"\ - :39,\"text\":\"Este es un document escrito en Espa\xF1ol.\"}],\"warnings\"\ - :[]},{\"id\":\"3\",\"sentiment\":\"neutral\",\"confidenceScores\":{\"positive\"\ - :0.13,\"neutral\":0.81,\"negative\":0.06},\"sentences\":[{\"sentiment\":\"\ - neutral\",\"confidenceScores\":{\"positive\":0.13,\"neutral\":0.81,\"negative\"\ - :0.06},\"offset\":0,\"length\":4,\"text\":\"\u732B\u306F\u5E78\u305B\"}],\"\ - warnings\":[]}],\"errors\":[],\"modelVersion\":\"2020-04-01\"}" + string: "{\"documents\":[{\"id\":\"1\",\"sentiment\":\"neutral\",\"confidenceScores\":{\"positive\":0.05,\"neutral\":0.94,\"negative\":0.01},\"sentences\":[{\"sentiment\":\"neutral\",\"confidenceScores\":{\"positive\":0.05,\"neutral\":0.94,\"negative\":0.01},\"offset\":0,\"length\":41,\"text\":\"I + should take my cat to the veterinarian.\"}],\"warnings\":[]},{\"id\":\"2\",\"sentiment\":\"neutral\",\"confidenceScores\":{\"positive\":0.03,\"neutral\":0.92,\"negative\":0.05},\"sentences\":[{\"sentiment\":\"neutral\",\"confidenceScores\":{\"positive\":0.03,\"neutral\":0.92,\"negative\":0.05},\"offset\":0,\"length\":39,\"text\":\"Este + es un document escrito en Espa\xF1ol.\"}],\"warnings\":[]},{\"id\":\"3\",\"sentiment\":\"neutral\",\"confidenceScores\":{\"positive\":0.13,\"neutral\":0.81,\"negative\":0.06},\"sentences\":[{\"sentiment\":\"neutral\",\"confidenceScores\":{\"positive\":0.13,\"neutral\":0.81,\"negative\":0.06},\"offset\":0,\"length\":4,\"text\":\"\u732B\u306F\u5E78\u305B\"}],\"warnings\":[]}],\"errors\":[],\"modelVersion\":\"2020-04-01\"}" headers: - apim-request-id: 
1497a0c2-388b-4470-a942-b2a3782e62e3 + apim-request-id: 372279f1-9d44-47c6-8860-5b7da6b16582 content-type: application/json; charset=utf-8 csp-billing-usage: CognitiveServices.TextAnalytics.BatchScoring=3 - date: Wed, 27 Jan 2021 02:21:34 GMT + date: Sat, 20 Feb 2021 00:08:55 GMT strict-transport-security: max-age=31536000; includeSubDomains; preload transfer-encoding: chunked x-content-type-options: nosniff - x-envoy-upstream-service-time: '119' + x-envoy-upstream-service-time: '123' status: code: 200 message: OK diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/test_analyze_sentiment.py b/sdk/textanalytics/azure-ai-textanalytics/tests/test_analyze_sentiment.py index cd56c7d00308..0d89470a0d1b 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/test_analyze_sentiment.py +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/test_analyze_sentiment.py @@ -576,7 +576,7 @@ def callback(pipeline_response, deserialized, _): assert res == "cls result" @GlobalTextAnalyticsAccountPreparer() - @TextAnalyticsClientPreparer() + @TextAnalyticsClientPreparer(client_kwargs={"api_version": TextAnalyticsApiVersion.V3_1_PREVIEW_4}) def test_opinion_mining(self, client): documents = [ "It has a sleek premium aluminum design that makes it beautiful to look at." @@ -586,14 +586,14 @@ def test_opinion_mining(self, client): for sentence in document.sentences: for mined_opinion in sentence.mined_opinions: - aspect = mined_opinion.aspect - self.assertEqual('design', aspect.text) - self.assertEqual('positive', aspect.sentiment) - self.assertEqual(0.0, aspect.confidence_scores.neutral) - self.validateConfidenceScores(aspect.confidence_scores) - self.assertEqual(32, aspect.offset) - - sleek_opinion = mined_opinion.opinions[0] + target = mined_opinion.target + self.assertEqual('design', target.text) + self.assertEqual('positive', target.sentiment) + self.assertEqual(0.0, target.confidence_scores.neutral) + self.validateConfidenceScores(target.confidence_scores) + self.assertEqual(32, target.offset) + + sleek_opinion = mined_opinion.assessments[0] self.assertEqual('sleek', sleek_opinion.text) self.assertEqual('positive', sleek_opinion.sentiment) self.assertEqual(0.0, sleek_opinion.confidence_scores.neutral) @@ -601,7 +601,7 @@ def test_opinion_mining(self, client): self.assertEqual(9, sleek_opinion.offset) self.assertFalse(sleek_opinion.is_negated) - premium_opinion = mined_opinion.opinions[1] + premium_opinion = mined_opinion.assessments[1] self.assertEqual('premium', premium_opinion.text) self.assertEqual('positive', premium_opinion.sentiment) self.assertEqual(0.0, premium_opinion.confidence_scores.neutral) @@ -610,7 +610,7 @@ def test_opinion_mining(self, client): self.assertFalse(premium_opinion.is_negated) @GlobalTextAnalyticsAccountPreparer() - @TextAnalyticsClientPreparer() + @TextAnalyticsClientPreparer(client_kwargs={"api_version": TextAnalyticsApiVersion.V3_1_PREVIEW_4}) def test_opinion_mining_with_negated_opinion(self, client): documents = [ "The food and service is not good" @@ -619,23 +619,23 @@ def test_opinion_mining_with_negated_opinion(self, client): document = client.analyze_sentiment(documents=documents, show_opinion_mining=True)[0] for sentence in document.sentences: - food_aspect = sentence.mined_opinions[0].aspect - service_aspect = sentence.mined_opinions[1].aspect - - self.assertEqual('food', food_aspect.text) - self.assertEqual('negative', food_aspect.sentiment) - self.assertEqual(0.0, food_aspect.confidence_scores.neutral) - 
self.validateConfidenceScores(food_aspect.confidence_scores) - self.assertEqual(4, food_aspect.offset) - - self.assertEqual('service', service_aspect.text) - self.assertEqual('negative', service_aspect.sentiment) - self.assertEqual(0.0, service_aspect.confidence_scores.neutral) - self.validateConfidenceScores(service_aspect.confidence_scores) - self.assertEqual(13, service_aspect.offset) - - food_opinion = sentence.mined_opinions[0].opinions[0] - service_opinion = sentence.mined_opinions[1].opinions[0] + food_target = sentence.mined_opinions[0].target + service_target = sentence.mined_opinions[1].target + + self.assertEqual('food', food_target.text) + self.assertEqual('negative', food_target.sentiment) + self.assertEqual(0.0, food_target.confidence_scores.neutral) + self.validateConfidenceScores(food_target.confidence_scores) + self.assertEqual(4, food_target.offset) + + self.assertEqual('service', service_target.text) + self.assertEqual('negative', service_target.sentiment) + self.assertEqual(0.0, service_target.confidence_scores.neutral) + self.validateConfidenceScores(service_target.confidence_scores) + self.assertEqual(13, service_target.offset) + + food_opinion = sentence.mined_opinions[0].assessments[0] + service_opinion = sentence.mined_opinions[1].assessments[0] self.assertOpinionsEqual(food_opinion, service_opinion) self.assertEqual('good', food_opinion.text) @@ -647,7 +647,7 @@ def test_opinion_mining_with_negated_opinion(self, client): @GlobalTextAnalyticsAccountPreparer() - @TextAnalyticsClientPreparer() + @TextAnalyticsClientPreparer(client_kwargs={"api_version": TextAnalyticsApiVersion.V3_1_PREVIEW_4}) def test_opinion_mining_more_than_5_documents(self, client): documents = [ "The food was unacceptable", @@ -667,14 +667,14 @@ def test_opinion_mining_more_than_5_documents(self, client): opinion.text for sentence in doc_5.sentences for mined_opinion in sentence.mined_opinions - for opinion in mined_opinion.opinions + for opinion in mined_opinion.assessments ] doc_6_opinions = [ opinion.text for sentence in doc_6.sentences for mined_opinion in sentence.mined_opinions - for opinion in mined_opinion.opinions + for opinion in mined_opinion.assessments ] assert doc_5_opinions == ["nice", "old", "dirty"] diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/test_analyze_sentiment_async.py b/sdk/textanalytics/azure-ai-textanalytics/tests/test_analyze_sentiment_async.py index 5a06b66b157e..fc19bd20f36e 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/test_analyze_sentiment_async.py +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/test_analyze_sentiment_async.py @@ -592,7 +592,7 @@ def callback(pipeline_response, deserialized, _): assert res == "cls result" @GlobalTextAnalyticsAccountPreparer() - @TextAnalyticsClientPreparer() + @TextAnalyticsClientPreparer(client_kwargs={"api_version": TextAnalyticsApiVersion.V3_1_PREVIEW_4}) async def test_opinion_mining(self, client): documents = [ "It has a sleek premium aluminum design that makes it beautiful to look at." 
@@ -602,14 +602,14 @@ async def test_opinion_mining(self, client): for sentence in document.sentences: for mined_opinion in sentence.mined_opinions: - aspect = mined_opinion.aspect - self.assertEqual('design', aspect.text) - self.assertEqual('positive', aspect.sentiment) - self.assertEqual(0.0, aspect.confidence_scores.neutral) - self.validateConfidenceScores(aspect.confidence_scores) - self.assertEqual(32, aspect.offset) - - sleek_opinion = mined_opinion.opinions[0] + target = mined_opinion.target + self.assertEqual('design', target.text) + self.assertEqual('positive', target.sentiment) + self.assertEqual(0.0, target.confidence_scores.neutral) + self.validateConfidenceScores(target.confidence_scores) + self.assertEqual(32, target.offset) + + sleek_opinion = mined_opinion.assessments[0] self.assertEqual('sleek', sleek_opinion.text) self.assertEqual('positive', sleek_opinion.sentiment) self.assertEqual(0.0, sleek_opinion.confidence_scores.neutral) @@ -617,7 +617,7 @@ async def test_opinion_mining(self, client): self.assertEqual(9, sleek_opinion.offset) self.assertFalse(sleek_opinion.is_negated) - premium_opinion = mined_opinion.opinions[1] + premium_opinion = mined_opinion.assessments[1] self.assertEqual('premium', premium_opinion.text) self.assertEqual('positive', premium_opinion.sentiment) self.assertEqual(0.0, premium_opinion.confidence_scores.neutral) @@ -626,7 +626,7 @@ async def test_opinion_mining(self, client): self.assertFalse(premium_opinion.is_negated) @GlobalTextAnalyticsAccountPreparer() - @TextAnalyticsClientPreparer() + @TextAnalyticsClientPreparer(client_kwargs={"api_version": TextAnalyticsApiVersion.V3_1_PREVIEW_4}) async def test_opinion_mining_with_negated_opinion(self, client): documents = [ "The food and service is not good" @@ -635,23 +635,23 @@ async def test_opinion_mining_with_negated_opinion(self, client): document = (await client.analyze_sentiment(documents=documents, show_opinion_mining=True))[0] for sentence in document.sentences: - food_aspect = sentence.mined_opinions[0].aspect - service_aspect = sentence.mined_opinions[1].aspect - - self.assertEqual('food', food_aspect.text) - self.assertEqual('negative', food_aspect.sentiment) - self.assertEqual(0.0, food_aspect.confidence_scores.neutral) - self.validateConfidenceScores(food_aspect.confidence_scores) - self.assertEqual(4, food_aspect.offset) - - self.assertEqual('service', service_aspect.text) - self.assertEqual('negative', service_aspect.sentiment) - self.assertEqual(0.0, service_aspect.confidence_scores.neutral) - self.validateConfidenceScores(service_aspect.confidence_scores) - self.assertEqual(13, service_aspect.offset) - - food_opinion = sentence.mined_opinions[0].opinions[0] - service_opinion = sentence.mined_opinions[1].opinions[0] + food_target = sentence.mined_opinions[0].target + service_target = sentence.mined_opinions[1].target + + self.assertEqual('food', food_target.text) + self.assertEqual('negative', food_target.sentiment) + self.assertEqual(0.0, food_target.confidence_scores.neutral) + self.validateConfidenceScores(food_target.confidence_scores) + self.assertEqual(4, food_target.offset) + + self.assertEqual('service', service_target.text) + self.assertEqual('negative', service_target.sentiment) + self.assertEqual(0.0, service_target.confidence_scores.neutral) + self.validateConfidenceScores(service_target.confidence_scores) + self.assertEqual(13, service_target.offset) + + food_opinion = sentence.mined_opinions[0].assessments[0] + service_opinion = sentence.mined_opinions[1].assessments[0] 
 
             self.assertOpinionsEqual(food_opinion, service_opinion)
             self.assertEqual('good', food_opinion.text)
@@ -662,7 +662,7 @@ async def test_opinion_mining_with_negated_opinion(self, client):
             self.assertTrue(food_opinion.is_negated)
 
     @GlobalTextAnalyticsAccountPreparer()
-    @TextAnalyticsClientPreparer()
+    @TextAnalyticsClientPreparer(client_kwargs={"api_version": TextAnalyticsApiVersion.V3_1_PREVIEW_4})
     async def test_opinion_mining_more_than_5_documents(self, client):
         documents = [
             "The food was unacceptable",
@@ -682,14 +682,14 @@ async def test_opinion_mining_more_than_5_documents(self, client):
             opinion.text
             for sentence in doc_5.sentences
             for mined_opinion in sentence.mined_opinions
-            for opinion in mined_opinion.opinions
+            for opinion in mined_opinion.assessments
         ]
 
         doc_6_opinions = [
             opinion.text
             for sentence in doc_6.sentences
             for mined_opinion in sentence.mined_opinions
-            for opinion in mined_opinion.opinions
+            for opinion in mined_opinion.assessments
         ]
 
         assert doc_5_opinions == ["nice", "old", "dirty"]
diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/test_json_pointer.py b/sdk/textanalytics/azure-ai-textanalytics/tests/test_json_pointer.py
index 2d1e572dea46..a15616d5cdb9 100644
--- a/sdk/textanalytics/azure-ai-textanalytics/tests/test_json_pointer.py
+++ b/sdk/textanalytics/azure-ai-textanalytics/tests/test_json_pointer.py
@@ -7,20 +7,20 @@
 
 import pytest
 from azure.ai.textanalytics._models import (
     AnalyzeSentimentResult,
-    AspectSentiment,
-    OpinionSentiment,
+    TargetSentiment,
+    AssessmentSentiment,
     SentenceSentiment,
     _get_indices,
 )
 from azure.ai.textanalytics._response_handlers import sentiment_result
 
-from azure.ai.textanalytics._generated.v3_1_preview_3 import models as _generated_models
+from azure.ai.textanalytics._generated.v3_1_preview_4 import models as _generated_models
 
 
 @pytest.fixture
-def generated_aspect_opinion_confidence_scores():
-    return _generated_models.AspectConfidenceScoreLabel(
+def generated_target_assessment_confidence_scores():
+    return _generated_models.TargetConfidenceScoreLabel(
         positive=1.0,
         neutral=0.0,
         negative=0.0,
@@ -35,55 +35,55 @@ def generated_sentiment_confidence_score():
     )
 
 
 @pytest.fixture
-def generated_aspect_relation():
-    return _generated_models.AspectRelation(
-        relation_type="opinion",
-        ref="#/documents/0/sentences/1/opinions/0"
+def generated_target_relation():
+    return _generated_models.TargetRelation(
+        relation_type="assessment",
+        ref="#/documents/0/sentences/1/assessments/0"
     )
 
 @pytest.fixture
-def generated_aspect(generated_aspect_opinion_confidence_scores, generated_aspect_relation):
-    return _generated_models.SentenceAspect(
+def generated_target(generated_target_assessment_confidence_scores, generated_target_relation):
+    return _generated_models.SentenceTarget(
         text="aspect",
         sentiment="positive",
-        confidence_scores=generated_aspect_opinion_confidence_scores,
+        confidence_scores=generated_target_assessment_confidence_scores,
         offset=0,
         length=6,
-        relations=[generated_aspect_relation],
+        relations=[generated_target_relation],
     )
 
 @pytest.fixture
-def generated_opinion(generated_aspect_opinion_confidence_scores):
-    return _generated_models.SentenceOpinion(
+def generated_assessment(generated_target_assessment_confidence_scores):
+    return _generated_models.SentenceAssessment(
         text="good",
         sentiment="positive",
-        confidence_scores=generated_aspect_opinion_confidence_scores,
+        confidence_scores=generated_target_assessment_confidence_scores,
         offset=0,
         length=4,
         is_negated=False,
     )
 
-def generated_sentence_sentiment(generated_sentiment_confidence_score, index, aspects=[], opinions=[]):
+def generated_sentence_sentiment(generated_sentiment_confidence_score, index, targets=[], assessments=[]):
     return _generated_models.SentenceSentiment(
         text="not relevant",
         sentiment="positive",
         confidence_scores=generated_sentiment_confidence_score,
         offset=0,
         length=12,
-        aspects=aspects,
-        opinions=opinions,
+        targets=targets,
+        assessments=assessments,
     )
 
 
 @pytest.fixture
-def generated_document_sentiment(generated_aspect, generated_opinion, generated_sentiment_confidence_score):
-    aspect_sentence = generated_sentence_sentiment(generated_sentiment_confidence_score, index=0, aspects=[generated_aspect])
-    opinion_sentence = generated_sentence_sentiment(generated_sentiment_confidence_score, index=1, opinions=[generated_opinion])
+def generated_document_sentiment(generated_target, generated_assessment, generated_sentiment_confidence_score):
+    target_sentence = generated_sentence_sentiment(generated_sentiment_confidence_score, index=0, targets=[generated_target])
+    assessment_sentence = generated_sentence_sentiment(generated_sentiment_confidence_score, index=1, assessments=[generated_assessment])
     return _generated_models.DocumentSentiment(
         id=1,
         sentiment="positive",
         confidence_scores=generated_sentiment_confidence_score,
-        sentences=[aspect_sentence, opinion_sentence],
+        sentences=[target_sentence, assessment_sentence],
         warnings=[],
     )
@@ -99,13 +99,13 @@ def generated_sentiment_response(generated_document_sentiment):
 
 class TestJsonPointer():
     def test_json_pointer_parsing(self):
-        assert [1, 0, 15] == _get_indices("#/documents/1/sentences/0/opinions/15")
+        assert [1, 0, 15] == _get_indices("#/documents/1/sentences/0/assessments/15")
 
-    def test_opinion_different_sentence_aspect(self, generated_sentiment_response):
-        # the first sentence has the aspect, and the second sentence has the opinion
-        # the desired behavior is the first wrapped sentence object has an aspect, and it's opinion
+    def test_opinion_different_sentence_target(self, generated_sentiment_response):
+        # the first sentence has the target, and the second sentence has the assessment
+        # the desired behavior is that the first wrapped sentence object has a target, and its assessment
         # is in the second sentence.
-        # the second sentence will have no mined opinions, since we define that as an aspect and opinion duo
+        # the second sentence will have no mined opinions, since we define that as a target and assessment duo
         wrapped_sentiment = sentiment_result("not relevant", generated_sentiment_response, {})[0]
-        assert wrapped_sentiment.sentences[0].mined_opinions[0].opinions[0].text == "good"
+        assert wrapped_sentiment.sentences[0].mined_opinions[0].assessments[0].text == "good"
         assert not wrapped_sentiment.sentences[1].mined_opinions
diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/test_repr.py b/sdk/textanalytics/azure-ai-textanalytics/tests/test_repr.py
index 843753d06702..5653966b7d06 100644
--- a/sdk/textanalytics/azure-ai-textanalytics/tests/test_repr.py
+++ b/sdk/textanalytics/azure-ai-textanalytics/tests/test_repr.py
@@ -8,7 +8,7 @@
 import pytest
 import datetime
 from azure.ai.textanalytics import _models
-from azure.ai.textanalytics._generated.v3_1_preview_3 import models as _generated_models
+from azure.ai.textanalytics._generated.v3_1_preview_4 import models as _generated_models
 
 
 # All features return a tuple of the object and the repr of the obejct
@@ -144,7 +144,7 @@ def sentiment_confidence_scores():
     return model, model_repr
 
 @pytest.fixture
-def aspect_opinion_confidence_score():
+def target_assessment_confidence_score():
     model = _models.SentimentConfidenceScores(
         positive=0.5,
         negative=0.5
@@ -154,43 +154,43 @@ def aspect_opinion_confidence_score():
     return model, model_repr
 
 @pytest.fixture
-def aspect_sentiment(aspect_opinion_confidence_score):
-    model = _models.AspectSentiment(
+def target_sentiment(target_assessment_confidence_score):
+    model = _models.TargetSentiment(
         text="aspect",
         sentiment="positive",
-        confidence_scores=aspect_opinion_confidence_score[0],
+        confidence_scores=target_assessment_confidence_score[0],
         length=6,
         offset=10,
     )
-    model_repr = "AspectSentiment(text=aspect, sentiment=positive, confidence_scores={}, length=6, offset=10)".format(
-        aspect_opinion_confidence_score[1]
+    model_repr = "TargetSentiment(text=aspect, sentiment=positive, confidence_scores={}, length=6, offset=10)".format(
+        target_assessment_confidence_score[1]
     )
 
     assert repr(model) == model_repr
     return model, model_repr
 
 @pytest.fixture
-def opinion_sentiment(aspect_opinion_confidence_score):
-    model = _models.OpinionSentiment(
+def assessment_sentiment(target_assessment_confidence_score):
+    model = _models.AssessmentSentiment(
         text="opinion",
         sentiment="positive",
-        confidence_scores=aspect_opinion_confidence_score[0],
+        confidence_scores=target_assessment_confidence_score[0],
         length=7,
         offset=3,
         is_negated=False
     )
-    model_repr = "OpinionSentiment(text=opinion, sentiment=positive, confidence_scores={}, length=7, offset=3, is_negated=False)".format(
-        aspect_opinion_confidence_score[1]
+    model_repr = "AssessmentSentiment(text=opinion, sentiment=positive, confidence_scores={}, length=7, offset=3, is_negated=False)".format(
+        target_assessment_confidence_score[1]
     )
 
     assert repr(model) == model_repr
     return model, model_repr
 
 @pytest.fixture
-def mined_opinion(aspect_sentiment, opinion_sentiment):
+def mined_opinion(target_sentiment, assessment_sentiment):
     model = _models.MinedOpinion(
-        aspect=aspect_sentiment[0],
-        opinions=[opinion_sentiment[0]]
+        target=target_sentiment[0],
+        assessments=[assessment_sentiment[0]]
     )
-    model_repr = "MinedOpinion(aspect={}, opinions=[{}])".format(aspect_sentiment[1], opinion_sentiment[1])
+    model_repr = "MinedOpinion(target={}, assessments=[{}])".format(target_sentiment[1], assessment_sentiment[1])
 
     assert repr(model) == model_repr
     return model, model_repr