diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/__init__.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/__init__.py
deleted file mode 100644
index 748335a88191..000000000000
--- a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/__init__.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# coding=utf-8
-# ------------------------------------
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-# ------------------------------------
-
-from ._text_analytics_client import TextAnalyticsClient
-from ._version import VERSION
-from ._models import (
- DetectLanguageInput,
- TextDocumentInput,
- DetectedLanguage,
- DocumentError,
- CategorizedEntity,
- LinkedEntity,
- AnalyzeSentimentResult,
- RecognizeEntitiesResult,
- DetectLanguageResult,
- TextAnalyticsError,
- TextAnalyticsWarning,
- ExtractKeyPhrasesResult,
- RecognizeLinkedEntitiesResult,
- TextDocumentStatistics,
- LinkedEntityMatch,
- TextDocumentBatchStatistics,
- SentenceSentiment,
- SentimentConfidenceScores
-)
-
-__all__ = [
- 'TextAnalyticsClient',
- 'DetectLanguageInput',
- 'TextDocumentInput',
- 'DetectedLanguage',
- 'RecognizeEntitiesResult',
- 'DetectLanguageResult',
- 'CategorizedEntity',
- 'TextAnalyticsError',
- 'TextAnalyticsWarning',
- 'ExtractKeyPhrasesResult',
- 'RecognizeLinkedEntitiesResult',
- 'AnalyzeSentimentResult',
- 'TextDocumentStatistics',
- 'DocumentError',
- 'LinkedEntity',
- 'LinkedEntityMatch',
- 'TextDocumentBatchStatistics',
- 'SentenceSentiment',
- 'SentimentConfidenceScores'
-]
-
-__version__ = VERSION
diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_base_client.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_base_client.py
deleted file mode 100644
index cf79ffd26524..000000000000
--- a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_base_client.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# coding=utf-8
-# ------------------------------------
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-# ------------------------------------
-
-from azure.core.pipeline.policies import AzureKeyCredentialPolicy
-from azure.core.credentials import AzureKeyCredential
-from ._policies import TextAnalyticsResponseHookPolicy
-from ._user_agent import USER_AGENT
-from ._generated import TextAnalyticsClient
-
-def _authentication_policy(credential):
- authentication_policy = None
- if credential is None:
- raise ValueError("Parameter 'credential' must not be None.")
- if isinstance(credential, AzureKeyCredential):
- authentication_policy = AzureKeyCredentialPolicy(
- name="Ocp-Apim-Subscription-Key", credential=credential
- )
- elif credential is not None and not hasattr(credential, "get_token"):
- raise TypeError("Unsupported credential: {}. Use an instance of AzureKeyCredential "
- "or a token credential from azure.identity".format(type(credential)))
- return authentication_policy
-
-
-class TextAnalyticsClientBase(object):
- def __init__(self, endpoint, credential, **kwargs):
- self._client = TextAnalyticsClient(
- endpoint=endpoint,
- credential=credential,
- sdk_moniker=USER_AGENT,
- authentication_policy=_authentication_policy(credential),
- custom_hook_policy=TextAnalyticsResponseHookPolicy(**kwargs),
- **kwargs
- )
-
-
- def __enter__(self):
- self._client.__enter__() # pylint:disable=no-member
- return self
-
- def __exit__(self, *args):
- self._client.__exit__(*args) # pylint:disable=no-member
-
- def close(self):
- # type: () -> None
- """Close sockets opened by the client.
- Calling this method is unnecessary when using the client as a context manager.
- """
- self._client.close()
diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/_configuration.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/_configuration.py
deleted file mode 100644
index 4870417a532a..000000000000
--- a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/_configuration.py
+++ /dev/null
@@ -1,68 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for license information.
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is regenerated.
-# --------------------------------------------------------------------------
-
-from typing import TYPE_CHECKING
-
-from azure.core.configuration import Configuration
-from azure.core.pipeline import policies
-
-if TYPE_CHECKING:
- # pylint: disable=unused-import,ungrouped-imports
- from typing import Any
-
- from azure.core.credentials import TokenCredential
-
-VERSION = "unknown"
-
-class TextAnalyticsClientConfiguration(Configuration):
- """Configuration for TextAnalyticsClient.
-
- Note that all parameters used to create this instance are saved as instance
- attributes.
-
- :param credential: Credential needed for the client to connect to Azure.
- :type credential: ~azure.core.credentials.TokenCredential
- :param endpoint: Supported Cognitive Services endpoints (protocol and hostname, for example: https://westus.api.cognitive.microsoft.com).
- :type endpoint: str
- """
-
- def __init__(
- self,
- credential, # type: "TokenCredential"
- endpoint, # type: str
- **kwargs # type: Any
- ):
- # type: (...) -> None
- if credential is None:
- raise ValueError("Parameter 'credential' must not be None.")
- if endpoint is None:
- raise ValueError("Parameter 'endpoint' must not be None.")
- super(TextAnalyticsClientConfiguration, self).__init__(**kwargs)
-
- self.credential = credential
- self.endpoint = endpoint
- self.credential_scopes = ['https://cognitiveservices.azure.com/.default']
- self.credential_scopes.extend(kwargs.pop('credential_scopes', []))
- kwargs.setdefault('sdk_moniker', 'ai-textanalytics/{}'.format(VERSION))
- self._configure(**kwargs)
-
- def _configure(
- self,
- **kwargs # type: Any
- ):
- # type: (...) -> None
- self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
- self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
- self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
- self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
- self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs)
- self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
- self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs)
- self.authentication_policy = kwargs.get('authentication_policy')
- if self.credential and not self.authentication_policy:
- self.authentication_policy = policies.BearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/_text_analytics_client.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/_text_analytics_client.py
deleted file mode 100644
index d54e09733c38..000000000000
--- a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/_text_analytics_client.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for license information.
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is regenerated.
-# --------------------------------------------------------------------------
-
-from typing import TYPE_CHECKING
-
-from azure.core import PipelineClient
-from msrest import Deserializer, Serializer
-
-if TYPE_CHECKING:
- # pylint: disable=unused-import,ungrouped-imports
- from typing import Any
-
-from ._configuration import TextAnalyticsClientConfiguration
-from .operations import TextAnalyticsClientOperationsMixin
-from . import models
-
-
-class TextAnalyticsClient(TextAnalyticsClientOperationsMixin):
- """The Text Analytics API is a suite of text analytics web services built with best-in-class Microsoft machine learning algorithms. The API can be used to analyze unstructured text for tasks such as sentiment analysis, key phrase extraction and language detection. No training data is needed to use this API; just bring your text data. This API uses advanced natural language processing techniques to deliver best in class predictions. Further documentation can be found in https://docs.microsoft.com/en-us/azure/cognitive-services/text-analytics/overview.
-
- :param credential: Credential needed for the client to connect to Azure.
- :type credential: ~azure.core.credentials.TokenCredential
- :param endpoint: Supported Cognitive Services endpoints (protocol and hostname, for example: https://westus.api.cognitive.microsoft.com).
- :type endpoint: str
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
- """
-
- def __init__(
- self,
- credential, # type: "TokenCredential"
- endpoint, # type: str
- **kwargs # type: Any
- ):
- # type: (...) -> None
- base_url = '{Endpoint}/text/analytics/v3.0'
- self._config = TextAnalyticsClientConfiguration(credential, endpoint, **kwargs)
- self._client = PipelineClient(base_url=base_url, config=self._config, **kwargs)
-
- client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
- self._serialize = Serializer(client_models)
- self._deserialize = Deserializer(client_models)
-
-
- def close(self):
- # type: () -> None
- self._client.close()
-
- def __enter__(self):
- # type: () -> TextAnalyticsClient
- self._client.__enter__()
- return self
-
- def __exit__(self, *exc_details):
- # type: (Any) -> None
- self._client.__exit__(*exc_details)
diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/aio/_configuration_async.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/aio/_configuration_async.py
deleted file mode 100644
index 4865a1884975..000000000000
--- a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/aio/_configuration_async.py
+++ /dev/null
@@ -1,64 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for license information.
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is regenerated.
-# --------------------------------------------------------------------------
-
-from typing import Any, TYPE_CHECKING
-
-from azure.core.configuration import Configuration
-from azure.core.pipeline import policies
-
-if TYPE_CHECKING:
- # pylint: disable=unused-import,ungrouped-imports
- from azure.core.credentials import TokenCredential
-
-VERSION = "unknown"
-
-class TextAnalyticsClientConfiguration(Configuration):
- """Configuration for TextAnalyticsClient.
-
- Note that all parameters used to create this instance are saved as instance
- attributes.
-
- :param credential: Credential needed for the client to connect to Azure.
- :type credential: ~azure.core.credentials_async.AsyncTokenCredential
- :param endpoint: Supported Cognitive Services endpoints (protocol and hostname, for example: https://westus.api.cognitive.microsoft.com).
- :type endpoint: str
- """
-
- def __init__(
- self,
- credential: "AsyncTokenCredential",
- endpoint: str,
- **kwargs: Any
- ) -> None:
- if credential is None:
- raise ValueError("Parameter 'credential' must not be None.")
- if endpoint is None:
- raise ValueError("Parameter 'endpoint' must not be None.")
- super(TextAnalyticsClientConfiguration, self).__init__(**kwargs)
-
- self.credential = credential
- self.endpoint = endpoint
- self.credential_scopes = ['https://cognitiveservices.azure.com/.default']
- self.credential_scopes.extend(kwargs.pop('credential_scopes', []))
- kwargs.setdefault('sdk_moniker', 'ai-textanalytics/{}'.format(VERSION))
- self._configure(**kwargs)
-
- def _configure(
- self,
- **kwargs: Any
- ) -> None:
- self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
- self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
- self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
- self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
- self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
- self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
- self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
- self.authentication_policy = kwargs.get('authentication_policy')
- if self.credential and not self.authentication_policy:
- self.authentication_policy = policies.AsyncBearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/aio/_text_analytics_client_async.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/aio/_text_analytics_client_async.py
deleted file mode 100644
index de28032b0db2..000000000000
--- a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/aio/_text_analytics_client_async.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for license information.
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is regenerated.
-# --------------------------------------------------------------------------
-
-from typing import Any
-
-from azure.core import AsyncPipelineClient
-from msrest import Deserializer, Serializer
-
-from ._configuration_async import TextAnalyticsClientConfiguration
-from .operations_async import TextAnalyticsClientOperationsMixin
-from .. import models
-
-
-class TextAnalyticsClient(TextAnalyticsClientOperationsMixin):
- """The Text Analytics API is a suite of text analytics web services built with best-in-class Microsoft machine learning algorithms. The API can be used to analyze unstructured text for tasks such as sentiment analysis, key phrase extraction and language detection. No training data is needed to use this API; just bring your text data. This API uses advanced natural language processing techniques to deliver best in class predictions. Further documentation can be found in https://docs.microsoft.com/en-us/azure/cognitive-services/text-analytics/overview.
-
- :param credential: Credential needed for the client to connect to Azure.
- :type credential: ~azure.core.credentials_async.AsyncTokenCredential
- :param endpoint: Supported Cognitive Services endpoints (protocol and hostname, for example: https://westus.api.cognitive.microsoft.com).
- :type endpoint: str
- :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
- """
-
- def __init__(
- self,
- credential: "AsyncTokenCredential",
- endpoint: str,
- **kwargs: Any
- ) -> None:
- base_url = '{Endpoint}/text/analytics/v3.0'
- self._config = TextAnalyticsClientConfiguration(credential, endpoint, **kwargs)
- self._client = AsyncPipelineClient(base_url=base_url, config=self._config, **kwargs)
-
- client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
- self._serialize = Serializer(client_models)
- self._deserialize = Deserializer(client_models)
-
-
- async def close(self) -> None:
- await self._client.close()
-
- async def __aenter__(self) -> "TextAnalyticsClient":
- await self._client.__aenter__()
- return self
-
- async def __aexit__(self, *exc_details) -> None:
- await self._client.__aexit__(*exc_details)
diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/aio/operations_async/_text_analytics_client_operations_async.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/aio/operations_async/_text_analytics_client_operations_async.py
deleted file mode 100644
index d09be1dbc240..000000000000
--- a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/aio/operations_async/_text_analytics_client_operations_async.py
+++ /dev/null
@@ -1,380 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for license information.
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is regenerated.
-# --------------------------------------------------------------------------
-from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar
-import warnings
-
-from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
-from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
-
-from ... import models
-
-T = TypeVar('T')
-ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
-
-class TextAnalyticsClientOperationsMixin:
-
- async def entities_recognition_general(
- self,
- documents: List["models.MultiLanguageInput"],
- model_version: Optional[str] = None,
- show_stats: Optional[bool] = None,
- **kwargs
- ) -> "models.EntitiesResult":
- """The API returns a list of general named entities in a given document. For the list of supported entity types, check :code:`Supported Entity Types in Text Analytics API`. See the :code:`Supported languages in Text Analytics API` for the list of enabled languages.
-
- Named Entity Recognition.
-
- :param documents: The set of documents to process as part of this batch.
- :type documents: list[~azure.ai.textanalytics.models.MultiLanguageInput]
- :param model_version: (Optional) This value indicates which model will be used for scoring. If
- a model-version is not specified, the API should default to the latest, non-preview version.
- :type model_version: str
- :param show_stats: (Optional) if set to true, response will contain input and document level
- statistics.
- :type show_stats: bool
- :keyword callable cls: A custom type or function that will be passed the direct response
- :return: EntitiesResult or the result of cls(response)
- :rtype: ~azure.ai.textanalytics.models.EntitiesResult
- :raises: ~azure.core.exceptions.HttpResponseError
- """
- cls = kwargs.pop('cls', None) # type: ClsType["models.EntitiesResult"]
- error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
- error_map.update(kwargs.pop('error_map', {}))
-
- _input = models.MultiLanguageBatchInput(documents=documents)
- content_type = kwargs.pop("content_type", "application/json")
-
- # Construct URL
- url = self.entities_recognition_general.metadata['url'] # type: ignore
- path_format_arguments = {
- 'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
- }
- url = self._client.format_url(url, **path_format_arguments)
-
- # Construct parameters
- query_parameters = {} # type: Dict[str, Any]
- if model_version is not None:
- query_parameters['model-version'] = self._serialize.query("model_version", model_version, 'str')
- if show_stats is not None:
- query_parameters['showStats'] = self._serialize.query("show_stats", show_stats, 'bool')
-
- # Construct headers
- header_parameters = {} # type: Dict[str, Any]
- header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
- header_parameters['Accept'] = 'application/json'
-
- # Construct and send request
- body_content_kwargs = {} # type: Dict[str, Any]
- body_content = self._serialize.body(_input, 'MultiLanguageBatchInput')
- body_content_kwargs['content'] = body_content
- request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
-
- pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
- response = pipeline_response.http_response
-
- if response.status_code not in [200]:
- map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize(models.TextAnalyticsError, response)
- raise HttpResponseError(response=response, model=error)
-
- deserialized = self._deserialize('EntitiesResult', pipeline_response)
-
- if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
- entities_recognition_general.metadata = {'url': '/entities/recognition/general'} # type: ignore
-
- async def entities_linking(
- self,
- documents: List["models.MultiLanguageInput"],
- model_version: Optional[str] = None,
- show_stats: Optional[bool] = None,
- **kwargs
- ) -> "models.EntityLinkingResult":
- """The API returns a list of recognized entities with links to a well-known knowledge base. See the :code:`Supported languages in Text Analytics API` for the list of enabled languages.
-
- Linked entities from a well-known knowledge base.
-
- :param documents: The set of documents to process as part of this batch.
- :type documents: list[~azure.ai.textanalytics.models.MultiLanguageInput]
- :param model_version: (Optional) This value indicates which model will be used for scoring. If
- a model-version is not specified, the API should default to the latest, non-preview version.
- :type model_version: str
- :param show_stats: (Optional) if set to true, response will contain input and document level
- statistics.
- :type show_stats: bool
- :keyword callable cls: A custom type or function that will be passed the direct response
- :return: EntityLinkingResult or the result of cls(response)
- :rtype: ~azure.ai.textanalytics.models.EntityLinkingResult
- :raises: ~azure.core.exceptions.HttpResponseError
- """
- cls = kwargs.pop('cls', None) # type: ClsType["models.EntityLinkingResult"]
- error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
- error_map.update(kwargs.pop('error_map', {}))
-
- _input = models.MultiLanguageBatchInput(documents=documents)
- content_type = kwargs.pop("content_type", "application/json")
-
- # Construct URL
- url = self.entities_linking.metadata['url'] # type: ignore
- path_format_arguments = {
- 'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
- }
- url = self._client.format_url(url, **path_format_arguments)
-
- # Construct parameters
- query_parameters = {} # type: Dict[str, Any]
- if model_version is not None:
- query_parameters['model-version'] = self._serialize.query("model_version", model_version, 'str')
- if show_stats is not None:
- query_parameters['showStats'] = self._serialize.query("show_stats", show_stats, 'bool')
-
- # Construct headers
- header_parameters = {} # type: Dict[str, Any]
- header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
- header_parameters['Accept'] = 'application/json'
-
- # Construct and send request
- body_content_kwargs = {} # type: Dict[str, Any]
- body_content = self._serialize.body(_input, 'MultiLanguageBatchInput')
- body_content_kwargs['content'] = body_content
- request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
-
- pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
- response = pipeline_response.http_response
-
- if response.status_code not in [200]:
- map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize(models.TextAnalyticsError, response)
- raise HttpResponseError(response=response, model=error)
-
- deserialized = self._deserialize('EntityLinkingResult', pipeline_response)
-
- if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
- entities_linking.metadata = {'url': '/entities/linking'} # type: ignore
-
- async def key_phrases(
- self,
- documents: List["models.MultiLanguageInput"],
- model_version: Optional[str] = None,
- show_stats: Optional[bool] = None,
- **kwargs
- ) -> "models.KeyPhraseResult":
- """The API returns a list of strings denoting the key phrases in the input text. See the :code:`Supported languages in Text Analytics API` for the list of enabled languages.
-
- Key Phrases.
-
- :param documents: The set of documents to process as part of this batch.
- :type documents: list[~azure.ai.textanalytics.models.MultiLanguageInput]
- :param model_version: (Optional) This value indicates which model will be used for scoring. If
- a model-version is not specified, the API should default to the latest, non-preview version.
- :type model_version: str
- :param show_stats: (Optional) if set to true, response will contain input and document level
- statistics.
- :type show_stats: bool
- :keyword callable cls: A custom type or function that will be passed the direct response
- :return: KeyPhraseResult or the result of cls(response)
- :rtype: ~azure.ai.textanalytics.models.KeyPhraseResult
- :raises: ~azure.core.exceptions.HttpResponseError
- """
- cls = kwargs.pop('cls', None) # type: ClsType["models.KeyPhraseResult"]
- error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
- error_map.update(kwargs.pop('error_map', {}))
-
- _input = models.MultiLanguageBatchInput(documents=documents)
- content_type = kwargs.pop("content_type", "application/json")
-
- # Construct URL
- url = self.key_phrases.metadata['url'] # type: ignore
- path_format_arguments = {
- 'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
- }
- url = self._client.format_url(url, **path_format_arguments)
-
- # Construct parameters
- query_parameters = {} # type: Dict[str, Any]
- if model_version is not None:
- query_parameters['model-version'] = self._serialize.query("model_version", model_version, 'str')
- if show_stats is not None:
- query_parameters['showStats'] = self._serialize.query("show_stats", show_stats, 'bool')
-
- # Construct headers
- header_parameters = {} # type: Dict[str, Any]
- header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
- header_parameters['Accept'] = 'application/json'
-
- # Construct and send request
- body_content_kwargs = {} # type: Dict[str, Any]
- body_content = self._serialize.body(_input, 'MultiLanguageBatchInput')
- body_content_kwargs['content'] = body_content
- request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
-
- pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
- response = pipeline_response.http_response
-
- if response.status_code not in [200]:
- map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize(models.TextAnalyticsError, response)
- raise HttpResponseError(response=response, model=error)
-
- deserialized = self._deserialize('KeyPhraseResult', pipeline_response)
-
- if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
- key_phrases.metadata = {'url': '/keyPhrases'} # type: ignore
-
- async def languages(
- self,
- documents: List["models.LanguageInput"],
- model_version: Optional[str] = None,
- show_stats: Optional[bool] = None,
- **kwargs
- ) -> "models.LanguageResult":
- """The API returns the detected language and a numeric score between 0 and 1. Scores close to 1 indicate 100% certainty that the identified language is true. See the :code:`Supported languages in Text Analytics API` for the list of enabled languages.
-
- Detect Language.
-
- :param documents:
- :type documents: list[~azure.ai.textanalytics.models.LanguageInput]
- :param model_version: (Optional) This value indicates which model will be used for scoring. If
- a model-version is not specified, the API should default to the latest, non-preview version.
- :type model_version: str
- :param show_stats: (Optional) if set to true, response will contain input and document level
- statistics.
- :type show_stats: bool
- :keyword callable cls: A custom type or function that will be passed the direct response
- :return: LanguageResult or the result of cls(response)
- :rtype: ~azure.ai.textanalytics.models.LanguageResult
- :raises: ~azure.core.exceptions.HttpResponseError
- """
- cls = kwargs.pop('cls', None) # type: ClsType["models.LanguageResult"]
- error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
- error_map.update(kwargs.pop('error_map', {}))
-
- _input = models.LanguageBatchInput(documents=documents)
- content_type = kwargs.pop("content_type", "application/json")
-
- # Construct URL
- url = self.languages.metadata['url'] # type: ignore
- path_format_arguments = {
- 'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
- }
- url = self._client.format_url(url, **path_format_arguments)
-
- # Construct parameters
- query_parameters = {} # type: Dict[str, Any]
- if model_version is not None:
- query_parameters['model-version'] = self._serialize.query("model_version", model_version, 'str')
- if show_stats is not None:
- query_parameters['showStats'] = self._serialize.query("show_stats", show_stats, 'bool')
-
- # Construct headers
- header_parameters = {} # type: Dict[str, Any]
- header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
- header_parameters['Accept'] = 'application/json'
-
- # Construct and send request
- body_content_kwargs = {} # type: Dict[str, Any]
- body_content = self._serialize.body(_input, 'LanguageBatchInput')
- body_content_kwargs['content'] = body_content
- request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
-
- pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
- response = pipeline_response.http_response
-
- if response.status_code not in [200]:
- map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize(models.TextAnalyticsError, response)
- raise HttpResponseError(response=response, model=error)
-
- deserialized = self._deserialize('LanguageResult', pipeline_response)
-
- if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
- languages.metadata = {'url': '/languages'} # type: ignore
-
- async def sentiment(
- self,
- documents: List["models.MultiLanguageInput"],
- model_version: Optional[str] = None,
- show_stats: Optional[bool] = None,
- **kwargs
- ) -> "models.SentimentResponse":
- """The API returns a sentiment prediction, as well as sentiment scores for each sentiment class (Positive, Negative, and Neutral) for the document and each sentence within it. See the :code:`Supported languages in Text Analytics API` for the list of enabled languages.
-
- Sentiment.
-
- :param documents: The set of documents to process as part of this batch.
- :type documents: list[~azure.ai.textanalytics.models.MultiLanguageInput]
- :param model_version: (Optional) This value indicates which model will be used for scoring. If
- a model-version is not specified, the API should default to the latest, non-preview version.
- :type model_version: str
- :param show_stats: (Optional) if set to true, response will contain input and document level
- statistics.
- :type show_stats: bool
- :keyword callable cls: A custom type or function that will be passed the direct response
- :return: SentimentResponse or the result of cls(response)
- :rtype: ~azure.ai.textanalytics.models.SentimentResponse
- :raises: ~azure.core.exceptions.HttpResponseError
- """
- cls = kwargs.pop('cls', None) # type: ClsType["models.SentimentResponse"]
- error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
- error_map.update(kwargs.pop('error_map', {}))
-
- _input = models.MultiLanguageBatchInput(documents=documents)
- content_type = kwargs.pop("content_type", "application/json")
-
- # Construct URL
- url = self.sentiment.metadata['url'] # type: ignore
- path_format_arguments = {
- 'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
- }
- url = self._client.format_url(url, **path_format_arguments)
-
- # Construct parameters
- query_parameters = {} # type: Dict[str, Any]
- if model_version is not None:
- query_parameters['model-version'] = self._serialize.query("model_version", model_version, 'str')
- if show_stats is not None:
- query_parameters['showStats'] = self._serialize.query("show_stats", show_stats, 'bool')
-
- # Construct headers
- header_parameters = {} # type: Dict[str, Any]
- header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
- header_parameters['Accept'] = 'application/json'
-
- # Construct and send request
- body_content_kwargs = {} # type: Dict[str, Any]
- body_content = self._serialize.body(_input, 'MultiLanguageBatchInput')
- body_content_kwargs['content'] = body_content
- request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
-
- pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
- response = pipeline_response.http_response
-
- if response.status_code not in [200]:
- map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize(models.TextAnalyticsError, response)
- raise HttpResponseError(response=response, model=error)
-
- deserialized = self._deserialize('SentimentResponse', pipeline_response)
-
- if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
- sentiment.metadata = {'url': '/sentiment'} # type: ignore
diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/operations/_text_analytics_client_operations.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/operations/_text_analytics_client_operations.py
deleted file mode 100644
index 77c33a1778c7..000000000000
--- a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/operations/_text_analytics_client_operations.py
+++ /dev/null
@@ -1,389 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for license information.
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is regenerated.
-# --------------------------------------------------------------------------
-from typing import TYPE_CHECKING
-import warnings
-
-from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
-from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import HttpRequest, HttpResponse
-
-from .. import models
-
-if TYPE_CHECKING:
- # pylint: disable=unused-import,ungrouped-imports
- from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar
-
- T = TypeVar('T')
- ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
-
-class TextAnalyticsClientOperationsMixin(object):
-
- def entities_recognition_general(
- self,
- documents, # type: List["models.MultiLanguageInput"]
- model_version=None, # type: Optional[str]
- show_stats=None, # type: Optional[bool]
- **kwargs # type: Any
- ):
- # type: (...) -> "models.EntitiesResult"
- """The API returns a list of general named entities in a given document. For the list of supported entity types, check :code:`Supported Entity Types in Text Analytics API`. See the :code:`Supported languages in Text Analytics API` for the list of enabled languages.
-
- Named Entity Recognition.
-
- :param documents: The set of documents to process as part of this batch.
- :type documents: list[~azure.ai.textanalytics.models.MultiLanguageInput]
- :param model_version: (Optional) This value indicates which model will be used for scoring. If
- a model-version is not specified, the API should default to the latest, non-preview version.
- :type model_version: str
- :param show_stats: (Optional) if set to true, response will contain input and document level
- statistics.
- :type show_stats: bool
- :keyword callable cls: A custom type or function that will be passed the direct response
- :return: EntitiesResult or the result of cls(response)
- :rtype: ~azure.ai.textanalytics.models.EntitiesResult
- :raises: ~azure.core.exceptions.HttpResponseError
- """
- cls = kwargs.pop('cls', None) # type: ClsType["models.EntitiesResult"]
- error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
- error_map.update(kwargs.pop('error_map', {}))
-
- _input = models.MultiLanguageBatchInput(documents=documents)
- content_type = kwargs.pop("content_type", "application/json")
-
- # Construct URL
- url = self.entities_recognition_general.metadata['url'] # type: ignore
- path_format_arguments = {
- 'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
- }
- url = self._client.format_url(url, **path_format_arguments)
-
- # Construct parameters
- query_parameters = {} # type: Dict[str, Any]
- if model_version is not None:
- query_parameters['model-version'] = self._serialize.query("model_version", model_version, 'str')
- if show_stats is not None:
- query_parameters['showStats'] = self._serialize.query("show_stats", show_stats, 'bool')
-
- # Construct headers
- header_parameters = {} # type: Dict[str, Any]
- header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
- header_parameters['Accept'] = 'application/json'
-
- # Construct and send request
- body_content_kwargs = {} # type: Dict[str, Any]
- body_content = self._serialize.body(_input, 'MultiLanguageBatchInput')
- body_content_kwargs['content'] = body_content
- request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
-
- pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
- response = pipeline_response.http_response
-
- if response.status_code not in [200]:
- map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize(models.TextAnalyticsError, response)
- raise HttpResponseError(response=response, model=error)
-
- deserialized = self._deserialize('EntitiesResult', pipeline_response)
-
- if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
- entities_recognition_general.metadata = {'url': '/entities/recognition/general'} # type: ignore
-
- def entities_linking(
- self,
- documents, # type: List["models.MultiLanguageInput"]
- model_version=None, # type: Optional[str]
- show_stats=None, # type: Optional[bool]
- **kwargs # type: Any
- ):
- # type: (...) -> "models.EntityLinkingResult"
- """The API returns a list of recognized entities with links to a well-known knowledge base. See the :code:`Supported languages in Text Analytics API` for the list of enabled languages.
-
- Linked entities from a well-known knowledge base.
-
- :param documents: The set of documents to process as part of this batch.
- :type documents: list[~azure.ai.textanalytics.models.MultiLanguageInput]
- :param model_version: (Optional) This value indicates which model will be used for scoring. If
- a model-version is not specified, the API should default to the latest, non-preview version.
- :type model_version: str
- :param show_stats: (Optional) if set to true, response will contain input and document level
- statistics.
- :type show_stats: bool
- :keyword callable cls: A custom type or function that will be passed the direct response
- :return: EntityLinkingResult or the result of cls(response)
- :rtype: ~azure.ai.textanalytics.models.EntityLinkingResult
- :raises: ~azure.core.exceptions.HttpResponseError
- """
- cls = kwargs.pop('cls', None) # type: ClsType["models.EntityLinkingResult"]
- error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
- error_map.update(kwargs.pop('error_map', {}))
-
- _input = models.MultiLanguageBatchInput(documents=documents)
- content_type = kwargs.pop("content_type", "application/json")
-
- # Construct URL
- url = self.entities_linking.metadata['url'] # type: ignore
- path_format_arguments = {
- 'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
- }
- url = self._client.format_url(url, **path_format_arguments)
-
- # Construct parameters
- query_parameters = {} # type: Dict[str, Any]
- if model_version is not None:
- query_parameters['model-version'] = self._serialize.query("model_version", model_version, 'str')
- if show_stats is not None:
- query_parameters['showStats'] = self._serialize.query("show_stats", show_stats, 'bool')
-
- # Construct headers
- header_parameters = {} # type: Dict[str, Any]
- header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
- header_parameters['Accept'] = 'application/json'
-
- # Construct and send request
- body_content_kwargs = {} # type: Dict[str, Any]
- body_content = self._serialize.body(_input, 'MultiLanguageBatchInput')
- body_content_kwargs['content'] = body_content
- request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
-
- pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
- response = pipeline_response.http_response
-
- if response.status_code not in [200]:
- map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize(models.TextAnalyticsError, response)
- raise HttpResponseError(response=response, model=error)
-
- deserialized = self._deserialize('EntityLinkingResult', pipeline_response)
-
- if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
- entities_linking.metadata = {'url': '/entities/linking'} # type: ignore
-
- def key_phrases(
- self,
- documents, # type: List["models.MultiLanguageInput"]
- model_version=None, # type: Optional[str]
- show_stats=None, # type: Optional[bool]
- **kwargs # type: Any
- ):
- # type: (...) -> "models.KeyPhraseResult"
- """The API returns a list of strings denoting the key phrases in the input text. See the :code:`Supported languages in Text Analytics API` for the list of enabled languages.
-
- Key Phrases.
-
- :param documents: The set of documents to process as part of this batch.
- :type documents: list[~azure.ai.textanalytics.models.MultiLanguageInput]
- :param model_version: (Optional) This value indicates which model will be used for scoring. If
- a model-version is not specified, the API should default to the latest, non-preview version.
- :type model_version: str
- :param show_stats: (Optional) if set to true, response will contain input and document level
- statistics.
- :type show_stats: bool
- :keyword callable cls: A custom type or function that will be passed the direct response
- :return: KeyPhraseResult or the result of cls(response)
- :rtype: ~azure.ai.textanalytics.models.KeyPhraseResult
- :raises: ~azure.core.exceptions.HttpResponseError
- """
- cls = kwargs.pop('cls', None) # type: ClsType["models.KeyPhraseResult"]
- error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
- error_map.update(kwargs.pop('error_map', {}))
-
- _input = models.MultiLanguageBatchInput(documents=documents)
- content_type = kwargs.pop("content_type", "application/json")
-
- # Construct URL
- url = self.key_phrases.metadata['url'] # type: ignore
- path_format_arguments = {
- 'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
- }
- url = self._client.format_url(url, **path_format_arguments)
-
- # Construct parameters
- query_parameters = {} # type: Dict[str, Any]
- if model_version is not None:
- query_parameters['model-version'] = self._serialize.query("model_version", model_version, 'str')
- if show_stats is not None:
- query_parameters['showStats'] = self._serialize.query("show_stats", show_stats, 'bool')
-
- # Construct headers
- header_parameters = {} # type: Dict[str, Any]
- header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
- header_parameters['Accept'] = 'application/json'
-
- # Construct and send request
- body_content_kwargs = {} # type: Dict[str, Any]
- body_content = self._serialize.body(_input, 'MultiLanguageBatchInput')
- body_content_kwargs['content'] = body_content
- request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
-
- pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
- response = pipeline_response.http_response
-
- if response.status_code not in [200]:
- map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize(models.TextAnalyticsError, response)
- raise HttpResponseError(response=response, model=error)
-
- deserialized = self._deserialize('KeyPhraseResult', pipeline_response)
-
- if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
- key_phrases.metadata = {'url': '/keyPhrases'} # type: ignore
-
- def languages(
- self,
- documents, # type: List["models.LanguageInput"]
- model_version=None, # type: Optional[str]
- show_stats=None, # type: Optional[bool]
- **kwargs # type: Any
- ):
- # type: (...) -> "models.LanguageResult"
- """The API returns the detected language and a numeric score between 0 and 1. Scores close to 1 indicate 100% certainty that the identified language is true. See the :code:`Supported languages in Text Analytics API` for the list of enabled languages.
-
- Detect Language.
-
- :param documents:
- :type documents: list[~azure.ai.textanalytics.models.LanguageInput]
- :param model_version: (Optional) This value indicates which model will be used for scoring. If
- a model-version is not specified, the API should default to the latest, non-preview version.
- :type model_version: str
- :param show_stats: (Optional) if set to true, response will contain input and document level
- statistics.
- :type show_stats: bool
- :keyword callable cls: A custom type or function that will be passed the direct response
- :return: LanguageResult or the result of cls(response)
- :rtype: ~azure.ai.textanalytics.models.LanguageResult
- :raises: ~azure.core.exceptions.HttpResponseError
- """
- cls = kwargs.pop('cls', None) # type: ClsType["models.LanguageResult"]
- error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
- error_map.update(kwargs.pop('error_map', {}))
-
- _input = models.LanguageBatchInput(documents=documents)
- content_type = kwargs.pop("content_type", "application/json")
-
- # Construct URL
- url = self.languages.metadata['url'] # type: ignore
- path_format_arguments = {
- 'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
- }
- url = self._client.format_url(url, **path_format_arguments)
-
- # Construct parameters
- query_parameters = {} # type: Dict[str, Any]
- if model_version is not None:
- query_parameters['model-version'] = self._serialize.query("model_version", model_version, 'str')
- if show_stats is not None:
- query_parameters['showStats'] = self._serialize.query("show_stats", show_stats, 'bool')
-
- # Construct headers
- header_parameters = {} # type: Dict[str, Any]
- header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
- header_parameters['Accept'] = 'application/json'
-
- # Construct and send request
- body_content_kwargs = {} # type: Dict[str, Any]
- body_content = self._serialize.body(_input, 'LanguageBatchInput')
- body_content_kwargs['content'] = body_content
- request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
-
- pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
- response = pipeline_response.http_response
-
- if response.status_code not in [200]:
- map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize(models.TextAnalyticsError, response)
- raise HttpResponseError(response=response, model=error)
-
- deserialized = self._deserialize('LanguageResult', pipeline_response)
-
- if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
- languages.metadata = {'url': '/languages'} # type: ignore
-
- def sentiment(
- self,
- documents, # type: List["models.MultiLanguageInput"]
- model_version=None, # type: Optional[str]
- show_stats=None, # type: Optional[bool]
- **kwargs # type: Any
- ):
- # type: (...) -> "models.SentimentResponse"
- """The API returns a sentiment prediction, as well as sentiment scores for each sentiment class (Positive, Negative, and Neutral) for the document and each sentence within it. See the :code:`Supported languages in Text Analytics API` for the list of enabled languages.
-
- Sentiment.
-
- :param documents: The set of documents to process as part of this batch.
- :type documents: list[~azure.ai.textanalytics.models.MultiLanguageInput]
- :param model_version: (Optional) This value indicates which model will be used for scoring. If
- a model-version is not specified, the API should default to the latest, non-preview version.
- :type model_version: str
- :param show_stats: (Optional) if set to true, response will contain input and document level
- statistics.
- :type show_stats: bool
- :keyword callable cls: A custom type or function that will be passed the direct response
- :return: SentimentResponse or the result of cls(response)
- :rtype: ~azure.ai.textanalytics.models.SentimentResponse
- :raises: ~azure.core.exceptions.HttpResponseError
- """
- cls = kwargs.pop('cls', None) # type: ClsType["models.SentimentResponse"]
- error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
- error_map.update(kwargs.pop('error_map', {}))
-
- _input = models.MultiLanguageBatchInput(documents=documents)
- content_type = kwargs.pop("content_type", "application/json")
-
- # Construct URL
- url = self.sentiment.metadata['url'] # type: ignore
- path_format_arguments = {
- 'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
- }
- url = self._client.format_url(url, **path_format_arguments)
-
- # Construct parameters
- query_parameters = {} # type: Dict[str, Any]
- if model_version is not None:
- query_parameters['model-version'] = self._serialize.query("model_version", model_version, 'str')
- if show_stats is not None:
- query_parameters['showStats'] = self._serialize.query("show_stats", show_stats, 'bool')
-
- # Construct headers
- header_parameters = {} # type: Dict[str, Any]
- header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
- header_parameters['Accept'] = 'application/json'
-
- # Construct and send request
- body_content_kwargs = {} # type: Dict[str, Any]
- body_content = self._serialize.body(_input, 'MultiLanguageBatchInput')
- body_content_kwargs['content'] = body_content
- request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
-
- pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
- response = pipeline_response.http_response
-
- if response.status_code not in [200]:
- map_error(status_code=response.status_code, response=response, error_map=error_map)
- error = self._deserialize(models.TextAnalyticsError, response)
- raise HttpResponseError(response=response, model=error)
-
- deserialized = self._deserialize('SentimentResponse', pipeline_response)
-
- if cls:
- return cls(pipeline_response, deserialized, {})
-
- return deserialized
- sentiment.metadata = {'url': '/sentiment'} # type: ignore
diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/py.typed b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/py.typed
deleted file mode 100644
index e5aff4f83af8..000000000000
--- a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/py.typed
+++ /dev/null
@@ -1 +0,0 @@
-# Marker file for PEP 561.
\ No newline at end of file
diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_models.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_models.py
deleted file mode 100644
index 692178449da8..000000000000
--- a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_models.py
+++ /dev/null
@@ -1,688 +0,0 @@
-# coding=utf-8
-# ------------------------------------
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-# ------------------------------------
-
-from ._generated.models._models import LanguageInput
-from ._generated.models._models import MultiLanguageInput
-
-
-class DictMixin(object):
-
- def __setitem__(self, key, item):
- self.__dict__[key] = item
-
- def __getitem__(self, key):
- return self.__dict__[key]
-
- def __repr__(self):
- return str(self)
-
- def __len__(self):
- return len(self.keys())
-
- def __delitem__(self, key):
- self.__dict__[key] = None
-
- def __eq__(self, other):
- """Compare objects by comparing all attributes."""
- if isinstance(other, self.__class__):
- return self.__dict__ == other.__dict__
- return False
-
- def __ne__(self, other):
- """Compare objects by comparing all attributes."""
- return not self.__eq__(other)
-
- def __str__(self):
- return str({k: v for k, v in self.__dict__.items() if not k.startswith('_')})
-
- def has_key(self, k):
- return k in self.__dict__
-
- def update(self, *args, **kwargs):
- return self.__dict__.update(*args, **kwargs)
-
- def keys(self):
- return [k for k in self.__dict__ if not k.startswith('_')]
-
- def values(self):
- return [v for k, v in self.__dict__.items() if not k.startswith('_')]
-
- def items(self):
- return [(k, v) for k, v in self.__dict__.items() if not k.startswith('_')]
-
- def get(self, key, default=None):
- if key in self.__dict__:
- return self.__dict__[key]
- return default
-
-
-class DetectedLanguage(DictMixin):
- """DetectedLanguage contains the predicted language found in text,
- its confidence score, and ISO 639-1 representation.
-
- :param name: Long name of a detected language (e.g. English,
- French).
- :type name: str
- :param iso6391_name: A two letter representation of the detected
- language according to the ISO 639-1 standard (e.g. en, fr).
- :type iso6391_name: str
- :param confidence_score: A confidence score between 0 and 1. Scores close
- to 1 indicate 100% certainty that the identified language is true.
- :type confidence_score: float
- """
-
- def __init__(self, **kwargs):
- self.name = kwargs.get("name", None)
- self.iso6391_name = kwargs.get("iso6391_name", None)
- self.confidence_score = kwargs.get("confidence_score", None)
-
- @classmethod
- def _from_generated(cls, language):
- return cls(
- name=language.name, iso6391_name=language.iso6391_name, confidence_score=language.confidence_score
- )
-
- def __repr__(self):
- return "DetectedLanguage(name={}, iso6391_name={}, confidence_score={})" \
- .format(self.name, self.iso6391_name, self.confidence_score)[:1024]
-
-
-class RecognizeEntitiesResult(DictMixin):
- """RecognizeEntitiesResult is a result object which contains
- the recognized entities from a particular document.
-
- :param id: Unique, non-empty document identifier that matches the
- document id that was passed in with the request. If not specified
- in the request, an id is assigned for the document.
- :type id: str
- :param entities: Recognized entities in the document.
- :type entities:
- list[~azure.ai.textanalytics.CategorizedEntity]
- :param warnings: Warnings encountered while processing document. Results will still be returned
- if there are warnings, but they may not be fully accurate.
- :type warnings: list[~azure.ai.textanalytics.TextAnalyticsWarning]
- :param statistics: If show_stats=true was specified in the request this
- field will contain information about the document payload.
- :type statistics:
- ~azure.ai.textanalytics.TextDocumentStatistics
- :param bool is_error: Boolean check for error item when iterating over list of
- results. Always False for an instance of a RecognizeEntitiesResult.
- """
-
- def __init__(self, **kwargs):
- self.id = kwargs.get("id", None)
- self.entities = kwargs.get("entities", None)
- self.warnings = kwargs.get("warnings", [])
- self.statistics = kwargs.get("statistics", None)
- self.is_error = False
-
- def __repr__(self):
- return "RecognizeEntitiesResult(id={}, entities={}, warnings={}, statistics={}, is_error={})" \
- .format(self.id, repr(self.entities), repr(self.warnings), repr(self.statistics), self.is_error)[:1024]
-
-
-class DetectLanguageResult(DictMixin):
- """DetectLanguageResult is a result object which contains
- the detected language of a particular document.
-
- :param id: Unique, non-empty document identifier that matches the
- document id that was passed in with the request. If not specified
- in the request, an id is assigned for the document.
- :type id: str
- :param primary_language: The primary language detected in the document.
- :type primary_language: ~azure.ai.textanalytics.DetectedLanguage
- :param warnings: Warnings encountered while processing document. Results will still be returned
- if there are warnings, but they may not be fully accurate.
- :type warnings: list[~azure.ai.textanalytics.TextAnalyticsWarning]
- :param statistics: If show_stats=true was specified in the request this
- field will contain information about the document payload.
- :type statistics:
- ~azure.ai.textanalytics.TextDocumentStatistics
- :param bool is_error: Boolean check for error item when iterating over list of
- results. Always False for an instance of a DetectLanguageResult.
- """
-
- def __init__(self, **kwargs):
- self.id = kwargs.get("id", None)
- self.primary_language = kwargs.get("primary_language", None)
- self.warnings = kwargs.get("warnings", [])
- self.statistics = kwargs.get("statistics", None)
- self.is_error = False
-
- def __repr__(self):
- return "DetectLanguageResult(id={}, primary_language={}, warnings={}, statistics={}, "\
- "is_error={})".format(self.id, repr(self.primary_language), repr(self.warnings),
- repr(self.statistics), self.is_error)[:1024]
-
-
-class CategorizedEntity(DictMixin):
- """CategorizedEntity contains information about a particular
- entity found in text.
-
- :param text: Entity text as appears in the request.
- :type text: str
- :param category: Entity category, such as Person/Location/Org/SSN etc
- :type category: str
- :param subcategory: Entity subcategory, such as Age/Year/TimeRange etc
- :type subcategory: str
- :param confidence_score: Confidence score between 0 and 1 of the extracted
- entity.
- :type confidence_score: float
- """
-
- def __init__(self, **kwargs):
- self.text = kwargs.get('text', None)
- self.category = kwargs.get('category', None)
- self.subcategory = kwargs.get('subcategory', None)
- self.confidence_score = kwargs.get('confidence_score', None)
-
- @classmethod
- def _from_generated(cls, entity):
- return cls(
- text=entity.text,
- category=entity.category,
- subcategory=entity.subcategory,
- confidence_score=entity.confidence_score,
- )
-
- def __repr__(self):
- return "CategorizedEntity(text={}, category={}, subcategory={}, confidence_score={})".format(
- self.text, self.category, self.subcategory, self.confidence_score
- )[:1024]
-
-
-class TextAnalyticsError(DictMixin):
- """TextAnalyticsError contains the error code, message, and
- other details that explain why the batch or individual document
- failed to be processed by the service.
-
- :param code: Error code. Possible values include:
- 'invalidRequest', 'invalidArgument', 'internalServerError',
- 'serviceUnavailable', 'invalidParameterValue', 'invalidRequestBodyFormat',
- 'emptyRequest', 'missingInputRecords', 'invalidDocument', 'modelVersionIncorrect',
- 'invalidDocumentBatch', 'unsupportedLanguageCode', 'invalidCountryHint'
- :type code: str
- :param message: Error message.
- :type message: str
- :param target: Error target.
- :type target: str
- """
-
- def __init__(self, **kwargs):
- self.code = kwargs.get('code', None)
- self.message = kwargs.get('message', None)
- self.target = kwargs.get('target', None)
-
- @classmethod
- def _from_generated(cls, err):
- if err.innererror:
- return cls(
- code=err.innererror.code,
- message=err.innererror.message,
- target=err.innererror.target
- )
- return cls(
- code=err.code,
- message=err.message,
- target=err.target
- )
-
- def __repr__(self):
- return "TextAnalyticsError(code={}, message={}, target={})" \
- .format(self.code, self.message, self.target)[:1024]
-
-class TextAnalyticsWarning(DictMixin):
- """TextAnalyticsWarning contains the warning code and message that explains why
- the response has a warning.
-
- :param code: Warning code. Possible values include: 'LongWordsInDocument',
- 'DocumentTruncated'.
- :type code: str
- :param message: Warning message.
- :type message: str
- """
-
- def __init__(self, **kwargs):
- self.code = kwargs.get('code', None)
- self.message = kwargs.get('message', None)
-
- @classmethod
- def _from_generated(cls, warning):
- return cls(
- code=warning.code,
- message=warning.message,
- )
-
- def __repr__(self):
- return "TextAnalyticsWarning(code={}, message={})" \
- .format(self.code, self.message)[:1024]
-
-
-class ExtractKeyPhrasesResult(DictMixin):
- """ExtractKeyPhrasesResult is a result object which contains
- the key phrases found in a particular document.
-
- :param id: Unique, non-empty document identifier that matches the
- document id that was passed in with the request. If not specified
- in the request, an id is assigned for the document.
- :type id: str
- :param key_phrases: A list of representative words or phrases.
- The number of key phrases returned is proportional to the number of words
- in the input document.
- :type key_phrases: list[str]
- :param warnings: Warnings encountered while processing document. Results will still be returned
- if there are warnings, but they may not be fully accurate.
- :type warnings: list[~azure.ai.textanalytics.TextAnalyticsWarning]
- :param statistics: If show_stats=true was specified in the request this
- field will contain information about the document payload.
- :type statistics:
- ~azure.ai.textanalytics.TextDocumentStatistics
- :param bool is_error: Boolean check for error item when iterating over list of
- results. Always False for an instance of a ExtractKeyPhrasesResult.
- """
-
- def __init__(self, **kwargs):
- self.id = kwargs.get("id", None)
- self.key_phrases = kwargs.get("key_phrases", None)
- self.warnings = kwargs.get("warnings", [])
- self.statistics = kwargs.get("statistics", None)
- self.is_error = False
-
- def __repr__(self):
- return "ExtractKeyPhrasesResult(id={}, key_phrases={}, warnings={}, statistics={}, is_error={})" \
- .format(self.id, self.key_phrases, repr(self.warnings), repr(self.statistics), self.is_error)[:1024]
-
-
-class RecognizeLinkedEntitiesResult(DictMixin):
- """RecognizeLinkedEntitiesResult is a result object which contains
- links to a well-known knowledge base, like for example, Wikipedia or Bing.
-
- :param id: Unique, non-empty document identifier that matches the
- document id that was passed in with the request. If not specified
- in the request, an id is assigned for the document.
- :type id: str
- :param entities: Recognized well-known entities in the document.
- :type entities:
- list[~azure.ai.textanalytics.LinkedEntity]
- :param warnings: Warnings encountered while processing document. Results will still be returned
- if there are warnings, but they may not be fully accurate.
- :type warnings: list[~azure.ai.textanalytics.TextAnalyticsWarning]
- :param statistics: If show_stats=true was specified in the request this
- field will contain information about the document payload.
- :type statistics:
- ~azure.ai.textanalytics.TextDocumentStatistics
- :param bool is_error: Boolean check for error item when iterating over list of
- results. Always False for an instance of a RecognizeLinkedEntitiesResult.
- """
-
- def __init__(self, **kwargs):
- self.id = kwargs.get("id", None)
- self.entities = kwargs.get("entities", None)
- self.warnings = kwargs.get("warnings", [])
- self.statistics = kwargs.get("statistics", None)
- self.is_error = False
-
- def __repr__(self):
- return "RecognizeLinkedEntitiesResult(id={}, entities={}, warnings={}, statistics={}, is_error={})" \
- .format(self.id, repr(self.entities), repr(self.warnings), repr(self.statistics), self.is_error)[:1024]
-
-
-class AnalyzeSentimentResult(DictMixin):
- """AnalyzeSentimentResult is a result object which contains
- the overall predicted sentiment and confidence scores for your document
- and a per-sentence sentiment prediction with scores.
-
- :param id: Unique, non-empty document identifier that matches the
- document id that was passed in with the request. If not specified
- in the request, an id is assigned for the document.
- :type id: str
- :param sentiment: Predicted sentiment for document (Negative,
- Neutral, Positive, or Mixed). Possible values include: 'positive',
- 'neutral', 'negative', 'mixed'
- :type sentiment: str
- :param warnings: Warnings encountered while processing document. Results will still be returned
- if there are warnings, but they may not be fully accurate.
- :type warnings: list[~azure.ai.textanalytics.TextAnalyticsWarning]
- :param statistics: If show_stats=true was specified in the request this
- field will contain information about the document payload.
- :type statistics:
- ~azure.ai.textanalytics.TextDocumentStatistics
- :param confidence_scores: Document level sentiment confidence
- scores between 0 and 1 for each sentiment label.
- :type confidence_scores:
- ~azure.ai.textanalytics.SentimentConfidenceScores
- :param sentences: Sentence level sentiment analysis.
- :type sentences:
- list[~azure.ai.textanalytics.SentenceSentiment]
- :param bool is_error: Boolean check for error item when iterating over list of
- results. Always False for an instance of a AnalyzeSentimentResult.
- """
-
- def __init__(self, **kwargs):
- self.id = kwargs.get("id", None)
- self.sentiment = kwargs.get("sentiment", None)
- self.warnings = kwargs.get("warnings", [])
- self.statistics = kwargs.get("statistics", None)
- self.confidence_scores = kwargs.get("confidence_scores", None)
- self.sentences = kwargs.get("sentences", None)
- self.is_error = False
-
- def __repr__(self):
- return "AnalyzeSentimentResult(id={}, sentiment={}, warnings={}, statistics={}, confidence_scores={}, "\
- "sentences={}, is_error={})".format(
- self.id, self.sentiment, repr(self.warnings), repr(self.statistics),
- repr(self.confidence_scores), repr(self.sentences), self.is_error)[:1024]
-
-
-class TextDocumentStatistics(DictMixin):
- """TextDocumentStatistics contains information about
- the document payload.
-
- :param character_count: Number of text elements recognized in
- the document.
- :type character_count: int
- :param transaction_count: Number of transactions for the
- document.
- :type transaction_count: int
- """
-
- def __init__(self, **kwargs):
- self.character_count = kwargs.get("character_count", None)
- self.transaction_count = kwargs.get("transaction_count", None)
-
- @classmethod
- def _from_generated(cls, stats):
- if stats is None:
- return None
- return cls(
- character_count=stats.characters_count,
- transaction_count=stats.transactions_count,
- )
-
- def __repr__(self):
- return "TextDocumentStatistics(character_count={}, transaction_count={})" \
- .format(self.character_count, self.transaction_count)[:1024]
-
-
-class DocumentError(DictMixin):
- """DocumentError is an error object which represents an error on
- the individual document.
-
- :param id: Unique, non-empty document identifier that matches the
- document id that was passed in with the request. If not specified
- in the request, an id is assigned for the document.
- :type id: str
- :param error: The document error.
- :type error: ~azure.ai.textanalytics.TextAnalyticsError
- :param bool is_error: Boolean check for error item when iterating over list of
- results. Always True for an instance of a DocumentError.
- """
-
- def __init__(self, **kwargs):
- self.id = kwargs.get("id", None)
- self.error = kwargs.get("error", None)
- self.is_error = True
-
- def __getattr__(self, attr):
- result_set = set()
- result_set.update(
- RecognizeEntitiesResult().keys()
- + DetectLanguageResult().keys() + RecognizeLinkedEntitiesResult().keys()
- + AnalyzeSentimentResult().keys() + ExtractKeyPhrasesResult().keys()
- )
- result_attrs = result_set.difference(DocumentError().keys())
- if attr in result_attrs:
- raise AttributeError(
- "'DocumentError' object has no attribute '{}'. The service was unable to process this document:\n"
- "Document Id: {}\nError: {} - {}\n".
- format(attr, self.id, self.error.code, self.error.message)
- )
- raise AttributeError("'DocumentError' object has no attribute '{}'".format(attr))
-
- @classmethod
- def _from_generated(cls, doc_err):
- return cls(
- id=doc_err.id,
- error=TextAnalyticsError._from_generated(doc_err.error), # pylint: disable=protected-access
- is_error=True
- )
-
- def __repr__(self):
- return "DocumentError(id={}, error={}, is_error={})" \
- .format(self.id, repr(self.error), self.is_error)[:1024]
-
-
-class DetectLanguageInput(LanguageInput):
- """The input document to be analyzed for detecting language.
-
- :param id: Required. Unique, non-empty document identifier.
- :type id: str
- :param text: Required. The input text to process.
- :type text: str
- :param country_hint: A country hint to help better detect
- the language of the text. Accepts two letter country codes
- specified by ISO 3166-1 alpha-2. Defaults to "US". Pass
- in the string "none" to not use a country_hint.
- :type country_hint: str
- """
-
- def __init__(self, **kwargs):
- super(DetectLanguageInput, self).__init__(**kwargs)
- self.id = kwargs.get("id", None)
- self.text = kwargs.get("text", None)
- self.country_hint = kwargs.get("country_hint", None)
-
- def __repr__(self):
- return "DetectLanguageInput(id={}, text={}, country_hint={})" \
- .format(self.id, self.text, self.country_hint)[:1024]
-
-
-class LinkedEntity(DictMixin):
- """LinkedEntity contains a link to the well-known recognized
- entity in text. The link comes from a data source like Wikipedia
- or Bing. It additionally includes all of the matches of this
- entity found in the document.
-
- :param name: Entity Linking formal name.
- :type name: str
- :param matches: List of instances this entity appears in the text.
- :type matches:
- list[~azure.ai.textanalytics.LinkedEntityMatch]
- :param language: Language used in the data source.
- :type language: str
- :param data_source_entity_id: Unique identifier of the recognized entity from the data
- source.
- :type data_source_entity_id: str
- :param url: URL to the entity's page from the data source.
- :type url: str
- :param data_source: Data source used to extract entity linking,
- such as Wiki/Bing etc.
- :type data_source: str
- """
-
- def __init__(self, **kwargs):
- self.name = kwargs.get("name", None)
- self.matches = kwargs.get("matches", None)
- self.language = kwargs.get("language", None)
- self.data_source_entity_id = kwargs.get("data_source_entity_id", None)
- self.url = kwargs.get("url", None)
- self.data_source = kwargs.get("data_source", None)
-
- @classmethod
- def _from_generated(cls, entity):
- return cls(
- name=entity.name,
- matches=[LinkedEntityMatch._from_generated(e) for e in entity.matches], # pylint: disable=protected-access
- language=entity.language,
- data_source_entity_id=entity.id,
- url=entity.url,
- data_source=entity.data_source,
- )
-
- def __repr__(self):
- return "LinkedEntity(name={}, matches={}, language={}, data_source_entity_id={}, url={}, " \
- "data_source={})".format(self.name, repr(self.matches), self.language, self.data_source_entity_id,
- self.url, self.data_source)[:1024]
-
-
-class LinkedEntityMatch(DictMixin):
- """A match for the linked entity found in text. Provides
- the confidence score of the prediction and where the entity
- was found in the text.
-
- :param confidence_score: If a well-known item is recognized, a
- decimal number denoting the confidence level between 0 and 1 will be
- returned.
- :type confidence_score: float
- :param text: Entity text as appears in the request.
- :type text: str
- """
-
- def __init__(self, **kwargs):
- self.confidence_score = kwargs.get("confidence_score", None)
- self.text = kwargs.get("text", None)
-
- @classmethod
- def _from_generated(cls, match):
- return cls(
- confidence_score=match.confidence_score,
- text=match.text
- )
-
- def __repr__(self):
- return "LinkedEntityMatch(confidence_score={}, text={})".format(
- self.confidence_score, self.text
- )[:1024]
-
-
-class TextDocumentInput(MultiLanguageInput):
- """The input document to be analyzed by the service.
-
- :param id: Required. A unique, non-empty document identifier.
- :type id: str
- :param text: Required. The input text to process.
- :type text: str
- :param language: This is the 2 letter ISO 639-1 representation
- of a language. For example, use "en" for English; "es" for Spanish etc. If
- not set, uses "en" for English as default.
- :type language: str
- """
-
- def __init__(self, **kwargs):
- super(TextDocumentInput, self).__init__(**kwargs)
- self.id = kwargs.get("id", None)
- self.text = kwargs.get("text", None)
- self.language = kwargs.get("language", None)
-
- def __repr__(self):
- return "TextDocumentInput(id={}, text={}, language={})" \
- .format(self.id, self.text, self.language)[:1024]
-
-
-class TextDocumentBatchStatistics(DictMixin):
- """TextDocumentBatchStatistics contains information about the
- request payload. Note: This object is not returned
- in the response and needs to be retrieved by a response hook.
-
- :param document_count: Number of documents submitted in the request.
- :type document_count: int
- :param valid_document_count: Number of valid documents. This
- excludes empty, over-size limit or non-supported languages documents.
- :type valid_document_count: int
- :param erroneous_document_count: Number of invalid documents.
- This includes empty, over-size limit or non-supported languages documents.
- :type erroneous_document_count: int
- :param transaction_count: Number of transactions for the request.
- :type transaction_count: long
- """
-
- def __init__(self, **kwargs):
- self.document_count = kwargs.get("document_count", None)
- self.valid_document_count = kwargs.get("valid_document_count", None)
- self.erroneous_document_count = kwargs.get("erroneous_document_count", None)
- self.transaction_count = kwargs.get("transaction_count", None)
-
- @classmethod
- def _from_generated(cls, statistics):
- if statistics is None:
- return None
- return cls(
- document_count=statistics["documentsCount"],
- valid_document_count=statistics["validDocumentsCount"],
- erroneous_document_count=statistics["erroneousDocumentsCount"],
- transaction_count=statistics["transactionsCount"],
- )
-
- def __repr__(self):
- return "TextDocumentBatchStatistics(document_count={}, valid_document_count={}, erroneous_document_count={}, " \
- "transaction_count={})".format(self.document_count, self.valid_document_count,
- self.erroneous_document_count, self.transaction_count)[:1024]
-
-
-class SentenceSentiment(DictMixin):
- """SentenceSentiment contains the predicted sentiment and
- confidence scores for each individual sentence in the document.
-
- :param text: The sentence text.
- :type text: str
- :param sentiment: The predicted Sentiment for the sentence.
- Possible values include: 'positive', 'neutral', 'negative'
- :type sentiment: str
- :param confidence_scores: The sentiment confidence score between 0
- and 1 for the sentence for all labels.
- :type confidence_scores:
- ~azure.ai.textanalytics.SentimentConfidenceScores
- """
-
- def __init__(self, **kwargs):
- self.text = kwargs.get("text", None)
- self.sentiment = kwargs.get("sentiment", None)
- self.confidence_scores = kwargs.get("confidence_scores", None)
-
- @classmethod
- def _from_generated(cls, sentence):
- return cls(
- text=sentence.text,
- sentiment=sentence.sentiment,
- confidence_scores=SentimentConfidenceScores._from_generated(sentence.confidence_scores), # pylint: disable=protected-access
- )
-
- def __repr__(self):
- return "SentenceSentiment(text={}, sentiment={}, confidence_scores={})".format(
- self.text,
- self.sentiment,
- repr(self.confidence_scores)
- )[:1024]
-
-
-class SentimentConfidenceScores(DictMixin):
- """The confidence scores (Softmax scores) between 0 and 1.
- Higher values indicate higher confidence.
-
- :param positive: Positive score.
- :type positive: float
- :param neutral: Neutral score.
- :type neutral: float
- :param negative: Negative score.
- :type negative: float
- """
-
- def __init__(self, **kwargs):
- self.positive = kwargs.get('positive', None)
- self.neutral = kwargs.get('neutral', None)
- self.negative = kwargs.get('negative', None)
-
- @classmethod
- def _from_generated(cls, score):
- return cls(
- positive=score.positive,
- neutral=score.neutral,
- negative=score.negative
- )
-
- def __repr__(self):
- return "SentimentConfidenceScores(positive={}, neutral={}, negative={})" \
- .format(self.positive, self.neutral, self.negative)[:1024]
diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_policies.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_policies.py
deleted file mode 100644
index be9292570474..000000000000
--- a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_policies.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# coding=utf-8
-# ------------------------------------
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-# ------------------------------------
-
-from azure.core.pipeline.policies import ContentDecodePolicy
-from azure.core.pipeline.policies import SansIOHTTPPolicy
-from ._models import TextDocumentBatchStatistics
-
-
-class TextAnalyticsResponseHookPolicy(SansIOHTTPPolicy):
- def __init__(self, **kwargs):
- self._response_callback = kwargs.get("raw_response_hook")
- super(TextAnalyticsResponseHookPolicy, self).__init__()
-
- def on_request(self, request):
- self._response_callback = request.context.options.pop("raw_response_hook", self._response_callback)
-
- def on_response(self, request, response):
- if self._response_callback:
- data = ContentDecodePolicy.deserialize_from_http_generics(response.http_response)
- statistics = data.get("statistics", None)
- model_version = data.get("modelVersion", None)
-
- batch_statistics = TextDocumentBatchStatistics._from_generated(statistics) # pylint: disable=protected-access
- response.statistics = batch_statistics
- response.model_version = model_version
- response.raw_response = data
- self._response_callback(response)
diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_request_handlers.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_request_handlers.py
deleted file mode 100644
index 6d901cf2a7a0..000000000000
--- a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_request_handlers.py
+++ /dev/null
@@ -1,66 +0,0 @@
-# coding=utf-8
-# ------------------------------------
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-# ------------------------------------
-
-
-import six
-
-from ._models import (
- DetectLanguageInput,
- TextDocumentInput,
-)
-
-
-def _validate_batch_input(documents, hint, whole_batch_hint):
- """Validate that batch input has either all string docs
- or dict/DetectLanguageInput/TextDocumentInput, not a mix of both.
- Assign country and language hints on a whole batch or per-item
- basis.
-
- :param list documents: The input documents.
- :return: A list of DetectLanguageInput or TextDocumentInput
- """
- if not documents:
- raise ValueError("Input documents can not be empty or None")
-
- if isinstance(documents, six.string_types):
- raise TypeError("Input documents cannot be a string.")
-
- if isinstance(documents, dict):
- raise TypeError("Input documents cannot be a dict")
-
- if not all(isinstance(x, six.string_types) for x in documents):
- if not all(isinstance(x, (dict, TextDocumentInput, DetectLanguageInput)) for x in documents):
- raise TypeError("Mixing string and dictionary/object document input unsupported.")
-
-
- request_batch = []
- for idx, doc in enumerate(documents):
- if isinstance(doc, six.string_types):
- if hint == "country_hint" and whole_batch_hint.lower() == "none":
- whole_batch_hint = ""
- document = {"id": str(idx), hint: whole_batch_hint, "text": doc}
- request_batch.append(document)
- if isinstance(doc, dict):
- item_hint = doc.get(hint, None)
- if item_hint is None:
- doc = {"id": doc.get("id", None), hint: whole_batch_hint, "text": doc.get("text", None)}
- elif item_hint.lower() == "none":
- doc = {"id": doc.get("id", None), hint: "", "text": doc.get("text", None)}
- request_batch.append(doc)
- if isinstance(doc, TextDocumentInput):
- item_hint = doc.language
- if item_hint is None:
- doc = TextDocumentInput(id=doc.id, language=whole_batch_hint, text=doc.text)
- request_batch.append(doc)
- if isinstance(doc, DetectLanguageInput):
- item_hint = doc.country_hint
- if item_hint is None:
- doc = DetectLanguageInput(id=doc.id, country_hint=whole_batch_hint, text=doc.text)
- elif item_hint.lower() == "none":
- doc = DetectLanguageInput(id=doc.id, country_hint="", text=doc.text)
- request_batch.append(doc)
-
- return request_batch
diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_response_handlers.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_response_handlers.py
deleted file mode 100644
index 0f38ba8b8c6d..000000000000
--- a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_response_handlers.py
+++ /dev/null
@@ -1,149 +0,0 @@
-# coding=utf-8
-# ------------------------------------
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-# ------------------------------------
-
-import json
-from azure.core.exceptions import (
- HttpResponseError,
- ClientAuthenticationError,
- ODataV4Format
-)
-from ._models import (
- RecognizeEntitiesResult,
- CategorizedEntity,
- TextDocumentStatistics,
- RecognizeLinkedEntitiesResult,
- LinkedEntity,
- ExtractKeyPhrasesResult,
- AnalyzeSentimentResult,
- SentenceSentiment,
- DetectLanguageResult,
- DetectedLanguage,
- DocumentError,
- SentimentConfidenceScores,
- TextAnalyticsError,
- TextAnalyticsWarning
-)
-
-class CSODataV4Format(ODataV4Format):
-
- def __init__(self, odata_error):
- try:
- if odata_error["error"]["innererror"]:
- super(CSODataV4Format, self).__init__(odata_error["error"]["innererror"])
- except KeyError:
- super(CSODataV4Format, self).__init__(odata_error)
-
-
-def process_batch_error(error):
- """Raise detailed error message.
- """
- raise_error = HttpResponseError
- if error.status_code == 401:
- raise_error = ClientAuthenticationError
- raise raise_error(response=error.response, error_format=CSODataV4Format)
-
-def order_results(response, combined):
- """Order results in the order the user passed them in.
-
- :param response: Used to get the original documents in the request
- :param combined: A combined list of the results | errors
- :return: In order list of results | errors (if any)
- """
- request = json.loads(response.http_response.request.body)["documents"]
- mapping = {item.id: item for item in combined}
- ordered_response = [mapping[item["id"]] for item in request]
- return ordered_response
-
-
-def prepare_result(func):
- def _get_error_code_and_message(error):
- if hasattr(error.error, 'innererror') and error.error.innererror:
- return error.error.innererror.code, error.error.innererror.message
- return error.error.code, error.error.message
-
- def _deal_with_too_many_documents(response, obj):
- # special case for now if there are too many documents in the request
- too_many_documents_errors = [
- error for error in obj.errors if error.id == ""
- ]
- if too_many_documents_errors:
- too_many_documents_error = too_many_documents_errors[0]
- response.status_code = 400
- response.reason = "Bad Request"
- code, message = _get_error_code_and_message(too_many_documents_error)
- raise HttpResponseError(
- message="({}) {}".format(code, message),
- response=response
- )
-
- def wrapper(response, obj, response_headers): # pylint: disable=unused-argument
- if obj.errors:
- _deal_with_too_many_documents(response.http_response, obj)
- combined = obj.documents + obj.errors
- results = order_results(response, combined)
- else:
- results = obj.documents
-
- for idx, item in enumerate(results):
- if hasattr(item, "error"):
- results[idx] = DocumentError(id=item.id, error=TextAnalyticsError._from_generated(item.error)) # pylint: disable=protected-access
- else:
- results[idx] = func(item)
- return results
-
- return wrapper
-
-
-@prepare_result
-def language_result(language):
- return DetectLanguageResult(
- id=language.id,
- primary_language=DetectedLanguage._from_generated(language.detected_language), # pylint: disable=protected-access
- warnings=[TextAnalyticsWarning._from_generated(w) for w in language.warnings], # pylint: disable=protected-access
- statistics=TextDocumentStatistics._from_generated(language.statistics), # pylint: disable=protected-access
- )
-
-
-@prepare_result
-def entities_result(entity):
- return RecognizeEntitiesResult(
- id=entity.id,
- entities=[CategorizedEntity._from_generated(e) for e in entity.entities], # pylint: disable=protected-access
- warnings=[TextAnalyticsWarning._from_generated(w) for w in entity.warnings], # pylint: disable=protected-access
- statistics=TextDocumentStatistics._from_generated(entity.statistics), # pylint: disable=protected-access
- )
-
-
-@prepare_result
-def linked_entities_result(entity):
- return RecognizeLinkedEntitiesResult(
- id=entity.id,
- entities=[LinkedEntity._from_generated(e) for e in entity.entities], # pylint: disable=protected-access
- warnings=[TextAnalyticsWarning._from_generated(w) for w in entity.warnings], # pylint: disable=protected-access
- statistics=TextDocumentStatistics._from_generated(entity.statistics), # pylint: disable=protected-access
- )
-
-
-@prepare_result
-def key_phrases_result(phrases):
- return ExtractKeyPhrasesResult(
- id=phrases.id,
- key_phrases=phrases.key_phrases,
- warnings=[TextAnalyticsWarning._from_generated(w) for w in phrases.warnings], # pylint: disable=protected-access
- statistics=TextDocumentStatistics._from_generated(phrases.statistics), # pylint: disable=protected-access
- )
-
-
-@prepare_result
-def sentiment_result(sentiment):
- return AnalyzeSentimentResult(
- id=sentiment.id,
- sentiment=sentiment.sentiment,
- warnings=[TextAnalyticsWarning._from_generated(w) for w in sentiment.warnings], # pylint: disable=protected-access
- statistics=TextDocumentStatistics._from_generated(sentiment.statistics), # pylint: disable=protected-access
- confidence_scores=SentimentConfidenceScores._from_generated(sentiment.confidence_scores), # pylint: disable=protected-access
- sentences=[SentenceSentiment._from_generated(s) for s in sentiment.sentences], # pylint: disable=protected-access
- )
diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_text_analytics_client.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_text_analytics_client.py
deleted file mode 100644
index 3d32c1dad8bb..000000000000
--- a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_text_analytics_client.py
+++ /dev/null
@@ -1,412 +0,0 @@
-# coding=utf-8
-# ------------------------------------
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-# ------------------------------------
-
-from typing import ( # pylint: disable=unused-import
- Union,
- Optional,
- Any,
- List,
- Dict,
- TYPE_CHECKING,
-)
-from azure.core.tracing.decorator import distributed_trace
-from azure.core.exceptions import HttpResponseError
-from ._base_client import TextAnalyticsClientBase
-from ._request_handlers import _validate_batch_input
-from ._response_handlers import (
- process_batch_error,
- entities_result,
- linked_entities_result,
- key_phrases_result,
- sentiment_result,
- language_result
-)
-
-if TYPE_CHECKING:
- from azure.core.credentials import TokenCredential, AzureKeyCredential
- from ._models import (
- DetectLanguageInput,
- TextDocumentInput,
- DetectLanguageResult,
- RecognizeEntitiesResult,
- RecognizeLinkedEntitiesResult,
- ExtractKeyPhrasesResult,
- AnalyzeSentimentResult,
- DocumentError,
- )
-
-
-class TextAnalyticsClient(TextAnalyticsClientBase):
- """The Text Analytics API is a suite of text analytics web services built with best-in-class
- Microsoft machine learning algorithms. The API can be used to analyze unstructured text for
- tasks such as sentiment analysis, key phrase extraction, and language detection. No training data
- is needed to use this API - just bring your text data. This API uses advanced natural language
- processing techniques to deliver best in class predictions.
-
- Further documentation can be found in
- https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview
-
- :param str endpoint: Supported Cognitive Services or Text Analytics resource
- endpoints (protocol and hostname, for example: https://westus2.api.cognitive.microsoft.com).
- :param credential: Credentials needed for the client to connect to Azure.
- This can be the an instance of AzureKeyCredential if using a
- cognitive services/text analytics API key or a token credential
- from :mod:`azure.identity`.
- :type credential: :class:`~azure.core.credentials.AzureKeyCredential` or
- :class:`~azure.core.credentials.TokenCredential`
- :keyword str default_country_hint: Sets the default country_hint to use for all operations.
- Defaults to "US". If you don't want to use a country hint, pass the string "none".
- :keyword str default_language: Sets the default language to use for all operations.
- Defaults to "en".
-
- .. admonition:: Example:
-
- .. literalinclude:: ../samples/sample_authentication.py
- :start-after: [START create_ta_client_with_key]
- :end-before: [END create_ta_client_with_key]
- :language: python
- :dedent: 8
- :caption: Creating the TextAnalyticsClient with endpoint and API key.
-
- .. literalinclude:: ../samples/sample_authentication.py
- :start-after: [START create_ta_client_with_aad]
- :end-before: [END create_ta_client_with_aad]
- :language: python
- :dedent: 8
- :caption: Creating the TextAnalyticsClient with endpoint and token credential from Azure Active Directory.
- """
-
- def __init__(self, endpoint, credential, **kwargs):
- # type: (str, Union[AzureKeyCredential, TokenCredential], Any) -> None
- super(TextAnalyticsClient, self).__init__(
- endpoint=endpoint,
- credential=credential,
- **kwargs
- )
- self._default_language = kwargs.pop("default_language", "en")
- self._default_country_hint = kwargs.pop("default_country_hint", "US")
-
- @distributed_trace
- def detect_language( # type: ignore
- self,
- documents, # type: Union[List[str], List[DetectLanguageInput], List[Dict[str, str]]]
- **kwargs # type: Any
- ):
- # type: (...) -> List[Union[DetectLanguageResult, DocumentError]]
- """Detects Language for a batch of documents.
-
- Returns the detected language and a numeric score between zero and
- one. Scores close to one indicate 100% certainty that the identified
- language is true. See https://aka.ms/talangs for the list of enabled languages.
-
- See https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits
- for document length limits, maximum batch size, and supported text encoding.
-
- :param documents: The set of documents to process as part of this batch.
- If you wish to specify the ID and country_hint on a per-item basis you must
- use as input a list[:class:`~azure.ai.textanalytics.DetectLanguageInput`] or a list of
- dict representations of :class:`~azure.ai.textanalytics.DetectLanguageInput`, like
- `{"id": "1", "country_hint": "us", "text": "hello world"}`.
- :type documents:
- list[str] or list[~azure.ai.textanalytics.DetectLanguageInput]
- :keyword str country_hint: A country hint for the entire batch. Accepts two
- letter country codes specified by ISO 3166-1 alpha-2. Per-document
- country hints will take precedence over whole batch hints. Defaults to
- "US". If you don't want to use a country hint, pass the string "none".
- :keyword str model_version: This value indicates which model will
- be used for scoring, e.g. "latest", "2019-10-01". If a model-version
- is not specified, the API will default to the latest, non-preview version.
- :keyword bool show_stats: If set to true, response will contain document
- level statistics.
- :return: The combined list of :class:`~azure.ai.textanalytics.DetectLanguageResult` and
- :class:`~azure.ai.textanalytics.DocumentError` in the order the original documents were
- passed in.
- :rtype: list[~azure.ai.textanalytics.DetectLanguageResult,
- ~azure.ai.textanalytics.DocumentError]
- :raises ~azure.core.exceptions.HttpResponseError or TypeError or ValueError:
-
- .. admonition:: Example:
-
- .. literalinclude:: ../samples/sample_detect_language.py
- :start-after: [START batch_detect_language]
- :end-before: [END batch_detect_language]
- :language: python
- :dedent: 8
- :caption: Detecting language in a batch of documents.
- """
- country_hint_arg = kwargs.pop("country_hint", None)
- country_hint = country_hint_arg if country_hint_arg is not None else self._default_country_hint
- docs = _validate_batch_input(documents, "country_hint", country_hint)
- model_version = kwargs.pop("model_version", None)
- show_stats = kwargs.pop("show_stats", False)
- try:
- return self._client.languages(
- documents=docs,
- model_version=model_version,
- show_stats=show_stats,
- cls=kwargs.pop("cls", language_result),
- **kwargs
- )
- except HttpResponseError as error:
- process_batch_error(error)
-
- @distributed_trace
- def recognize_entities( # type: ignore
- self,
- documents, # type: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]]
- **kwargs # type: Any
- ):
- # type: (...) -> List[Union[RecognizeEntitiesResult, DocumentError]]
- """Entity Recognition for a batch of documents.
-
- Identifies and categorizes entities in your text as people, places,
- organizations, date/time, quantities, percentages, currencies, and more.
- For the list of supported entity types, check: https://aka.ms/taner
-
- See https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits
- for document length limits, maximum batch size, and supported text encoding.
-
- :param documents: The set of documents to process as part of this batch.
- If you wish to specify the ID and language on a per-item basis you must
- use as input a list[:class:`~azure.ai.textanalytics.TextDocumentInput`] or a list
- of dict representations of :class:`~azure.ai.textanalytics.TextDocumentInput`,
- like `{"id": "1", "language": "en", "text": "hello world"}`.
- :type documents:
- list[str] or list[~azure.ai.textanalytics.TextDocumentInput]
- :keyword str language: The 2 letter ISO 639-1 representation of language for the
- entire batch. For example, use "en" for English; "es" for Spanish etc.
- If not set, uses "en" for English as default. Per-document language will
- take precedence over whole batch language. See https://aka.ms/talangs for
- supported languages in Text Analytics API.
- :keyword str model_version: This value indicates which model will
- be used for scoring, e.g. "latest", "2019-10-01". If a model-version
- is not specified, the API will default to the latest, non-preview version.
- :keyword bool show_stats: If set to true, response will contain document level statistics.
- :return: The combined list of :class:`~azure.ai.textanalytics.RecognizeEntitiesResult` and
- :class:`~azure.ai.textanalytics.DocumentError` in the order the original documents
- were passed in.
- :rtype: list[~azure.ai.textanalytics.RecognizeEntitiesResult,
- ~azure.ai.textanalytics.DocumentError]
- :raises ~azure.core.exceptions.HttpResponseError or TypeError or ValueError:
-
- .. admonition:: Example:
-
- .. literalinclude:: ../samples/sample_recognize_entities.py
- :start-after: [START batch_recognize_entities]
- :end-before: [END batch_recognize_entities]
- :language: python
- :dedent: 8
- :caption: Recognize entities in a batch of documents.
- """
- language_arg = kwargs.pop("language", None)
- language = language_arg if language_arg is not None else self._default_language
- docs = _validate_batch_input(documents, "language", language)
- model_version = kwargs.pop("model_version", None)
- show_stats = kwargs.pop("show_stats", False)
- try:
- return self._client.entities_recognition_general(
- documents=docs,
- model_version=model_version,
- show_stats=show_stats,
- cls=kwargs.pop("cls", entities_result),
- **kwargs
- )
- except HttpResponseError as error:
- process_batch_error(error)
-
- @distributed_trace
- def recognize_linked_entities( # type: ignore
- self,
- documents, # type: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]]
- **kwargs # type: Any
- ):
- # type: (...) -> List[Union[RecognizeLinkedEntitiesResult, DocumentError]]
- """Recognize linked entities from a well-known knowledge base for a batch of documents.
-
- Identifies and disambiguates the identity of each entity found in text (for example,
- determining whether an occurrence of the word Mars refers to the planet, or to the
- Roman god of war). Recognized entities are associated with URLs to a well-known
- knowledge base, like Wikipedia.
-
- See https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits
- for document length limits, maximum batch size, and supported text encoding.
-
- :param documents: The set of documents to process as part of this batch.
- If you wish to specify the ID and language on a per-item basis you must
- use as input a list[:class:`~azure.ai.textanalytics.TextDocumentInput`] or a list of
- dict representations of :class:`~azure.ai.textanalytics.TextDocumentInput`, like
- `{"id": "1", "language": "en", "text": "hello world"}`.
- :type documents:
- list[str] or list[~azure.ai.textanalytics.TextDocumentInput]
- :keyword str language: The 2 letter ISO 639-1 representation of language for the
- entire batch. For example, use "en" for English; "es" for Spanish etc.
- If not set, uses "en" for English as default. Per-document language will
- take precedence over whole batch language. See https://aka.ms/talangs for
- supported languages in Text Analytics API.
- :keyword str model_version: This value indicates which model will
- be used for scoring, e.g. "latest", "2019-10-01". If a model-version
- is not specified, the API will default to the latest, non-preview version.
- :keyword bool show_stats: If set to true, response will contain document level statistics.
- :return: The combined list of :class:`~azure.ai.textanalytics.RecognizeLinkedEntitiesResult`
- and :class:`~azure.ai.textanalytics.DocumentError` in the order the original documents
- were passed in.
- :rtype: list[~azure.ai.textanalytics.RecognizeLinkedEntitiesResult,
- ~azure.ai.textanalytics.DocumentError]
- :raises ~azure.core.exceptions.HttpResponseError or TypeError or ValueError:
-
- .. admonition:: Example:
-
- .. literalinclude:: ../samples/sample_recognize_linked_entities.py
- :start-after: [START batch_recognize_linked_entities]
- :end-before: [END batch_recognize_linked_entities]
- :language: python
- :dedent: 8
- :caption: Recognize linked entities in a batch of documents.
- """
- language_arg = kwargs.pop("language", None)
- language = language_arg if language_arg is not None else self._default_language
- docs = _validate_batch_input(documents, "language", language)
- model_version = kwargs.pop("model_version", None)
- show_stats = kwargs.pop("show_stats", False)
- try:
- return self._client.entities_linking(
- documents=docs,
- model_version=model_version,
- show_stats=show_stats,
- cls=kwargs.pop("cls", linked_entities_result),
- **kwargs
- )
- except HttpResponseError as error:
- process_batch_error(error)
-
- @distributed_trace
- def extract_key_phrases( # type: ignore
- self,
- documents, # type: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]]
- **kwargs # type: Any
- ):
- # type: (...) -> List[Union[ExtractKeyPhrasesResult, DocumentError]]
- """Extract Key Phrases from a batch of documents.
-
- Returns a list of strings denoting the key phrases in the input
- text. For example, for the input text "The food was delicious and there
- were wonderful staff", the API returns the main talking points: "food"
- and "wonderful staff"
-
- See https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits
- for document length limits, maximum batch size, and supported text encoding.
-
- :param documents: The set of documents to process as part of this batch.
- If you wish to specify the ID and language on a per-item basis you must
- use as input a list[:class:`~azure.ai.textanalytics.TextDocumentInput`] or a list of
- dict representations of :class:`~azure.ai.textanalytics.TextDocumentInput`, like
- `{"id": "1", "language": "en", "text": "hello world"}`.
- :type documents:
- list[str] or list[~azure.ai.textanalytics.TextDocumentInput]
- :keyword str language: The 2 letter ISO 639-1 representation of language for the
- entire batch. For example, use "en" for English; "es" for Spanish etc.
- If not set, uses "en" for English as default. Per-document language will
- take precedence over whole batch language. See https://aka.ms/talangs for
- supported languages in Text Analytics API.
- :keyword str model_version: This value indicates which model will
- be used for scoring, e.g. "latest", "2019-10-01". If a model-version
- is not specified, the API will default to the latest, non-preview version.
- :keyword bool show_stats: If set to true, response will contain document level statistics.
- :return: The combined list of :class:`~azure.ai.textanalytics.ExtractKeyPhrasesResult` and
- :class:`~azure.ai.textanalytics.DocumentError` in the order the original documents were
- passed in.
- :rtype: list[~azure.ai.textanalytics.ExtractKeyPhrasesResult,
- ~azure.ai.textanalytics.DocumentError]
- :raises ~azure.core.exceptions.HttpResponseError or TypeError or ValueError:
-
- .. admonition:: Example:
-
- .. literalinclude:: ../samples/sample_extract_key_phrases.py
- :start-after: [START batch_extract_key_phrases]
- :end-before: [END batch_extract_key_phrases]
- :language: python
- :dedent: 8
- :caption: Extract the key phrases in a batch of documents.
- """
- language_arg = kwargs.pop("language", None)
- language = language_arg if language_arg is not None else self._default_language
- docs = _validate_batch_input(documents, "language", language)
- model_version = kwargs.pop("model_version", None)
- show_stats = kwargs.pop("show_stats", False)
- try:
- return self._client.key_phrases(
- documents=docs,
- model_version=model_version,
- show_stats=show_stats,
- cls=kwargs.pop("cls", key_phrases_result),
- **kwargs
- )
- except HttpResponseError as error:
- process_batch_error(error)
-
- @distributed_trace
- def analyze_sentiment( # type: ignore
- self,
- documents, # type: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]]
- **kwargs # type: Any
- ):
- # type: (...) -> List[Union[AnalyzeSentimentResult, DocumentError]]
- """Analyze sentiment for a batch of documents.
-
- Returns a sentiment prediction, as well as sentiment scores for
- each sentiment class (Positive, Negative, and Neutral) for the document
- and each sentence within it.
-
- See https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits
- for document length limits, maximum batch size, and supported text encoding.
-
- :param documents: The set of documents to process as part of this batch.
- If you wish to specify the ID and language on a per-item basis you must
- use as input a list[:class:`~azure.ai.textanalytics.TextDocumentInput`] or a list of
- dict representations of :class:`~azure.ai.textanalytics.TextDocumentInput`, like
- `{"id": "1", "language": "en", "text": "hello world"}`.
- :type documents:
- list[str] or list[~azure.ai.textanalytics.TextDocumentInput]
- :keyword str language: The 2 letter ISO 639-1 representation of language for the
- entire batch. For example, use "en" for English; "es" for Spanish etc.
- If not set, uses "en" for English as default. Per-document language will
- take precedence over whole batch language. See https://aka.ms/talangs for
- supported languages in Text Analytics API.
- :keyword str model_version: This value indicates which model will
- be used for scoring, e.g. "latest", "2019-10-01". If a model-version
- is not specified, the API will default to the latest, non-preview version.
- :keyword bool show_stats: If set to true, response will contain document level statistics.
- :return: The combined list of :class:`~azure.ai.textanalytics.AnalyzeSentimentResult` and
- :class:`~azure.ai.textanalytics.DocumentError` in the order the original documents were
- passed in.
- :rtype: list[~azure.ai.textanalytics.AnalyzeSentimentResult,
- ~azure.ai.textanalytics.DocumentError]
- :raises ~azure.core.exceptions.HttpResponseError or TypeError or ValueError:
-
- .. admonition:: Example:
-
- .. literalinclude:: ../samples/sample_analyze_sentiment.py
- :start-after: [START batch_analyze_sentiment]
- :end-before: [END batch_analyze_sentiment]
- :language: python
- :dedent: 8
- :caption: Analyze sentiment in a batch of documents.
- """
- language_arg = kwargs.pop("language", None)
- language = language_arg if language_arg is not None else self._default_language
- docs = _validate_batch_input(documents, "language", language)
- model_version = kwargs.pop("model_version", None)
- show_stats = kwargs.pop("show_stats", False)
- try:
- return self._client.sentiment(
- documents=docs,
- model_version=model_version,
- show_stats=show_stats,
- cls=kwargs.pop("cls", sentiment_result),
- **kwargs
- )
- except HttpResponseError as error:
- process_batch_error(error)
diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_user_agent.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_user_agent.py
deleted file mode 100644
index dd1abdfe3e9a..000000000000
--- a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_user_agent.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# coding=utf-8
-# ------------------------------------
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-# ------------------------------------
-
-import platform
-from ._version import VERSION
-
-USER_AGENT = "ai-textanalytics/{} Python/{} ({})".format(
- VERSION, platform.python_version(), platform.platform()
-)
diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_version.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_version.py
deleted file mode 100644
index 5c9f115397ae..000000000000
--- a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_version.py
+++ /dev/null
@@ -1,7 +0,0 @@
-# coding=utf-8
-# ------------------------------------
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-# ------------------------------------
-
-VERSION = "1.0.0b7"
diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/aio/__init__.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/aio/__init__.py
deleted file mode 100644
index a3d4ff19e3d8..000000000000
--- a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/aio/__init__.py
+++ /dev/null
@@ -1,11 +0,0 @@
-# coding=utf-8
-# ------------------------------------
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-# ------------------------------------
-
-from ._text_analytics_client_async import TextAnalyticsClient
-
-__all__ = [
- 'TextAnalyticsClient'
-]
diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/aio/_base_client_async.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/aio/_base_client_async.py
deleted file mode 100644
index f586dae39b9a..000000000000
--- a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/aio/_base_client_async.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# coding=utf-8
-# ------------------------------------
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-# ------------------------------------
-from typing import Any
-from azure.core.credentials import AzureKeyCredential
-from azure.core.pipeline.policies import AzureKeyCredentialPolicy
-from ._policies_async import AsyncTextAnalyticsResponseHookPolicy
-from .._generated.aio import TextAnalyticsClient
-from .._user_agent import USER_AGENT
-
-
-def _authentication_policy(credential):
- authentication_policy = None
- if credential is None:
- raise ValueError("Parameter 'credential' must not be None.")
- if isinstance(credential, AzureKeyCredential):
- authentication_policy = AzureKeyCredentialPolicy(
- name="Ocp-Apim-Subscription-Key", credential=credential
- )
- elif credential is not None and not hasattr(credential, "get_token"):
- raise TypeError("Unsupported credential: {}. Use an instance of AzureKeyCredential "
- "or a token credential from azure.identity".format(type(credential)))
- return authentication_policy
-
-
-class AsyncTextAnalyticsClientBase(object):
- def __init__(self, endpoint, credential, **kwargs):
- self._client = TextAnalyticsClient(
- endpoint=endpoint,
- credential=credential,
- sdk_moniker=USER_AGENT,
- authentication_policy=_authentication_policy(credential),
- custom_hook_policy=AsyncTextAnalyticsResponseHookPolicy(**kwargs),
- **kwargs
- )
-
-
- async def __aenter__(self) -> "AsyncTextAnalyticsClientBase":
- await self._client.__aenter__()
- return self
-
- async def __aexit__(self, *args: "Any") -> None:
- await self._client.__aexit__(*args)
-
- async def close(self) -> None:
- """Close sockets opened by the client.
- Calling this method is unnecessary when using the client as a context manager.
- """
- await self._client.__aexit__()
diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/aio/_policies_async.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/aio/_policies_async.py
deleted file mode 100644
index c8ba797a8743..000000000000
--- a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/aio/_policies_async.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# coding=utf-8
-# ------------------------------------
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-# ------------------------------------
-
-import asyncio
-from azure.core.pipeline.policies import ContentDecodePolicy
-from azure.core.pipeline.policies import SansIOHTTPPolicy
-from .._models import TextDocumentBatchStatistics
-
-
-class AsyncTextAnalyticsResponseHookPolicy(SansIOHTTPPolicy):
-
- def __init__(self, **kwargs):
- self._response_callback = kwargs.get('raw_response_hook')
- super(AsyncTextAnalyticsResponseHookPolicy, self).__init__()
-
- async def on_request(self, request):
- self._response_callback = request.context.options.pop("raw_response_hook", self._response_callback)
-
- async def on_response(self, request, response):
- if self._response_callback:
- data = ContentDecodePolicy.deserialize_from_http_generics(response.http_response)
- statistics = data.get("statistics", None)
- model_version = data.get("modelVersion", None)
-
- batch_statistics = TextDocumentBatchStatistics._from_generated(statistics) # pylint: disable=protected-access
- response.statistics = batch_statistics
- response.model_version = model_version
- response.raw_response = data
- if asyncio.iscoroutine(self._response_callback):
- await self._response_callback(response)
- else:
- self._response_callback(response)
diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/aio/_text_analytics_client_async.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/aio/_text_analytics_client_async.py
deleted file mode 100644
index 5c649e1f5c4d..000000000000
--- a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/aio/_text_analytics_client_async.py
+++ /dev/null
@@ -1,412 +0,0 @@
-# coding=utf-8
-# ------------------------------------
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-# ------------------------------------
-
-from typing import ( # pylint: disable=unused-import
- Union,
- Optional,
- Any,
- List,
- Dict,
- TYPE_CHECKING
-)
-from azure.core.tracing.decorator_async import distributed_trace_async
-from azure.core.exceptions import HttpResponseError
-from ._base_client_async import AsyncTextAnalyticsClientBase
-from .._request_handlers import _validate_batch_input
-from .._response_handlers import (
- process_batch_error,
- entities_result,
- linked_entities_result,
- key_phrases_result,
- sentiment_result,
- language_result
-)
-from .._models import (
- DetectLanguageInput,
- TextDocumentInput,
- DetectLanguageResult,
- RecognizeEntitiesResult,
- RecognizeLinkedEntitiesResult,
- ExtractKeyPhrasesResult,
- AnalyzeSentimentResult,
- DocumentError,
-)
-
-if TYPE_CHECKING:
- from azure.core.credentials_async import AsyncTokenCredential
- from azure.core.credentials import AzureKeyCredential
-
-
-class TextAnalyticsClient(AsyncTextAnalyticsClientBase):
- """The Text Analytics API is a suite of text analytics web services built with best-in-class
- Microsoft machine learning algorithms. The API can be used to analyze unstructured text for
- tasks such as sentiment analysis, key phrase extraction, and language detection. No training data
- is needed to use this API - just bring your text data. This API uses advanced natural language
- processing techniques to deliver best in class predictions.
-
- Further documentation can be found in
- https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview
-
- :param str endpoint: Supported Cognitive Services or Text Analytics resource
- endpoints (protocol and hostname, for example: https://westus2.api.cognitive.microsoft.com).
- :param credential: Credentials needed for the client to connect to Azure.
- This can be the an instance of AzureKeyCredential if using a
- cognitive services/text analytics API key or a token credential
- from :mod:`azure.identity`.
- :type credential: :class:`~azure.core.credentials.AzureKeyCredential`
- or :class:`~azure.core.credentials_async.AsyncTokenCredential`
- :keyword str default_country_hint: Sets the default country_hint to use for all operations.
- Defaults to "US". If you don't want to use a country hint, pass the string "none".
- :keyword str default_language: Sets the default language to use for all operations.
- Defaults to "en".
-
- .. admonition:: Example:
-
- .. literalinclude:: ../samples/async_samples/sample_authentication_async.py
- :start-after: [START create_ta_client_with_key_async]
- :end-before: [END create_ta_client_with_key_async]
- :language: python
- :dedent: 8
- :caption: Creating the TextAnalyticsClient with endpoint and API key.
-
- .. literalinclude:: ../samples/async_samples/sample_authentication_async.py
- :start-after: [START create_ta_client_with_aad_async]
- :end-before: [END create_ta_client_with_aad_async]
- :language: python
- :dedent: 8
- :caption: Creating the TextAnalyticsClient with endpoint and token credential from Azure Active Directory.
- """
-
- def __init__( # type: ignore
- self,
- endpoint: str,
- credential: Union["AzureKeyCredential", "AsyncTokenCredential"],
- **kwargs: Any
- ) -> None:
- super(TextAnalyticsClient, self).__init__(
- endpoint=endpoint,
- credential=credential,
- **kwargs
- )
- self._default_language = kwargs.pop("default_language", "en")
- self._default_country_hint = kwargs.pop("default_country_hint", "US")
-
- @distributed_trace_async
- async def detect_language( # type: ignore
- self,
- documents: Union[List[str], List[DetectLanguageInput], List[Dict[str, str]]],
- **kwargs: Any
- ) -> List[Union[DetectLanguageResult, DocumentError]]:
- """Detects Language for a batch of documents.
-
- Returns the detected language and a numeric score between zero and
- one. Scores close to one indicate 100% certainty that the identified
- language is true. See https://aka.ms/talangs for the list of enabled languages.
-
- See https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits
- for document length limits, maximum batch size, and supported text encoding.
-
- :param documents: The set of documents to process as part of this batch.
- If you wish to specify the ID and country_hint on a per-item basis you must
- use as input a list[:class:`~azure.ai.textanalytics.DetectLanguageInput`] or a list of
- dict representations of :class:`~azure.ai.textanalytics.DetectLanguageInput`, like
- `{"id": "1", "country_hint": "us", "text": "hello world"}`.
- :type documents:
- list[str] or list[~azure.ai.textanalytics.DetectLanguageInput]
- :keyword str country_hint: A country hint for the entire batch. Accepts two
- letter country codes specified by ISO 3166-1 alpha-2. Per-document
- country hints will take precedence over whole batch hints. Defaults to
- "US". If you don't want to use a country hint, pass the string "none".
- :keyword str model_version: This value indicates which model will
- be used for scoring, e.g. "latest", "2019-10-01". If a model-version
- is not specified, the API will default to the latest, non-preview version.
- :keyword bool show_stats: If set to true, response will contain document
- level statistics.
- :return: The combined list of :class:`~azure.ai.textanalytics.DetectLanguageResult`
- and :class:`~azure.ai.textanalytics.DocumentError` in the order the original documents
- were passed in.
- :rtype: list[~azure.ai.textanalytics.DetectLanguageResult,
- ~azure.ai.textanalytics.DocumentError]
- :raises ~azure.core.exceptions.HttpResponseError or TypeError or ValueError:
-
- .. admonition:: Example:
-
- .. literalinclude:: ../samples/async_samples/sample_detect_language_async.py
- :start-after: [START batch_detect_language_async]
- :end-before: [END batch_detect_language_async]
- :language: python
- :dedent: 8
- :caption: Detecting language in a batch of documents.
- """
- country_hint_arg = kwargs.pop("country_hint", None)
- country_hint = country_hint_arg if country_hint_arg is not None else self._default_country_hint
- docs = _validate_batch_input(documents, "country_hint", country_hint)
- model_version = kwargs.pop("model_version", None)
- show_stats = kwargs.pop("show_stats", False)
- try:
- return await self._client.languages(
- documents=docs,
- model_version=model_version,
- show_stats=show_stats,
- cls=kwargs.pop("cls", language_result),
- **kwargs
- )
- except HttpResponseError as error:
- process_batch_error(error)
-
- @distributed_trace_async
- async def recognize_entities( # type: ignore
- self,
- documents: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]],
- **kwargs: Any
- ) -> List[Union[RecognizeEntitiesResult, DocumentError]]:
- """Entity Recognition for a batch of documents.
-
- Identifies and categorizes entities in your text as people, places,
- organizations, date/time, quantities, percentages, currencies, and more.
- For the list of supported entity types, check: https://aka.ms/taner
-
- See https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits
- for document length limits, maximum batch size, and supported text encoding.
-
- :param documents: The set of documents to process as part of this batch.
- If you wish to specify the ID and language on a per-item basis you must
- use as input a list[:class:`~azure.ai.textanalytics.TextDocumentInput`] or a list of
- dict representations of :class:`~azure.ai.textanalytics.TextDocumentInput`, like
- `{"id": "1", "language": "en", "text": "hello world"}`.
- :type documents:
- list[str] or list[~azure.ai.textanalytics.TextDocumentInput]
- :keyword str language: The 2 letter ISO 639-1 representation of language for the
- entire batch. For example, use "en" for English; "es" for Spanish etc.
- If not set, uses "en" for English as default. Per-document language will
- take precedence over whole batch language. See https://aka.ms/talangs for
- supported languages in Text Analytics API.
- :keyword str model_version: This value indicates which model will
- be used for scoring, e.g. "latest", "2019-10-01". If a model-version
- is not specified, the API will default to the latest, non-preview version.
- :keyword bool show_stats: If set to true, response will contain document level statistics.
- :return: The combined list of :class:`~azure.ai.textanalytics.RecognizeEntitiesResult` and
- :class:`~azure.ai.textanalytics.DocumentError` in the order the original documents were
- passed in.
- :rtype: list[~azure.ai.textanalytics.RecognizeEntitiesResult,
- ~azure.ai.textanalytics.DocumentError]
- :raises ~azure.core.exceptions.HttpResponseError or TypeError or ValueError:
-
- .. admonition:: Example:
-
- .. literalinclude:: ../samples/async_samples/sample_recognize_entities_async.py
- :start-after: [START batch_recognize_entities_async]
- :end-before: [END batch_recognize_entities_async]
- :language: python
- :dedent: 8
- :caption: Recognize entities in a batch of documents.
- """
- language_arg = kwargs.pop("language", None)
- language = language_arg if language_arg is not None else self._default_language
- docs = _validate_batch_input(documents, "language", language)
- model_version = kwargs.pop("model_version", None)
- show_stats = kwargs.pop("show_stats", False)
- try:
- return await self._client.entities_recognition_general(
- documents=docs,
- model_version=model_version,
- show_stats=show_stats,
- cls=kwargs.pop("cls", entities_result),
- **kwargs
- )
- except HttpResponseError as error:
- process_batch_error(error)
-
- @distributed_trace_async
- async def recognize_linked_entities( # type: ignore
- self,
- documents: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]],
- **kwargs: Any
- ) -> List[Union[RecognizeLinkedEntitiesResult, DocumentError]]:
- """Recognize linked entities from a well-known knowledge base for a batch of documents.
-
- Identifies and disambiguates the identity of each entity found in text (for example,
- determining whether an occurrence of the word Mars refers to the planet, or to the
- Roman god of war). Recognized entities are associated with URLs to a well-known
- knowledge base, like Wikipedia.
-
- See https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits
- for document length limits, maximum batch size, and supported text encoding.
-
- :param documents: The set of documents to process as part of this batch.
- If you wish to specify the ID and language on a per-item basis you must
- use as input a list[:class:`~azure.ai.textanalytics.TextDocumentInput`] or a list of
- dict representations of :class:`~azure.ai.textanalytics.TextDocumentInput`, like
- `{"id": "1", "language": "en", "text": "hello world"}`.
- :type documents:
- list[str] or list[~azure.ai.textanalytics.TextDocumentInput]
- :keyword str language: The 2 letter ISO 639-1 representation of language for the
- entire batch. For example, use "en" for English; "es" for Spanish etc.
- If not set, uses "en" for English as default. Per-document language will
- take precedence over whole batch language. See https://aka.ms/talangs for
- supported languages in Text Analytics API.
- :keyword str model_version: This value indicates which model will
- be used for scoring, e.g. "latest", "2019-10-01". If a model-version
- is not specified, the API will default to the latest, non-preview version.
- :keyword bool show_stats: If set to true, response will contain document level statistics.
- :return: The combined list of :class:`~azure.ai.textanalytics.RecognizeLinkedEntitiesResult`
- and :class:`~azure.ai.textanalytics.DocumentError` in the order the original documents
- were passed in.
- :rtype: list[~azure.ai.textanalytics.RecognizeLinkedEntitiesResult,
- ~azure.ai.textanalytics.DocumentError]
- :raises ~azure.core.exceptions.HttpResponseError or TypeError or ValueError:
-
- .. admonition:: Example:
-
- .. literalinclude:: ../samples/async_samples/sample_recognize_linked_entities_async.py
- :start-after: [START batch_recognize_linked_entities_async]
- :end-before: [END batch_recognize_linked_entities_async]
- :language: python
- :dedent: 8
- :caption: Recognize linked entities in a batch of documents.
- """
- language_arg = kwargs.pop("language", None)
- language = language_arg if language_arg is not None else self._default_language
- docs = _validate_batch_input(documents, "language", language)
- model_version = kwargs.pop("model_version", None)
- show_stats = kwargs.pop("show_stats", False)
- try:
- return await self._client.entities_linking(
- documents=docs,
- model_version=model_version,
- show_stats=show_stats,
- cls=kwargs.pop("cls", linked_entities_result),
- **kwargs
- )
- except HttpResponseError as error:
- process_batch_error(error)
-
- @distributed_trace_async
- async def extract_key_phrases( # type: ignore
- self,
- documents: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]],
- **kwargs: Any
- ) -> List[Union[ExtractKeyPhrasesResult, DocumentError]]:
- """Extract Key Phrases from a batch of documents.
-
- Returns a list of strings denoting the key phrases in the input
- text. For example, for the input text "The food was delicious and there
- were wonderful staff", the API returns the main talking points: "food"
- and "wonderful staff"
-
- See https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits
- for document length limits, maximum batch size, and supported text encoding.
-
- :param documents: The set of documents to process as part of this batch.
- If you wish to specify the ID and language on a per-item basis you must
- use as input a list[:class:`~azure.ai.textanalytics.TextDocumentInput`] or a list of
- dict representations of :class:`~azure.ai.textanalytics.TextDocumentInput`, like
- `{"id": "1", "language": "en", "text": "hello world"}`.
- :type documents:
- list[str] or list[~azure.ai.textanalytics.TextDocumentInput]
- :keyword str language: The 2 letter ISO 639-1 representation of language for the
- entire batch. For example, use "en" for English; "es" for Spanish etc.
- If not set, uses "en" for English as default. Per-document language will
- take precedence over whole batch language. See https://aka.ms/talangs for
- supported languages in Text Analytics API.
- :keyword str model_version: This value indicates which model will
- be used for scoring, e.g. "latest", "2019-10-01". If a model-version
- is not specified, the API will default to the latest, non-preview version.
- :keyword bool show_stats: If set to true, response will contain document level statistics.
- :return: The combined list of :class:`~azure.ai.textanalytics.ExtractKeyPhrasesResult` and
- :class:`~azure.ai.textanalytics.DocumentError` in the order the original documents were
- passed in.
- :rtype: list[~azure.ai.textanalytics.ExtractKeyPhrasesResult,
- ~azure.ai.textanalytics.DocumentError]
- :raises ~azure.core.exceptions.HttpResponseError or TypeError or ValueError:
-
- .. admonition:: Example:
-
- .. literalinclude:: ../samples/async_samples/sample_extract_key_phrases_async.py
- :start-after: [START batch_extract_key_phrases_async]
- :end-before: [END batch_extract_key_phrases_async]
- :language: python
- :dedent: 8
- :caption: Extract the key phrases in a batch of documents.
- """
- language_arg = kwargs.pop("language", None)
- language = language_arg if language_arg is not None else self._default_language
- docs = _validate_batch_input(documents, "language", language)
- model_version = kwargs.pop("model_version", None)
- show_stats = kwargs.pop("show_stats", False)
- try:
- return await self._client.key_phrases(
- documents=docs,
- model_version=model_version,
- show_stats=show_stats,
- cls=kwargs.pop("cls", key_phrases_result),
- **kwargs
- )
- except HttpResponseError as error:
- process_batch_error(error)
-
- @distributed_trace_async
- async def analyze_sentiment( # type: ignore
- self,
- documents: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]],
- **kwargs: Any
- ) -> List[Union[AnalyzeSentimentResult, DocumentError]]:
- """Analyze sentiment for a batch of documents.
-
- Returns a sentiment prediction, as well as sentiment scores for
- each sentiment class (Positive, Negative, and Neutral) for the document
- and each sentence within it.
-
- See https://docs.microsoft.com/azure/cognitive-services/text-analytics/overview#data-limits
- for document length limits, maximum batch size, and supported text encoding.
-
- :param documents: The set of documents to process as part of this batch.
- If you wish to specify the ID and language on a per-item basis you must
- use as input a list[:class:`~azure.ai.textanalytics.TextDocumentInput`] or a list of
- dict representations of :class:`~azure.ai.textanalytics.TextDocumentInput`, like
- `{"id": "1", "language": "en", "text": "hello world"}`.
- :type documents:
- list[str] or list[~azure.ai.textanalytics.TextDocumentInput]
- :keyword str language: The 2 letter ISO 639-1 representation of language for the
- entire batch. For example, use "en" for English; "es" for Spanish etc.
- If not set, uses "en" for English as default. Per-document language will
- take precedence over whole batch language. See https://aka.ms/talangs for
- supported languages in Text Analytics API.
- :keyword str model_version: This value indicates which model will
- be used for scoring, e.g. "latest", "2019-10-01". If a model-version
- is not specified, the API will default to the latest, non-preview version.
- :keyword bool show_stats: If set to true, response will contain document level statistics.
- :return: The combined list of :class:`~azure.ai.textanalytics.AnalyzeSentimentResult` and
- :class:`~azure.ai.textanalytics.DocumentError` in the order the original documents were
- passed in.
- :rtype: list[~azure.ai.textanalytics.AnalyzeSentimentResult,
- ~azure.ai.textanalytics.DocumentError]
- :raises ~azure.core.exceptions.HttpResponseError or TypeError or ValueError:
-
- .. admonition:: Example:
-
- .. literalinclude:: ../samples/async_samples/sample_analyze_sentiment_async.py
- :start-after: [START batch_analyze_sentiment_async]
- :end-before: [END batch_analyze_sentiment_async]
- :language: python
- :dedent: 8
- :caption: Analyze sentiment in a batch of documents.
- """
- language_arg = kwargs.pop("language", None)
- language = language_arg if language_arg is not None else self._default_language
- docs = _validate_batch_input(documents, "language", language)
- model_version = kwargs.pop("model_version", None)
- show_stats = kwargs.pop("show_stats", False)
- try:
- return await self._client.sentiment(
- documents=docs,
- model_version=model_version,
- show_stats=show_stats,
- cls=kwargs.pop("cls", sentiment_result),
- **kwargs
- )
- except HttpResponseError as error:
- process_batch_error(error)
diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/__init__.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_0/__init__.py
similarity index 66%
rename from sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/__init__.py
rename to sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_0/__init__.py
index bab4e1ff9cac..48d59a14cb3c 100644
--- a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/__init__.py
+++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_0/__init__.py
@@ -1,16 +1,19 @@
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for license information.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
# --------------------------------------------------------------------------
+from ._configuration import TextAnalyticsClientConfiguration
from ._text_analytics_client import TextAnalyticsClient
-__all__ = ['TextAnalyticsClient']
+__all__ = ['TextAnalyticsClient', 'TextAnalyticsClientConfiguration']
+
+from .version import VERSION
+
+__version__ = VERSION
-try:
- from ._patch import patch_sdk
- patch_sdk()
-except ImportError:
- pass
diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_0/_configuration.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_0/_configuration.py
new file mode 100644
index 000000000000..6310dc3c29fe
--- /dev/null
+++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_0/_configuration.py
@@ -0,0 +1,47 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from msrest import Configuration
+
+from .version import VERSION
+
+
+class TextAnalyticsClientConfiguration(Configuration):
+ """Configuration for TextAnalyticsClient
+ Note that all parameters used to create this instance are saved as instance
+ attributes.
+
+ :param endpoint: Supported Cognitive Services endpoints (protocol and
+ hostname, for example: https://westus.api.cognitive.microsoft.com).
+ :type endpoint: str
+ :param credentials: Subscription credentials which uniquely identify
+ client subscription.
+ :type credentials: None
+ """
+
+ def __init__(
+ self, endpoint, credentials):
+
+ if endpoint is None:
+ raise ValueError("Parameter 'endpoint' must not be None.")
+ if credentials is None:
+ raise ValueError("Parameter 'credentials' must not be None.")
+ base_url = '{Endpoint}/text/analytics/v3.0'
+
+ super(TextAnalyticsClientConfiguration, self).__init__(base_url)
+
+ # Starting Autorest.Python 4.0.64, make connection pool activated by default
+ self.keep_alive = True
+
+ self.add_user_agent('azure-ai-textanalytics/{}'.format(VERSION))
+
+ self.endpoint = endpoint
+ self.credentials = credentials
diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_0/_text_analytics_client.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_0/_text_analytics_client.py
new file mode 100644
index 000000000000..f55d80d10bee
--- /dev/null
+++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_0/_text_analytics_client.py
@@ -0,0 +1,44 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from msrest.service_client import SDKClient
+from msrest import Serializer, Deserializer
+
+from ._configuration import TextAnalyticsClientConfiguration
+from .operations import TextAnalyticsClientOperationsMixin
+from . import models
+
+
+class TextAnalyticsClient(TextAnalyticsClientOperationsMixin, SDKClient):
+ """The Text Analytics API is a suite of text analytics web services built with best-in-class Microsoft machine learning algorithms. The API can be used to analyze unstructured text for tasks such as sentiment analysis, key phrase extraction and language detection. No training data is needed to use this API; just bring your text data. This API uses advanced natural language processing techniques to deliver best in class predictions. Further documentation can be found in https://docs.microsoft.com/en-us/azure/cognitive-services/text-analytics/overview
+
+ :ivar config: Configuration for client.
+ :vartype config: TextAnalyticsClientConfiguration
+
+ :param endpoint: Supported Cognitive Services endpoints (protocol and
+ hostname, for example: https://westus.api.cognitive.microsoft.com).
+ :type endpoint: str
+ :param credentials: Subscription credentials which uniquely identify
+ client subscription.
+ :type credentials: None
+ """
+
+ def __init__(
+ self, endpoint, credentials):
+
+ self.config = TextAnalyticsClientConfiguration(endpoint, credentials)
+ super(TextAnalyticsClient, self).__init__(self.config.credentials, self.config)
+
+ client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
+ self.api_version = 'v3.0'
+ self._serialize = Serializer(client_models)
+ self._deserialize = Deserializer(client_models)
+
diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/models/__init__.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_0/models/__init__.py
similarity index 62%
rename from sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/models/__init__.py
rename to sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_0/models/__init__.py
index 474336e92e7a..dee6d449fb88 100644
--- a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/models/__init__.py
+++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_0/models/__init__.py
@@ -1,9 +1,12 @@
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for license information.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
# --------------------------------------------------------------------------
try:
@@ -18,6 +21,7 @@
from ._models_py3 import EntitiesResult
from ._models_py3 import Entity
from ._models_py3 import EntityLinkingResult
+ from ._models_py3 import ErrorResponse, ErrorResponseException
from ._models_py3 import InnerError
from ._models_py3 import KeyPhraseResult
from ._models_py3 import LanguageBatchInput
@@ -34,33 +38,33 @@
from ._models_py3 import TextAnalyticsError
from ._models_py3 import TextAnalyticsWarning
except (SyntaxError, ImportError):
- from ._models import DetectedLanguage # type: ignore
- from ._models import DocumentEntities # type: ignore
- from ._models import DocumentError # type: ignore
- from ._models import DocumentKeyPhrases # type: ignore
- from ._models import DocumentLanguage # type: ignore
- from ._models import DocumentLinkedEntities # type: ignore
- from ._models import DocumentSentiment # type: ignore
- from ._models import DocumentStatistics # type: ignore
- from ._models import EntitiesResult # type: ignore
- from ._models import Entity # type: ignore
- from ._models import EntityLinkingResult # type: ignore
- from ._models import InnerError # type: ignore
- from ._models import KeyPhraseResult # type: ignore
- from ._models import LanguageBatchInput # type: ignore
- from ._models import LanguageInput # type: ignore
- from ._models import LanguageResult # type: ignore
- from ._models import LinkedEntity # type: ignore
- from ._models import Match # type: ignore
- from ._models import MultiLanguageBatchInput # type: ignore
- from ._models import MultiLanguageInput # type: ignore
- from ._models import RequestStatistics # type: ignore
- from ._models import SentenceSentiment # type: ignore
- from ._models import SentimentConfidenceScorePerLabel # type: ignore
- from ._models import SentimentResponse # type: ignore
- from ._models import TextAnalyticsError # type: ignore
- from ._models import TextAnalyticsWarning # type: ignore
-
+ from ._models import DetectedLanguage
+ from ._models import DocumentEntities
+ from ._models import DocumentError
+ from ._models import DocumentKeyPhrases
+ from ._models import DocumentLanguage
+ from ._models import DocumentLinkedEntities
+ from ._models import DocumentSentiment
+ from ._models import DocumentStatistics
+ from ._models import EntitiesResult
+ from ._models import Entity
+ from ._models import EntityLinkingResult
+ from ._models import ErrorResponse, ErrorResponseException
+ from ._models import InnerError
+ from ._models import KeyPhraseResult
+ from ._models import LanguageBatchInput
+ from ._models import LanguageInput
+ from ._models import LanguageResult
+ from ._models import LinkedEntity
+ from ._models import Match
+ from ._models import MultiLanguageBatchInput
+ from ._models import MultiLanguageInput
+ from ._models import RequestStatistics
+ from ._models import SentenceSentiment
+ from ._models import SentimentConfidenceScorePerLabel
+ from ._models import SentimentResponse
+ from ._models import TextAnalyticsError
+ from ._models import TextAnalyticsWarning
from ._text_analytics_client_enums import (
DocumentSentimentValue,
ErrorCodeValue,
@@ -81,6 +85,7 @@
'EntitiesResult',
'Entity',
'EntityLinkingResult',
+ 'ErrorResponse', 'ErrorResponseException',
'InnerError',
'KeyPhraseResult',
'LanguageBatchInput',
@@ -96,9 +101,9 @@
'SentimentResponse',
'TextAnalyticsError',
'TextAnalyticsWarning',
- 'DocumentSentimentValue',
'ErrorCodeValue',
'InnerErrorCodeValue',
- 'SentenceSentimentValue',
'WarningCodeValue',
+ 'DocumentSentimentValue',
+ 'SentenceSentimentValue',
]
diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/models/_models.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_0/models/_models.py
similarity index 62%
rename from sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/models/_models.py
rename to sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_0/models/_models.py
index 9c178ac255dd..0371a1c86ba5 100644
--- a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/models/_models.py
+++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_0/models/_models.py
@@ -1,27 +1,32 @@
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for license information.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
# --------------------------------------------------------------------------
-from azure.core.exceptions import HttpResponseError
-import msrest.serialization
+from msrest.serialization import Model
+from msrest.exceptions import HttpOperationError
-class DetectedLanguage(msrest.serialization.Model):
+class DetectedLanguage(Model):
"""DetectedLanguage.
All required parameters must be populated in order to send to Azure.
- :param name: Required. Long name of a detected language (e.g. English, French).
+ :param name: Required. Long name of a detected language (e.g. English,
+ French).
:type name: str
- :param iso6391_name: Required. A two letter representation of the detected language according
- to the ISO 639-1 standard (e.g. en, fr).
+ :param iso6391_name: Required. A two letter representation of the detected
+ language according to the ISO 639-1 standard (e.g. en, fr).
:type iso6391_name: str
- :param confidence_score: Required. A confidence score between 0 and 1. Scores close to 1
- indicate 100% certainty that the identified language is true.
+ :param confidence_score: Required. A confidence score between 0 and 1.
+ Scores close to 1 indicate 100% certainty that the identified language is
+ true.
:type confidence_score: float
"""
@@ -37,17 +42,14 @@ class DetectedLanguage(msrest.serialization.Model):
'confidence_score': {'key': 'confidenceScore', 'type': 'float'},
}
- def __init__(
- self,
- **kwargs
- ):
+ def __init__(self, **kwargs):
super(DetectedLanguage, self).__init__(**kwargs)
- self.name = kwargs['name']
- self.iso6391_name = kwargs['iso6391_name']
- self.confidence_score = kwargs['confidence_score']
+ self.name = kwargs.get('name', None)
+ self.iso6391_name = kwargs.get('iso6391_name', None)
+ self.confidence_score = kwargs.get('confidence_score', None)
-class DocumentEntities(msrest.serialization.Model):
+class DocumentEntities(Model):
"""DocumentEntities.
All required parameters must be populated in order to send to Azure.
@@ -55,12 +57,13 @@ class DocumentEntities(msrest.serialization.Model):
:param id: Required. Unique, non-empty document identifier.
:type id: str
:param entities: Required. Recognized entities in the document.
- :type entities: list[~azure.ai.textanalytics.models.Entity]
+ :type entities: list[~azure.ai.textanalytics.v3_0.models.Entity]
:param warnings: Required. Warnings encountered while processing document.
- :type warnings: list[~azure.ai.textanalytics.models.TextAnalyticsWarning]
- :param statistics: if showStats=true was specified in the request this field will contain
- information about the document payload.
- :type statistics: ~azure.ai.textanalytics.models.DocumentStatistics
+ :type warnings:
+ list[~azure.ai.textanalytics.v3_0.models.TextAnalyticsWarning]
+ :param statistics: if showStats=true was specified in the request this
+ field will contain information about the document payload.
+ :type statistics: ~azure.ai.textanalytics.v3_0.models.DocumentStatistics
"""
_validation = {
@@ -76,18 +79,15 @@ class DocumentEntities(msrest.serialization.Model):
'statistics': {'key': 'statistics', 'type': 'DocumentStatistics'},
}
- def __init__(
- self,
- **kwargs
- ):
+ def __init__(self, **kwargs):
super(DocumentEntities, self).__init__(**kwargs)
- self.id = kwargs['id']
- self.entities = kwargs['entities']
- self.warnings = kwargs['warnings']
+ self.id = kwargs.get('id', None)
+ self.entities = kwargs.get('entities', None)
+ self.warnings = kwargs.get('warnings', None)
self.statistics = kwargs.get('statistics', None)
-class DocumentError(msrest.serialization.Model):
+class DocumentError(Model):
"""DocumentError.
All required parameters must be populated in order to send to Azure.
@@ -95,7 +95,7 @@ class DocumentError(msrest.serialization.Model):
:param id: Required. Document Id.
:type id: str
:param error: Required. Document Error.
- :type error: ~azure.ai.textanalytics.models.TextAnalyticsError
+ :type error: ~azure.ai.textanalytics.v3_0.models.TextAnalyticsError
"""
_validation = {
@@ -108,30 +108,29 @@ class DocumentError(msrest.serialization.Model):
'error': {'key': 'error', 'type': 'TextAnalyticsError'},
}
- def __init__(
- self,
- **kwargs
- ):
+ def __init__(self, **kwargs):
super(DocumentError, self).__init__(**kwargs)
- self.id = kwargs['id']
- self.error = kwargs['error']
+ self.id = kwargs.get('id', None)
+ self.error = kwargs.get('error', None)
-class DocumentKeyPhrases(msrest.serialization.Model):
+class DocumentKeyPhrases(Model):
"""DocumentKeyPhrases.
All required parameters must be populated in order to send to Azure.
:param id: Required. Unique, non-empty document identifier.
:type id: str
- :param key_phrases: Required. A list of representative words or phrases. The number of key
- phrases returned is proportional to the number of words in the input document.
+ :param key_phrases: Required. A list of representative words or phrases.
+ The number of key phrases returned is proportional to the number of words
+ in the input document.
:type key_phrases: list[str]
:param warnings: Required. Warnings encountered while processing document.
- :type warnings: list[~azure.ai.textanalytics.models.TextAnalyticsWarning]
- :param statistics: if showStats=true was specified in the request this field will contain
- information about the document payload.
- :type statistics: ~azure.ai.textanalytics.models.DocumentStatistics
+ :type warnings:
+ list[~azure.ai.textanalytics.v3_0.models.TextAnalyticsWarning]
+ :param statistics: if showStats=true was specified in the request this
+ field will contain information about the document payload.
+ :type statistics: ~azure.ai.textanalytics.v3_0.models.DocumentStatistics
"""
_validation = {
@@ -147,18 +146,15 @@ class DocumentKeyPhrases(msrest.serialization.Model):
'statistics': {'key': 'statistics', 'type': 'DocumentStatistics'},
}
- def __init__(
- self,
- **kwargs
- ):
+ def __init__(self, **kwargs):
super(DocumentKeyPhrases, self).__init__(**kwargs)
- self.id = kwargs['id']
- self.key_phrases = kwargs['key_phrases']
- self.warnings = kwargs['warnings']
+ self.id = kwargs.get('id', None)
+ self.key_phrases = kwargs.get('key_phrases', None)
+ self.warnings = kwargs.get('warnings', None)
self.statistics = kwargs.get('statistics', None)
-class DocumentLanguage(msrest.serialization.Model):
+class DocumentLanguage(Model):
"""DocumentLanguage.
All required parameters must be populated in order to send to Azure.
@@ -166,12 +162,14 @@ class DocumentLanguage(msrest.serialization.Model):
:param id: Required. Unique, non-empty document identifier.
:type id: str
:param detected_language: Required. Detected Language.
- :type detected_language: ~azure.ai.textanalytics.models.DetectedLanguage
+ :type detected_language:
+ ~azure.ai.textanalytics.v3_0.models.DetectedLanguage
:param warnings: Required. Warnings encountered while processing document.
- :type warnings: list[~azure.ai.textanalytics.models.TextAnalyticsWarning]
- :param statistics: if showStats=true was specified in the request this field will contain
- information about the document payload.
- :type statistics: ~azure.ai.textanalytics.models.DocumentStatistics
+ :type warnings:
+ list[~azure.ai.textanalytics.v3_0.models.TextAnalyticsWarning]
+ :param statistics: if showStats=true was specified in the request this
+ field will contain information about the document payload.
+ :type statistics: ~azure.ai.textanalytics.v3_0.models.DocumentStatistics
"""
_validation = {
@@ -187,18 +185,15 @@ class DocumentLanguage(msrest.serialization.Model):
'statistics': {'key': 'statistics', 'type': 'DocumentStatistics'},
}
- def __init__(
- self,
- **kwargs
- ):
+ def __init__(self, **kwargs):
super(DocumentLanguage, self).__init__(**kwargs)
- self.id = kwargs['id']
- self.detected_language = kwargs['detected_language']
- self.warnings = kwargs['warnings']
+ self.id = kwargs.get('id', None)
+ self.detected_language = kwargs.get('detected_language', None)
+ self.warnings = kwargs.get('warnings', None)
self.statistics = kwargs.get('statistics', None)
-class DocumentLinkedEntities(msrest.serialization.Model):
+class DocumentLinkedEntities(Model):
"""DocumentLinkedEntities.
All required parameters must be populated in order to send to Azure.
@@ -206,12 +201,13 @@ class DocumentLinkedEntities(msrest.serialization.Model):
:param id: Required. Unique, non-empty document identifier.
:type id: str
:param entities: Required. Recognized well-known entities in the document.
- :type entities: list[~azure.ai.textanalytics.models.LinkedEntity]
+ :type entities: list[~azure.ai.textanalytics.v3_0.models.LinkedEntity]
:param warnings: Required. Warnings encountered while processing document.
- :type warnings: list[~azure.ai.textanalytics.models.TextAnalyticsWarning]
- :param statistics: if showStats=true was specified in the request this field will contain
- information about the document payload.
- :type statistics: ~azure.ai.textanalytics.models.DocumentStatistics
+ :type warnings:
+ list[~azure.ai.textanalytics.v3_0.models.TextAnalyticsWarning]
+ :param statistics: if showStats=true was specified in the request this
+ field will contain information about the document payload.
+ :type statistics: ~azure.ai.textanalytics.v3_0.models.DocumentStatistics
"""
_validation = {
@@ -227,37 +223,38 @@ class DocumentLinkedEntities(msrest.serialization.Model):
'statistics': {'key': 'statistics', 'type': 'DocumentStatistics'},
}
- def __init__(
- self,
- **kwargs
- ):
+ def __init__(self, **kwargs):
super(DocumentLinkedEntities, self).__init__(**kwargs)
- self.id = kwargs['id']
- self.entities = kwargs['entities']
- self.warnings = kwargs['warnings']
+ self.id = kwargs.get('id', None)
+ self.entities = kwargs.get('entities', None)
+ self.warnings = kwargs.get('warnings', None)
self.statistics = kwargs.get('statistics', None)
-class DocumentSentiment(msrest.serialization.Model):
+class DocumentSentiment(Model):
"""DocumentSentiment.
All required parameters must be populated in order to send to Azure.
:param id: Required. Unique, non-empty document identifier.
:type id: str
- :param sentiment: Required. Predicted sentiment for document (Negative, Neutral, Positive, or
- Mixed). Possible values include: "positive", "neutral", "negative", "mixed".
- :type sentiment: str or ~azure.ai.textanalytics.models.DocumentSentimentValue
- :param statistics: if showStats=true was specified in the request this field will contain
- information about the document payload.
- :type statistics: ~azure.ai.textanalytics.models.DocumentStatistics
- :param confidence_scores: Required. Document level sentiment confidence scores between 0 and 1
- for each sentiment class.
- :type confidence_scores: ~azure.ai.textanalytics.models.SentimentConfidenceScorePerLabel
+ :param sentiment: Required. Predicted sentiment for document (Negative,
+ Neutral, Positive, or Mixed). Possible values include: 'positive',
+ 'neutral', 'negative', 'mixed'
+ :type sentiment: str or
+ ~azure.ai.textanalytics.v3_0.models.DocumentSentimentValue
+ :param statistics:
+ :type statistics: ~azure.ai.textanalytics.v3_0.models.DocumentStatistics
+ :param confidence_scores: Required. Document level sentiment confidence
+ scores between 0 and 1 for each sentiment class.
+ :type confidence_scores:
+ ~azure.ai.textanalytics.v3_0.models.SentimentConfidenceScorePerLabel
:param sentences: Required. Sentence level sentiment analysis.
- :type sentences: list[~azure.ai.textanalytics.models.SentenceSentiment]
+ :type sentences:
+ list[~azure.ai.textanalytics.v3_0.models.SentenceSentiment]
:param warnings: Required. Warnings encountered while processing document.
- :type warnings: list[~azure.ai.textanalytics.models.TextAnalyticsWarning]
+ :type warnings:
+ list[~azure.ai.textanalytics.v3_0.models.TextAnalyticsWarning]
"""
_validation = {
@@ -270,34 +267,34 @@ class DocumentSentiment(msrest.serialization.Model):
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
- 'sentiment': {'key': 'sentiment', 'type': 'str'},
+ 'sentiment': {'key': 'sentiment', 'type': 'DocumentSentimentValue'},
'statistics': {'key': 'statistics', 'type': 'DocumentStatistics'},
'confidence_scores': {'key': 'confidenceScores', 'type': 'SentimentConfidenceScorePerLabel'},
'sentences': {'key': 'sentences', 'type': '[SentenceSentiment]'},
'warnings': {'key': 'warnings', 'type': '[TextAnalyticsWarning]'},
}
- def __init__(
- self,
- **kwargs
- ):
+ def __init__(self, **kwargs):
super(DocumentSentiment, self).__init__(**kwargs)
- self.id = kwargs['id']
- self.sentiment = kwargs['sentiment']
+ self.id = kwargs.get('id', None)
+ self.sentiment = kwargs.get('sentiment', None)
self.statistics = kwargs.get('statistics', None)
- self.confidence_scores = kwargs['confidence_scores']
- self.sentences = kwargs['sentences']
- self.warnings = kwargs['warnings']
+ self.confidence_scores = kwargs.get('confidence_scores', None)
+ self.sentences = kwargs.get('sentences', None)
+ self.warnings = kwargs.get('warnings', None)
-class DocumentStatistics(msrest.serialization.Model):
- """if showStats=true was specified in the request this field will contain information about the document payload.
+class DocumentStatistics(Model):
+ """if showStats=true was specified in the request this field will contain
+ information about the document payload.
All required parameters must be populated in order to send to Azure.
- :param characters_count: Required. Number of text elements recognized in the document.
+ :param characters_count: Required. Number of text elements recognized in
+ the document.
:type characters_count: int
- :param transactions_count: Required. Number of transactions for the document.
+ :param transactions_count: Required. Number of transactions for the
+ document.
:type transactions_count: int
"""
@@ -311,28 +308,26 @@ class DocumentStatistics(msrest.serialization.Model):
'transactions_count': {'key': 'transactionsCount', 'type': 'int'},
}
- def __init__(
- self,
- **kwargs
- ):
+ def __init__(self, **kwargs):
super(DocumentStatistics, self).__init__(**kwargs)
- self.characters_count = kwargs['characters_count']
- self.transactions_count = kwargs['transactions_count']
+ self.characters_count = kwargs.get('characters_count', None)
+ self.transactions_count = kwargs.get('transactions_count', None)
-class EntitiesResult(msrest.serialization.Model):
+class EntitiesResult(Model):
"""EntitiesResult.
All required parameters must be populated in order to send to Azure.
- :param documents: Required. Response by document.
- :type documents: list[~azure.ai.textanalytics.models.DocumentEntities]
+ :param documents: Required. Response by document
+ :type documents:
+ list[~azure.ai.textanalytics.v3_0.models.DocumentEntities]
:param errors: Required. Errors by document id.
- :type errors: list[~azure.ai.textanalytics.models.DocumentError]
- :param statistics: if showStats=true was specified in the request this field will contain
- information about the request payload.
- :type statistics: ~azure.ai.textanalytics.models.RequestStatistics
- :param model_version: Required. This field indicates which model is used for scoring.
+ :type errors: list[~azure.ai.textanalytics.v3_0.models.DocumentError]
+ :param statistics:
+ :type statistics: ~azure.ai.textanalytics.v3_0.models.RequestStatistics
+ :param model_version: Required. This field indicates which model is used
+ for scoring.
:type model_version: str
"""
@@ -349,33 +344,34 @@ class EntitiesResult(msrest.serialization.Model):
'model_version': {'key': 'modelVersion', 'type': 'str'},
}
- def __init__(
- self,
- **kwargs
- ):
+ def __init__(self, **kwargs):
super(EntitiesResult, self).__init__(**kwargs)
- self.documents = kwargs['documents']
- self.errors = kwargs['errors']
+ self.documents = kwargs.get('documents', None)
+ self.errors = kwargs.get('errors', None)
self.statistics = kwargs.get('statistics', None)
- self.model_version = kwargs['model_version']
+ self.model_version = kwargs.get('model_version', None)
-class Entity(msrest.serialization.Model):
+class Entity(Model):
"""Entity.
All required parameters must be populated in order to send to Azure.
:param text: Required. Entity text as appears in the request.
:type text: str
- :param category: Required. Entity type, such as Person/Location/Org/SSN etc.
+ :param category: Required. Entity type, such as Person/Location/Org/SSN
+ etc
:type category: str
- :param subcategory: Entity sub type, such as Age/Year/TimeRange etc.
+ :param subcategory: Entity sub type, such as Age/Year/TimeRange etc
:type subcategory: str
- :param offset: Required. Start position (in Unicode characters) for the entity text.
+ :param offset: Required. Start position (in Unicode characters) for the
+ entity text.
:type offset: int
- :param length: Required. Length (in Unicode characters) for the entity text.
+ :param length: Required. Length (in Unicode characters) for the entity
+ text.
:type length: int
- :param confidence_score: Required. Confidence score between 0 and 1 of the extracted entity.
+ :param confidence_score: Required. Confidence score between 0 and 1 of the
+ extracted entity.
:type confidence_score: float
"""
@@ -396,32 +392,30 @@ class Entity(msrest.serialization.Model):
'confidence_score': {'key': 'confidenceScore', 'type': 'float'},
}
- def __init__(
- self,
- **kwargs
- ):
+ def __init__(self, **kwargs):
super(Entity, self).__init__(**kwargs)
- self.text = kwargs['text']
- self.category = kwargs['category']
+ self.text = kwargs.get('text', None)
+ self.category = kwargs.get('category', None)
self.subcategory = kwargs.get('subcategory', None)
- self.offset = kwargs['offset']
- self.length = kwargs['length']
- self.confidence_score = kwargs['confidence_score']
+ self.offset = kwargs.get('offset', None)
+ self.length = kwargs.get('length', None)
+ self.confidence_score = kwargs.get('confidence_score', None)
-class EntityLinkingResult(msrest.serialization.Model):
+class EntityLinkingResult(Model):
"""EntityLinkingResult.
All required parameters must be populated in order to send to Azure.
- :param documents: Required. Response by document.
- :type documents: list[~azure.ai.textanalytics.models.DocumentLinkedEntities]
+ :param documents: Required. Response by document
+ :type documents:
+ list[~azure.ai.textanalytics.v3_0.models.DocumentLinkedEntities]
:param errors: Required. Errors by document id.
- :type errors: list[~azure.ai.textanalytics.models.DocumentError]
- :param statistics: if showStats=true was specified in the request this field will contain
- information about the request payload.
- :type statistics: ~azure.ai.textanalytics.models.RequestStatistics
- :param model_version: Required. This field indicates which model is used for scoring.
+ :type errors: list[~azure.ai.textanalytics.v3_0.models.DocumentError]
+ :param statistics:
+ :type statistics: ~azure.ai.textanalytics.v3_0.models.RequestStatistics
+ :param model_version: Required. This field indicates which model is used
+ for scoring.
:type model_version: str
"""
@@ -438,27 +432,58 @@ class EntityLinkingResult(msrest.serialization.Model):
'model_version': {'key': 'modelVersion', 'type': 'str'},
}
- def __init__(
- self,
- **kwargs
- ):
+ def __init__(self, **kwargs):
super(EntityLinkingResult, self).__init__(**kwargs)
- self.documents = kwargs['documents']
- self.errors = kwargs['errors']
+ self.documents = kwargs.get('documents', None)
+ self.errors = kwargs.get('errors', None)
self.statistics = kwargs.get('statistics', None)
- self.model_version = kwargs['model_version']
+ self.model_version = kwargs.get('model_version', None)
+
+
+class ErrorResponse(Model):
+ """ErrorResponse.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param error: Required. Document Error.
+ :type error: ~azure.ai.textanalytics.v3_0.models.TextAnalyticsError
+ """
+
+ _validation = {
+ 'error': {'required': True},
+ }
+ _attribute_map = {
+ 'error': {'key': 'error', 'type': 'TextAnalyticsError'},
+ }
+
+ def __init__(self, **kwargs):
+ super(ErrorResponse, self).__init__(**kwargs)
+ self.error = kwargs.get('error', None)
+
+
+class ErrorResponseException(HttpOperationError):
+ """Server responsed with exception of type: 'ErrorResponse'.
+
+ :param deserialize: A deserializer
+ :param response: Server response to be deserialized.
+ """
+
+ def __init__(self, deserialize, response, *args):
-class InnerError(msrest.serialization.Model):
+ super(ErrorResponseException, self).__init__(deserialize, response, 'ErrorResponse', *args)
+
+
+class InnerError(Model):
"""InnerError.
All required parameters must be populated in order to send to Azure.
- :param code: Required. Error code. Possible values include: "invalidParameterValue",
- "invalidRequestBodyFormat", "emptyRequest", "missingInputRecords", "invalidDocument",
- "modelVersionIncorrect", "invalidDocumentBatch", "unsupportedLanguageCode",
- "invalidCountryHint".
- :type code: str or ~azure.ai.textanalytics.models.InnerErrorCodeValue
+ :param code: Required. Error code. Possible values include:
+ 'invalidParameterValue', 'invalidRequestBodyFormat', 'emptyRequest',
+ 'missingInputRecords', 'invalidDocument', 'modelVersionIncorrect',
+ 'invalidDocumentBatch', 'unsupportedLanguageCode', 'invalidCountryHint'
+ :type code: str or ~azure.ai.textanalytics.v3_0.models.InnerErrorCodeValue
:param message: Required. Error message.
:type message: str
:param details: Error details.
@@ -466,7 +491,7 @@ class InnerError(msrest.serialization.Model):
:param target: Error target.
:type target: str
:param innererror: Inner error contains more specific information.
- :type innererror: ~azure.ai.textanalytics.models.InnerError
+ :type innererror: ~azure.ai.textanalytics.v3_0.models.InnerError
"""
_validation = {
@@ -475,38 +500,36 @@ class InnerError(msrest.serialization.Model):
}
_attribute_map = {
- 'code': {'key': 'code', 'type': 'str'},
+ 'code': {'key': 'code', 'type': 'InnerErrorCodeValue'},
'message': {'key': 'message', 'type': 'str'},
'details': {'key': 'details', 'type': '{str}'},
'target': {'key': 'target', 'type': 'str'},
'innererror': {'key': 'innererror', 'type': 'InnerError'},
}
- def __init__(
- self,
- **kwargs
- ):
+ def __init__(self, **kwargs):
super(InnerError, self).__init__(**kwargs)
- self.code = kwargs['code']
- self.message = kwargs['message']
+ self.code = kwargs.get('code', None)
+ self.message = kwargs.get('message', None)
self.details = kwargs.get('details', None)
self.target = kwargs.get('target', None)
self.innererror = kwargs.get('innererror', None)
-class KeyPhraseResult(msrest.serialization.Model):
+class KeyPhraseResult(Model):
"""KeyPhraseResult.
All required parameters must be populated in order to send to Azure.
- :param documents: Required. Response by document.
- :type documents: list[~azure.ai.textanalytics.models.DocumentKeyPhrases]
+ :param documents: Required. Response by document
+ :type documents:
+ list[~azure.ai.textanalytics.v3_0.models.DocumentKeyPhrases]
:param errors: Required. Errors by document id.
- :type errors: list[~azure.ai.textanalytics.models.DocumentError]
- :param statistics: if showStats=true was specified in the request this field will contain
- information about the request payload.
- :type statistics: ~azure.ai.textanalytics.models.RequestStatistics
- :param model_version: Required. This field indicates which model is used for scoring.
+ :type errors: list[~azure.ai.textanalytics.v3_0.models.DocumentError]
+ :param statistics:
+ :type statistics: ~azure.ai.textanalytics.v3_0.models.RequestStatistics
+ :param model_version: Required. This field indicates which model is used
+ for scoring.
:type model_version: str
"""
@@ -523,24 +546,21 @@ class KeyPhraseResult(msrest.serialization.Model):
'model_version': {'key': 'modelVersion', 'type': 'str'},
}
- def __init__(
- self,
- **kwargs
- ):
+ def __init__(self, **kwargs):
super(KeyPhraseResult, self).__init__(**kwargs)
- self.documents = kwargs['documents']
- self.errors = kwargs['errors']
+ self.documents = kwargs.get('documents', None)
+ self.errors = kwargs.get('errors', None)
self.statistics = kwargs.get('statistics', None)
- self.model_version = kwargs['model_version']
+ self.model_version = kwargs.get('model_version', None)
-class LanguageBatchInput(msrest.serialization.Model):
+class LanguageBatchInput(Model):
"""LanguageBatchInput.
All required parameters must be populated in order to send to Azure.
:param documents: Required.
- :type documents: list[~azure.ai.textanalytics.models.LanguageInput]
+ :type documents: list[~azure.ai.textanalytics.v3_0.models.LanguageInput]
"""
_validation = {
@@ -551,15 +571,12 @@ class LanguageBatchInput(msrest.serialization.Model):
'documents': {'key': 'documents', 'type': '[LanguageInput]'},
}
- def __init__(
- self,
- **kwargs
- ):
+ def __init__(self, **kwargs):
super(LanguageBatchInput, self).__init__(**kwargs)
- self.documents = kwargs['documents']
+ self.documents = kwargs.get('documents', None)
-class LanguageInput(msrest.serialization.Model):
+class LanguageInput(Model):
"""LanguageInput.
All required parameters must be populated in order to send to Azure.
@@ -583,29 +600,27 @@ class LanguageInput(msrest.serialization.Model):
'country_hint': {'key': 'countryHint', 'type': 'str'},
}
- def __init__(
- self,
- **kwargs
- ):
+ def __init__(self, **kwargs):
super(LanguageInput, self).__init__(**kwargs)
- self.id = kwargs['id']
- self.text = kwargs['text']
+ self.id = kwargs.get('id', None)
+ self.text = kwargs.get('text', None)
self.country_hint = kwargs.get('country_hint', None)
-class LanguageResult(msrest.serialization.Model):
+class LanguageResult(Model):
"""LanguageResult.
All required parameters must be populated in order to send to Azure.
- :param documents: Required. Response by document.
- :type documents: list[~azure.ai.textanalytics.models.DocumentLanguage]
+ :param documents: Required. Response by document
+ :type documents:
+ list[~azure.ai.textanalytics.v3_0.models.DocumentLanguage]
:param errors: Required. Errors by document id.
- :type errors: list[~azure.ai.textanalytics.models.DocumentError]
- :param statistics: if showStats=true was specified in the request this field will contain
- information about the request payload.
- :type statistics: ~azure.ai.textanalytics.models.RequestStatistics
- :param model_version: Required. This field indicates which model is used for scoring.
+ :type errors: list[~azure.ai.textanalytics.v3_0.models.DocumentError]
+ :param statistics:
+ :type statistics: ~azure.ai.textanalytics.v3_0.models.RequestStatistics
+ :param model_version: Required. This field indicates which model is used
+ for scoring.
:type model_version: str
"""
@@ -622,34 +637,33 @@ class LanguageResult(msrest.serialization.Model):
'model_version': {'key': 'modelVersion', 'type': 'str'},
}
- def __init__(
- self,
- **kwargs
- ):
+ def __init__(self, **kwargs):
super(LanguageResult, self).__init__(**kwargs)
- self.documents = kwargs['documents']
- self.errors = kwargs['errors']
+ self.documents = kwargs.get('documents', None)
+ self.errors = kwargs.get('errors', None)
self.statistics = kwargs.get('statistics', None)
- self.model_version = kwargs['model_version']
+ self.model_version = kwargs.get('model_version', None)
-class LinkedEntity(msrest.serialization.Model):
+class LinkedEntity(Model):
"""LinkedEntity.
All required parameters must be populated in order to send to Azure.
:param name: Required. Entity Linking formal name.
:type name: str
- :param matches: Required. List of instances this entity appears in the text.
- :type matches: list[~azure.ai.textanalytics.models.Match]
+ :param matches: Required. List of instances this entity appears in the
+ text.
+ :type matches: list[~azure.ai.textanalytics.v3_0.models.Match]
:param language: Required. Language used in the data source.
:type language: str
- :param id: Unique identifier of the recognized entity from the data source.
+ :param id: Unique identifier of the recognized entity from the data
+ source.
:type id: str
:param url: Required. URL for the entity's page from the data source.
:type url: str
- :param data_source: Required. Data source used to extract entity linking, such as Wiki/Bing
- etc.
+ :param data_source: Required. Data source used to extract entity linking,
+ such as Wiki/Bing etc.
:type data_source: str
"""
@@ -670,32 +684,32 @@ class LinkedEntity(msrest.serialization.Model):
'data_source': {'key': 'dataSource', 'type': 'str'},
}
- def __init__(
- self,
- **kwargs
- ):
+ def __init__(self, **kwargs):
super(LinkedEntity, self).__init__(**kwargs)
- self.name = kwargs['name']
- self.matches = kwargs['matches']
- self.language = kwargs['language']
+ self.name = kwargs.get('name', None)
+ self.matches = kwargs.get('matches', None)
+ self.language = kwargs.get('language', None)
self.id = kwargs.get('id', None)
- self.url = kwargs['url']
- self.data_source = kwargs['data_source']
+ self.url = kwargs.get('url', None)
+ self.data_source = kwargs.get('data_source', None)
-class Match(msrest.serialization.Model):
+class Match(Model):
"""Match.
All required parameters must be populated in order to send to Azure.
- :param confidence_score: Required. If a well-known item is recognized, a decimal number
- denoting the confidence level between 0 and 1 will be returned.
+ :param confidence_score: Required. If a well-known item is recognized, a
+ decimal number denoting the confidence level between 0 and 1 will be
+ returned.
:type confidence_score: float
:param text: Required. Entity text as appears in the request.
:type text: str
- :param offset: Required. Start position (in Unicode characters) for the entity match text.
+ :param offset: Required. Start position (in Unicode characters) for the
+ entity match text.
:type offset: int
- :param length: Required. Length (in Unicode characters) for the entity match text.
+ :param length: Required. Length (in Unicode characters) for the entity
+ match text.
:type length: int
"""
@@ -713,24 +727,23 @@ class Match(msrest.serialization.Model):
'length': {'key': 'length', 'type': 'int'},
}
- def __init__(
- self,
- **kwargs
- ):
+ def __init__(self, **kwargs):
super(Match, self).__init__(**kwargs)
- self.confidence_score = kwargs['confidence_score']
- self.text = kwargs['text']
- self.offset = kwargs['offset']
- self.length = kwargs['length']
+ self.confidence_score = kwargs.get('confidence_score', None)
+ self.text = kwargs.get('text', None)
+ self.offset = kwargs.get('offset', None)
+ self.length = kwargs.get('length', None)
-class MultiLanguageBatchInput(msrest.serialization.Model):
+class MultiLanguageBatchInput(Model):
"""Contains a set of input documents to be analyzed by the service.
All required parameters must be populated in order to send to Azure.
- :param documents: Required. The set of documents to process as part of this batch.
- :type documents: list[~azure.ai.textanalytics.models.MultiLanguageInput]
+ :param documents: Required. The set of documents to process as part of
+ this batch.
+ :type documents:
+ list[~azure.ai.textanalytics.v3_0.models.MultiLanguageInput]
"""
_validation = {
@@ -741,15 +754,12 @@ class MultiLanguageBatchInput(msrest.serialization.Model):
'documents': {'key': 'documents', 'type': '[MultiLanguageInput]'},
}
- def __init__(
- self,
- **kwargs
- ):
+ def __init__(self, **kwargs):
super(MultiLanguageBatchInput, self).__init__(**kwargs)
- self.documents = kwargs['documents']
+ self.documents = kwargs.get('documents', None)
-class MultiLanguageInput(msrest.serialization.Model):
+class MultiLanguageInput(Model):
"""Contains an input document to be analyzed by the service.
All required parameters must be populated in order to send to Azure.
@@ -758,9 +768,9 @@ class MultiLanguageInput(msrest.serialization.Model):
:type id: str
:param text: Required. The input text to process.
:type text: str
- :param language: (Optional) This is the 2 letter ISO 639-1 representation of a language. For
- example, use "en" for English; "es" for Spanish etc. If not set, use "en" for English as
- default.
+ :param language: (Optional) This is the 2 letter ISO 639-1 representation
+ of a language. For example, use "en" for English; "es" for Spanish etc. If
+ not set, use "en" for English as default.
:type language: str
"""
@@ -775,30 +785,30 @@ class MultiLanguageInput(msrest.serialization.Model):
'language': {'key': 'language', 'type': 'str'},
}
- def __init__(
- self,
- **kwargs
- ):
+ def __init__(self, **kwargs):
super(MultiLanguageInput, self).__init__(**kwargs)
- self.id = kwargs['id']
- self.text = kwargs['text']
+ self.id = kwargs.get('id', None)
+ self.text = kwargs.get('text', None)
self.language = kwargs.get('language', None)
-class RequestStatistics(msrest.serialization.Model):
- """if showStats=true was specified in the request this field will contain information about the request payload.
+class RequestStatistics(Model):
+ """if showStats=true was specified in the request this field will contain
+ information about the request payload.
All required parameters must be populated in order to send to Azure.
- :param documents_count: Required. Number of documents submitted in the request.
+ :param documents_count: Required. Number of documents submitted in the
+ request.
:type documents_count: int
- :param valid_documents_count: Required. Number of valid documents. This excludes empty, over-
- size limit or non-supported languages documents.
+ :param valid_documents_count: Required. Number of valid documents. This
+ excludes empty, over-size limit or non-supported languages documents.
:type valid_documents_count: int
- :param erroneous_documents_count: Required. Number of invalid documents. This includes empty,
- over-size limit or non-supported languages documents.
+ :param erroneous_documents_count: Required. Number of invalid documents.
+ This includes empty, over-size limit or non-supported languages documents.
:type erroneous_documents_count: int
- :param transactions_count: Required. Number of transactions for the request.
+ :param transactions_count: Required. Number of transactions for the
+ request.
:type transactions_count: long
"""
@@ -816,37 +826,38 @@ class RequestStatistics(msrest.serialization.Model):
'transactions_count': {'key': 'transactionsCount', 'type': 'long'},
}
- def __init__(
- self,
- **kwargs
- ):
+ def __init__(self, **kwargs):
super(RequestStatistics, self).__init__(**kwargs)
- self.documents_count = kwargs['documents_count']
- self.valid_documents_count = kwargs['valid_documents_count']
- self.erroneous_documents_count = kwargs['erroneous_documents_count']
- self.transactions_count = kwargs['transactions_count']
+ self.documents_count = kwargs.get('documents_count', None)
+ self.valid_documents_count = kwargs.get('valid_documents_count', None)
+ self.erroneous_documents_count = kwargs.get('erroneous_documents_count', None)
+ self.transactions_count = kwargs.get('transactions_count', None)
-class SentenceSentiment(msrest.serialization.Model):
+class SentenceSentiment(Model):
"""SentenceSentiment.
All required parameters must be populated in order to send to Azure.
- :param text: The sentence text.
+ :param text: Required. The sentence text.
:type text: str
- :param sentiment: Required. The predicted Sentiment for the sentence. Possible values include:
- "positive", "neutral", "negative".
- :type sentiment: str or ~azure.ai.textanalytics.models.SentenceSentimentValue
- :param confidence_scores: Required. The sentiment confidence score between 0 and 1 for the
- sentence for all classes.
- :type confidence_scores: ~azure.ai.textanalytics.models.SentimentConfidenceScorePerLabel
- :param offset: Required. The sentence offset from the start of the document.
+ :param sentiment: Required. The predicted Sentiment for the sentence.
+ Possible values include: 'positive', 'neutral', 'negative'
+ :type sentiment: str or
+ ~azure.ai.textanalytics.v3_0.models.SentenceSentimentValue
+ :param confidence_scores: Required. The sentiment confidence score between
+ 0 and 1 for the sentence for all classes.
+ :type confidence_scores:
+ ~azure.ai.textanalytics.v3_0.models.SentimentConfidenceScorePerLabel
+ :param offset: Required. The sentence offset from the start of the
+ document.
:type offset: int
:param length: Required. The length of the sentence by Unicode standard.
:type length: int
"""
_validation = {
+ 'text': {'required': True},
'sentiment': {'required': True},
'confidence_scores': {'required': True},
'offset': {'required': True},
@@ -855,26 +866,24 @@ class SentenceSentiment(msrest.serialization.Model):
_attribute_map = {
'text': {'key': 'text', 'type': 'str'},
- 'sentiment': {'key': 'sentiment', 'type': 'str'},
+ 'sentiment': {'key': 'sentiment', 'type': 'SentenceSentimentValue'},
'confidence_scores': {'key': 'confidenceScores', 'type': 'SentimentConfidenceScorePerLabel'},
'offset': {'key': 'offset', 'type': 'int'},
'length': {'key': 'length', 'type': 'int'},
}
- def __init__(
- self,
- **kwargs
- ):
+ def __init__(self, **kwargs):
super(SentenceSentiment, self).__init__(**kwargs)
self.text = kwargs.get('text', None)
- self.sentiment = kwargs['sentiment']
- self.confidence_scores = kwargs['confidence_scores']
- self.offset = kwargs['offset']
- self.length = kwargs['length']
+ self.sentiment = kwargs.get('sentiment', None)
+ self.confidence_scores = kwargs.get('confidence_scores', None)
+ self.offset = kwargs.get('offset', None)
+ self.length = kwargs.get('length', None)
-class SentimentConfidenceScorePerLabel(msrest.serialization.Model):
- """Represents the confidence scores between 0 and 1 across all sentiment classes: positive, neutral, negative.
+class SentimentConfidenceScorePerLabel(Model):
+ """Represents the confidence scores between 0 and 1 across all sentiment
+ classes: positive, neutral, negative.
All required parameters must be populated in order to send to Azure.
@@ -898,29 +907,27 @@ class SentimentConfidenceScorePerLabel(msrest.serialization.Model):
'negative': {'key': 'negative', 'type': 'float'},
}
- def __init__(
- self,
- **kwargs
- ):
+ def __init__(self, **kwargs):
super(SentimentConfidenceScorePerLabel, self).__init__(**kwargs)
- self.positive = kwargs['positive']
- self.neutral = kwargs['neutral']
- self.negative = kwargs['negative']
+ self.positive = kwargs.get('positive', None)
+ self.neutral = kwargs.get('neutral', None)
+ self.negative = kwargs.get('negative', None)
-class SentimentResponse(msrest.serialization.Model):
+class SentimentResponse(Model):
"""SentimentResponse.
All required parameters must be populated in order to send to Azure.
:param documents: Required. Sentiment analysis per document.
- :type documents: list[~azure.ai.textanalytics.models.DocumentSentiment]
+ :type documents:
+ list[~azure.ai.textanalytics.v3_0.models.DocumentSentiment]
:param errors: Required. Errors by document id.
- :type errors: list[~azure.ai.textanalytics.models.DocumentError]
- :param statistics: if showStats=true was specified in the request this field will contain
- information about the request payload.
- :type statistics: ~azure.ai.textanalytics.models.RequestStatistics
- :param model_version: Required. This field indicates which model is used for scoring.
+ :type errors: list[~azure.ai.textanalytics.v3_0.models.DocumentError]
+ :param statistics:
+ :type statistics: ~azure.ai.textanalytics.v3_0.models.RequestStatistics
+ :param model_version: Required. This field indicates which model is used
+ for scoring.
:type model_version: str
"""
@@ -937,33 +944,33 @@ class SentimentResponse(msrest.serialization.Model):
'model_version': {'key': 'modelVersion', 'type': 'str'},
}
- def __init__(
- self,
- **kwargs
- ):
+ def __init__(self, **kwargs):
super(SentimentResponse, self).__init__(**kwargs)
- self.documents = kwargs['documents']
- self.errors = kwargs['errors']
+ self.documents = kwargs.get('documents', None)
+ self.errors = kwargs.get('errors', None)
self.statistics = kwargs.get('statistics', None)
- self.model_version = kwargs['model_version']
+ self.model_version = kwargs.get('model_version', None)
-class TextAnalyticsError(msrest.serialization.Model):
+class TextAnalyticsError(Model):
"""TextAnalyticsError.
All required parameters must be populated in order to send to Azure.
- :param code: Required. Error code. Possible values include: "invalidRequest",
- "invalidArgument", "internalServerError", "serviceUnavailable".
- :type code: str or ~azure.ai.textanalytics.models.ErrorCodeValue
+ :param code: Required. Error code. Possible values include:
+ 'invalidRequest', 'invalidArgument', 'internalServerError',
+ 'serviceUnavailable'
+ :type code: str or ~azure.ai.textanalytics.v3_0.models.ErrorCodeValue
:param message: Required. Error message.
:type message: str
:param target: Error target.
:type target: str
:param innererror: Inner error contains more specific information.
- :type innererror: ~azure.ai.textanalytics.models.InnerError
- :param details: Details about specific errors that led to this reported error.
- :type details: list[~azure.ai.textanalytics.models.TextAnalyticsError]
+ :type innererror: ~azure.ai.textanalytics.v3_0.models.InnerError
+ :param details: Details about specific errors that led to this reported
+ error.
+ :type details:
+ list[~azure.ai.textanalytics.v3_0.models.TextAnalyticsError]
"""
_validation = {
@@ -972,33 +979,30 @@ class TextAnalyticsError(msrest.serialization.Model):
}
_attribute_map = {
- 'code': {'key': 'code', 'type': 'str'},
+ 'code': {'key': 'code', 'type': 'ErrorCodeValue'},
'message': {'key': 'message', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
'innererror': {'key': 'innererror', 'type': 'InnerError'},
'details': {'key': 'details', 'type': '[TextAnalyticsError]'},
}
- def __init__(
- self,
- **kwargs
- ):
+ def __init__(self, **kwargs):
super(TextAnalyticsError, self).__init__(**kwargs)
- self.code = kwargs['code']
- self.message = kwargs['message']
+ self.code = kwargs.get('code', None)
+ self.message = kwargs.get('message', None)
self.target = kwargs.get('target', None)
self.innererror = kwargs.get('innererror', None)
self.details = kwargs.get('details', None)
-class TextAnalyticsWarning(msrest.serialization.Model):
+class TextAnalyticsWarning(Model):
"""TextAnalyticsWarning.
All required parameters must be populated in order to send to Azure.
- :param code: Required. Error code. Possible values include: "LongWordsInDocument",
- "DocumentTruncated".
- :type code: str or ~azure.ai.textanalytics.models.WarningCodeValue
+ :param code: Required. Error code. Possible values include:
+ 'LongWordsInDocument', 'DocumentTruncated'
+ :type code: str or ~azure.ai.textanalytics.v3_0.models.WarningCodeValue
:param message: Required. Warning message.
:type message: str
:param target_ref: A JSON pointer reference indicating the target object.
@@ -1011,16 +1015,13 @@ class TextAnalyticsWarning(msrest.serialization.Model):
}
_attribute_map = {
- 'code': {'key': 'code', 'type': 'str'},
+ 'code': {'key': 'code', 'type': 'WarningCodeValue'},
'message': {'key': 'message', 'type': 'str'},
'target_ref': {'key': 'targetRef', 'type': 'str'},
}
- def __init__(
- self,
- **kwargs
- ):
+ def __init__(self, **kwargs):
super(TextAnalyticsWarning, self).__init__(**kwargs)
- self.code = kwargs['code']
- self.message = kwargs['message']
+ self.code = kwargs.get('code', None)
+ self.message = kwargs.get('message', None)
self.target_ref = kwargs.get('target_ref', None)
diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/models/_models_py3.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_0/models/_models_py3.py
similarity index 64%
rename from sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/models/_models_py3.py
rename to sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_0/models/_models_py3.py
index b969f1ca74e8..090028eb1622 100644
--- a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/models/_models_py3.py
+++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_0/models/_models_py3.py
@@ -1,31 +1,32 @@
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for license information.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
# --------------------------------------------------------------------------
-from typing import Dict, List, Optional, Union
+from msrest.serialization import Model
+from msrest.exceptions import HttpOperationError
-from azure.core.exceptions import HttpResponseError
-import msrest.serialization
-from ._text_analytics_client_enums import *
-
-
-class DetectedLanguage(msrest.serialization.Model):
+class DetectedLanguage(Model):
"""DetectedLanguage.
All required parameters must be populated in order to send to Azure.
- :param name: Required. Long name of a detected language (e.g. English, French).
+ :param name: Required. Long name of a detected language (e.g. English,
+ French).
:type name: str
- :param iso6391_name: Required. A two letter representation of the detected language according
- to the ISO 639-1 standard (e.g. en, fr).
+ :param iso6391_name: Required. A two letter representation of the detected
+ language according to the ISO 639-1 standard (e.g. en, fr).
:type iso6391_name: str
- :param confidence_score: Required. A confidence score between 0 and 1. Scores close to 1
- indicate 100% certainty that the identified language is true.
+ :param confidence_score: Required. A confidence score between 0 and 1.
+ Scores close to 1 indicate 100% certainty that the identified language is
+ true.
:type confidence_score: float
"""
@@ -41,21 +42,14 @@ class DetectedLanguage(msrest.serialization.Model):
'confidence_score': {'key': 'confidenceScore', 'type': 'float'},
}
- def __init__(
- self,
- *,
- name: str,
- iso6391_name: str,
- confidence_score: float,
- **kwargs
- ):
+ def __init__(self, *, name: str, iso6391_name: str, confidence_score: float, **kwargs) -> None:
super(DetectedLanguage, self).__init__(**kwargs)
self.name = name
self.iso6391_name = iso6391_name
self.confidence_score = confidence_score
-class DocumentEntities(msrest.serialization.Model):
+class DocumentEntities(Model):
"""DocumentEntities.
All required parameters must be populated in order to send to Azure.
@@ -63,12 +57,13 @@ class DocumentEntities(msrest.serialization.Model):
:param id: Required. Unique, non-empty document identifier.
:type id: str
:param entities: Required. Recognized entities in the document.
- :type entities: list[~azure.ai.textanalytics.models.Entity]
+ :type entities: list[~azure.ai.textanalytics.v3_0.models.Entity]
:param warnings: Required. Warnings encountered while processing document.
- :type warnings: list[~azure.ai.textanalytics.models.TextAnalyticsWarning]
- :param statistics: if showStats=true was specified in the request this field will contain
- information about the document payload.
- :type statistics: ~azure.ai.textanalytics.models.DocumentStatistics
+ :type warnings:
+ list[~azure.ai.textanalytics.v3_0.models.TextAnalyticsWarning]
+ :param statistics: if showStats=true was specified in the request this
+ field will contain information about the document payload.
+ :type statistics: ~azure.ai.textanalytics.v3_0.models.DocumentStatistics
"""
_validation = {
@@ -84,15 +79,7 @@ class DocumentEntities(msrest.serialization.Model):
'statistics': {'key': 'statistics', 'type': 'DocumentStatistics'},
}
- def __init__(
- self,
- *,
- id: str,
- entities: List["Entity"],
- warnings: List["TextAnalyticsWarning"],
- statistics: Optional["DocumentStatistics"] = None,
- **kwargs
- ):
+ def __init__(self, *, id: str, entities, warnings, statistics=None, **kwargs) -> None:
super(DocumentEntities, self).__init__(**kwargs)
self.id = id
self.entities = entities
@@ -100,7 +87,7 @@ def __init__(
self.statistics = statistics
-class DocumentError(msrest.serialization.Model):
+class DocumentError(Model):
"""DocumentError.
All required parameters must be populated in order to send to Azure.
@@ -108,7 +95,7 @@ class DocumentError(msrest.serialization.Model):
:param id: Required. Document Id.
:type id: str
:param error: Required. Document Error.
- :type error: ~azure.ai.textanalytics.models.TextAnalyticsError
+ :type error: ~azure.ai.textanalytics.v3_0.models.TextAnalyticsError
"""
_validation = {
@@ -121,33 +108,29 @@ class DocumentError(msrest.serialization.Model):
'error': {'key': 'error', 'type': 'TextAnalyticsError'},
}
- def __init__(
- self,
- *,
- id: str,
- error: "TextAnalyticsError",
- **kwargs
- ):
+ def __init__(self, *, id: str, error, **kwargs) -> None:
super(DocumentError, self).__init__(**kwargs)
self.id = id
self.error = error
-class DocumentKeyPhrases(msrest.serialization.Model):
+class DocumentKeyPhrases(Model):
"""DocumentKeyPhrases.
All required parameters must be populated in order to send to Azure.
:param id: Required. Unique, non-empty document identifier.
:type id: str
- :param key_phrases: Required. A list of representative words or phrases. The number of key
- phrases returned is proportional to the number of words in the input document.
+ :param key_phrases: Required. A list of representative words or phrases.
+ The number of key phrases returned is proportional to the number of words
+ in the input document.
:type key_phrases: list[str]
:param warnings: Required. Warnings encountered while processing document.
- :type warnings: list[~azure.ai.textanalytics.models.TextAnalyticsWarning]
- :param statistics: if showStats=true was specified in the request this field will contain
- information about the document payload.
- :type statistics: ~azure.ai.textanalytics.models.DocumentStatistics
+ :type warnings:
+ list[~azure.ai.textanalytics.v3_0.models.TextAnalyticsWarning]
+ :param statistics: if showStats=true was specified in the request this
+ field will contain information about the document payload.
+ :type statistics: ~azure.ai.textanalytics.v3_0.models.DocumentStatistics
"""
_validation = {
@@ -163,15 +146,7 @@ class DocumentKeyPhrases(msrest.serialization.Model):
'statistics': {'key': 'statistics', 'type': 'DocumentStatistics'},
}
- def __init__(
- self,
- *,
- id: str,
- key_phrases: List[str],
- warnings: List["TextAnalyticsWarning"],
- statistics: Optional["DocumentStatistics"] = None,
- **kwargs
- ):
+ def __init__(self, *, id: str, key_phrases, warnings, statistics=None, **kwargs) -> None:
super(DocumentKeyPhrases, self).__init__(**kwargs)
self.id = id
self.key_phrases = key_phrases
@@ -179,7 +154,7 @@ def __init__(
self.statistics = statistics
-class DocumentLanguage(msrest.serialization.Model):
+class DocumentLanguage(Model):
"""DocumentLanguage.
All required parameters must be populated in order to send to Azure.
@@ -187,12 +162,14 @@ class DocumentLanguage(msrest.serialization.Model):
:param id: Required. Unique, non-empty document identifier.
:type id: str
:param detected_language: Required. Detected Language.
- :type detected_language: ~azure.ai.textanalytics.models.DetectedLanguage
+ :type detected_language:
+ ~azure.ai.textanalytics.v3_0.models.DetectedLanguage
:param warnings: Required. Warnings encountered while processing document.
- :type warnings: list[~azure.ai.textanalytics.models.TextAnalyticsWarning]
- :param statistics: if showStats=true was specified in the request this field will contain
- information about the document payload.
- :type statistics: ~azure.ai.textanalytics.models.DocumentStatistics
+ :type warnings:
+ list[~azure.ai.textanalytics.v3_0.models.TextAnalyticsWarning]
+ :param statistics: if showStats=true was specified in the request this
+ field will contain information about the document payload.
+ :type statistics: ~azure.ai.textanalytics.v3_0.models.DocumentStatistics
"""
_validation = {
@@ -208,15 +185,7 @@ class DocumentLanguage(msrest.serialization.Model):
'statistics': {'key': 'statistics', 'type': 'DocumentStatistics'},
}
- def __init__(
- self,
- *,
- id: str,
- detected_language: "DetectedLanguage",
- warnings: List["TextAnalyticsWarning"],
- statistics: Optional["DocumentStatistics"] = None,
- **kwargs
- ):
+ def __init__(self, *, id: str, detected_language, warnings, statistics=None, **kwargs) -> None:
super(DocumentLanguage, self).__init__(**kwargs)
self.id = id
self.detected_language = detected_language
@@ -224,7 +193,7 @@ def __init__(
self.statistics = statistics
-class DocumentLinkedEntities(msrest.serialization.Model):
+class DocumentLinkedEntities(Model):
"""DocumentLinkedEntities.
All required parameters must be populated in order to send to Azure.
@@ -232,12 +201,13 @@ class DocumentLinkedEntities(msrest.serialization.Model):
:param id: Required. Unique, non-empty document identifier.
:type id: str
:param entities: Required. Recognized well-known entities in the document.
- :type entities: list[~azure.ai.textanalytics.models.LinkedEntity]
+ :type entities: list[~azure.ai.textanalytics.v3_0.models.LinkedEntity]
:param warnings: Required. Warnings encountered while processing document.
- :type warnings: list[~azure.ai.textanalytics.models.TextAnalyticsWarning]
- :param statistics: if showStats=true was specified in the request this field will contain
- information about the document payload.
- :type statistics: ~azure.ai.textanalytics.models.DocumentStatistics
+ :type warnings:
+ list[~azure.ai.textanalytics.v3_0.models.TextAnalyticsWarning]
+ :param statistics: if showStats=true was specified in the request this
+ field will contain information about the document payload.
+ :type statistics: ~azure.ai.textanalytics.v3_0.models.DocumentStatistics
"""
_validation = {
@@ -253,15 +223,7 @@ class DocumentLinkedEntities(msrest.serialization.Model):
'statistics': {'key': 'statistics', 'type': 'DocumentStatistics'},
}
- def __init__(
- self,
- *,
- id: str,
- entities: List["LinkedEntity"],
- warnings: List["TextAnalyticsWarning"],
- statistics: Optional["DocumentStatistics"] = None,
- **kwargs
- ):
+ def __init__(self, *, id: str, entities, warnings, statistics=None, **kwargs) -> None:
super(DocumentLinkedEntities, self).__init__(**kwargs)
self.id = id
self.entities = entities
@@ -269,26 +231,30 @@ def __init__(
self.statistics = statistics
-class DocumentSentiment(msrest.serialization.Model):
+class DocumentSentiment(Model):
"""DocumentSentiment.
All required parameters must be populated in order to send to Azure.
:param id: Required. Unique, non-empty document identifier.
:type id: str
- :param sentiment: Required. Predicted sentiment for document (Negative, Neutral, Positive, or
- Mixed). Possible values include: "positive", "neutral", "negative", "mixed".
- :type sentiment: str or ~azure.ai.textanalytics.models.DocumentSentimentValue
- :param statistics: if showStats=true was specified in the request this field will contain
- information about the document payload.
- :type statistics: ~azure.ai.textanalytics.models.DocumentStatistics
- :param confidence_scores: Required. Document level sentiment confidence scores between 0 and 1
- for each sentiment class.
- :type confidence_scores: ~azure.ai.textanalytics.models.SentimentConfidenceScorePerLabel
+ :param sentiment: Required. Predicted sentiment for document (Negative,
+ Neutral, Positive, or Mixed). Possible values include: 'positive',
+ 'neutral', 'negative', 'mixed'
+ :type sentiment: str or
+ ~azure.ai.textanalytics.v3_0.models.DocumentSentimentValue
+ :param statistics:
+ :type statistics: ~azure.ai.textanalytics.v3_0.models.DocumentStatistics
+ :param confidence_scores: Required. Document level sentiment confidence
+ scores between 0 and 1 for each sentiment class.
+ :type confidence_scores:
+ ~azure.ai.textanalytics.v3_0.models.SentimentConfidenceScorePerLabel
:param sentences: Required. Sentence level sentiment analysis.
- :type sentences: list[~azure.ai.textanalytics.models.SentenceSentiment]
+ :type sentences:
+ list[~azure.ai.textanalytics.v3_0.models.SentenceSentiment]
:param warnings: Required. Warnings encountered while processing document.
- :type warnings: list[~azure.ai.textanalytics.models.TextAnalyticsWarning]
+ :type warnings:
+ list[~azure.ai.textanalytics.v3_0.models.TextAnalyticsWarning]
"""
_validation = {
@@ -301,24 +267,14 @@ class DocumentSentiment(msrest.serialization.Model):
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
- 'sentiment': {'key': 'sentiment', 'type': 'str'},
+ 'sentiment': {'key': 'sentiment', 'type': 'DocumentSentimentValue'},
'statistics': {'key': 'statistics', 'type': 'DocumentStatistics'},
'confidence_scores': {'key': 'confidenceScores', 'type': 'SentimentConfidenceScorePerLabel'},
'sentences': {'key': 'sentences', 'type': '[SentenceSentiment]'},
'warnings': {'key': 'warnings', 'type': '[TextAnalyticsWarning]'},
}
- def __init__(
- self,
- *,
- id: str,
- sentiment: Union[str, "DocumentSentimentValue"],
- confidence_scores: "SentimentConfidenceScorePerLabel",
- sentences: List["SentenceSentiment"],
- warnings: List["TextAnalyticsWarning"],
- statistics: Optional["DocumentStatistics"] = None,
- **kwargs
- ):
+ def __init__(self, *, id: str, sentiment, confidence_scores, sentences, warnings, statistics=None, **kwargs) -> None:
super(DocumentSentiment, self).__init__(**kwargs)
self.id = id
self.sentiment = sentiment
@@ -328,14 +284,17 @@ def __init__(
self.warnings = warnings
-class DocumentStatistics(msrest.serialization.Model):
- """if showStats=true was specified in the request this field will contain information about the document payload.
+class DocumentStatistics(Model):
+ """if showStats=true was specified in the request this field will contain
+ information about the document payload.
All required parameters must be populated in order to send to Azure.
- :param characters_count: Required. Number of text elements recognized in the document.
+ :param characters_count: Required. Number of text elements recognized in
+ the document.
:type characters_count: int
- :param transactions_count: Required. Number of transactions for the document.
+ :param transactions_count: Required. Number of transactions for the
+ document.
:type transactions_count: int
"""
@@ -349,31 +308,26 @@ class DocumentStatistics(msrest.serialization.Model):
'transactions_count': {'key': 'transactionsCount', 'type': 'int'},
}
- def __init__(
- self,
- *,
- characters_count: int,
- transactions_count: int,
- **kwargs
- ):
+ def __init__(self, *, characters_count: int, transactions_count: int, **kwargs) -> None:
super(DocumentStatistics, self).__init__(**kwargs)
self.characters_count = characters_count
self.transactions_count = transactions_count
-class EntitiesResult(msrest.serialization.Model):
+class EntitiesResult(Model):
"""EntitiesResult.
All required parameters must be populated in order to send to Azure.
- :param documents: Required. Response by document.
- :type documents: list[~azure.ai.textanalytics.models.DocumentEntities]
+ :param documents: Required. Response by document
+ :type documents:
+ list[~azure.ai.textanalytics.v3_0.models.DocumentEntities]
:param errors: Required. Errors by document id.
- :type errors: list[~azure.ai.textanalytics.models.DocumentError]
- :param statistics: if showStats=true was specified in the request this field will contain
- information about the request payload.
- :type statistics: ~azure.ai.textanalytics.models.RequestStatistics
- :param model_version: Required. This field indicates which model is used for scoring.
+ :type errors: list[~azure.ai.textanalytics.v3_0.models.DocumentError]
+ :param statistics:
+ :type statistics: ~azure.ai.textanalytics.v3_0.models.RequestStatistics
+ :param model_version: Required. This field indicates which model is used
+ for scoring.
:type model_version: str
"""
@@ -390,15 +344,7 @@ class EntitiesResult(msrest.serialization.Model):
'model_version': {'key': 'modelVersion', 'type': 'str'},
}
- def __init__(
- self,
- *,
- documents: List["DocumentEntities"],
- errors: List["DocumentError"],
- model_version: str,
- statistics: Optional["RequestStatistics"] = None,
- **kwargs
- ):
+ def __init__(self, *, documents, errors, model_version: str, statistics=None, **kwargs) -> None:
super(EntitiesResult, self).__init__(**kwargs)
self.documents = documents
self.errors = errors
@@ -406,22 +352,26 @@ def __init__(
self.model_version = model_version
-class Entity(msrest.serialization.Model):
+class Entity(Model):
"""Entity.
All required parameters must be populated in order to send to Azure.
:param text: Required. Entity text as appears in the request.
:type text: str
- :param category: Required. Entity type, such as Person/Location/Org/SSN etc.
+ :param category: Required. Entity type, such as Person/Location/Org/SSN
+ etc
:type category: str
- :param subcategory: Entity sub type, such as Age/Year/TimeRange etc.
+ :param subcategory: Entity sub type, such as Age/Year/TimeRange etc
:type subcategory: str
- :param offset: Required. Start position (in Unicode characters) for the entity text.
+ :param offset: Required. Start position (in Unicode characters) for the
+ entity text.
:type offset: int
- :param length: Required. Length (in Unicode characters) for the entity text.
+ :param length: Required. Length (in Unicode characters) for the entity
+ text.
:type length: int
- :param confidence_score: Required. Confidence score between 0 and 1 of the extracted entity.
+ :param confidence_score: Required. Confidence score between 0 and 1 of the
+ extracted entity.
:type confidence_score: float
"""
@@ -442,17 +392,7 @@ class Entity(msrest.serialization.Model):
'confidence_score': {'key': 'confidenceScore', 'type': 'float'},
}
- def __init__(
- self,
- *,
- text: str,
- category: str,
- offset: int,
- length: int,
- confidence_score: float,
- subcategory: Optional[str] = None,
- **kwargs
- ):
+ def __init__(self, *, text: str, category: str, offset: int, length: int, confidence_score: float, subcategory: str=None, **kwargs) -> None:
super(Entity, self).__init__(**kwargs)
self.text = text
self.category = category
@@ -462,19 +402,20 @@ def __init__(
self.confidence_score = confidence_score
-class EntityLinkingResult(msrest.serialization.Model):
+class EntityLinkingResult(Model):
"""EntityLinkingResult.
All required parameters must be populated in order to send to Azure.
- :param documents: Required. Response by document.
- :type documents: list[~azure.ai.textanalytics.models.DocumentLinkedEntities]
+ :param documents: Required. Response by document
+ :type documents:
+ list[~azure.ai.textanalytics.v3_0.models.DocumentLinkedEntities]
:param errors: Required. Errors by document id.
- :type errors: list[~azure.ai.textanalytics.models.DocumentError]
- :param statistics: if showStats=true was specified in the request this field will contain
- information about the request payload.
- :type statistics: ~azure.ai.textanalytics.models.RequestStatistics
- :param model_version: Required. This field indicates which model is used for scoring.
+ :type errors: list[~azure.ai.textanalytics.v3_0.models.DocumentError]
+ :param statistics:
+ :type statistics: ~azure.ai.textanalytics.v3_0.models.RequestStatistics
+ :param model_version: Required. This field indicates which model is used
+ for scoring.
:type model_version: str
"""
@@ -491,15 +432,7 @@ class EntityLinkingResult(msrest.serialization.Model):
'model_version': {'key': 'modelVersion', 'type': 'str'},
}
- def __init__(
- self,
- *,
- documents: List["DocumentLinkedEntities"],
- errors: List["DocumentError"],
- model_version: str,
- statistics: Optional["RequestStatistics"] = None,
- **kwargs
- ):
+ def __init__(self, *, documents, errors, model_version: str, statistics=None, **kwargs) -> None:
super(EntityLinkingResult, self).__init__(**kwargs)
self.documents = documents
self.errors = errors
@@ -507,16 +440,50 @@ def __init__(
self.model_version = model_version
-class InnerError(msrest.serialization.Model):
+class ErrorResponse(Model):
+ """ErrorResponse.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param error: Required. Document Error.
+ :type error: ~azure.ai.textanalytics.v3_0.models.TextAnalyticsError
+ """
+
+ _validation = {
+ 'error': {'required': True},
+ }
+
+ _attribute_map = {
+ 'error': {'key': 'error', 'type': 'TextAnalyticsError'},
+ }
+
+ def __init__(self, *, error, **kwargs) -> None:
+ super(ErrorResponse, self).__init__(**kwargs)
+ self.error = error
+
+
+class ErrorResponseException(HttpOperationError):
+ """Server responded with exception of type: 'ErrorResponse'.
+
+ :param deserialize: A deserializer
+ :param response: Server response to be deserialized.
+ """
+
+ def __init__(self, deserialize, response, *args):
+
+ super(ErrorResponseException, self).__init__(deserialize, response, 'ErrorResponse', *args)
+
+
+class InnerError(Model):
"""InnerError.
All required parameters must be populated in order to send to Azure.
- :param code: Required. Error code. Possible values include: "invalidParameterValue",
- "invalidRequestBodyFormat", "emptyRequest", "missingInputRecords", "invalidDocument",
- "modelVersionIncorrect", "invalidDocumentBatch", "unsupportedLanguageCode",
- "invalidCountryHint".
- :type code: str or ~azure.ai.textanalytics.models.InnerErrorCodeValue
+ :param code: Required. Error code. Possible values include:
+ 'invalidParameterValue', 'invalidRequestBodyFormat', 'emptyRequest',
+ 'missingInputRecords', 'invalidDocument', 'modelVersionIncorrect',
+ 'invalidDocumentBatch', 'unsupportedLanguageCode', 'invalidCountryHint'
+ :type code: str or ~azure.ai.textanalytics.v3_0.models.InnerErrorCodeValue
:param message: Required. Error message.
:type message: str
:param details: Error details.
@@ -524,7 +491,7 @@ class InnerError(msrest.serialization.Model):
:param target: Error target.
:type target: str
:param innererror: Inner error contains more specific information.
- :type innererror: ~azure.ai.textanalytics.models.InnerError
+ :type innererror: ~azure.ai.textanalytics.v3_0.models.InnerError
"""
_validation = {
@@ -533,23 +500,14 @@ class InnerError(msrest.serialization.Model):
}
_attribute_map = {
- 'code': {'key': 'code', 'type': 'str'},
+ 'code': {'key': 'code', 'type': 'InnerErrorCodeValue'},
'message': {'key': 'message', 'type': 'str'},
'details': {'key': 'details', 'type': '{str}'},
'target': {'key': 'target', 'type': 'str'},
'innererror': {'key': 'innererror', 'type': 'InnerError'},
}
- def __init__(
- self,
- *,
- code: Union[str, "InnerErrorCodeValue"],
- message: str,
- details: Optional[Dict[str, str]] = None,
- target: Optional[str] = None,
- innererror: Optional["InnerError"] = None,
- **kwargs
- ):
+ def __init__(self, *, code, message: str, details=None, target: str=None, innererror=None, **kwargs) -> None:
super(InnerError, self).__init__(**kwargs)
self.code = code
self.message = message
@@ -558,19 +516,20 @@ def __init__(
self.innererror = innererror
-class KeyPhraseResult(msrest.serialization.Model):
+class KeyPhraseResult(Model):
"""KeyPhraseResult.
All required parameters must be populated in order to send to Azure.
- :param documents: Required. Response by document.
- :type documents: list[~azure.ai.textanalytics.models.DocumentKeyPhrases]
+ :param documents: Required. Response by document
+ :type documents:
+ list[~azure.ai.textanalytics.v3_0.models.DocumentKeyPhrases]
:param errors: Required. Errors by document id.
- :type errors: list[~azure.ai.textanalytics.models.DocumentError]
- :param statistics: if showStats=true was specified in the request this field will contain
- information about the request payload.
- :type statistics: ~azure.ai.textanalytics.models.RequestStatistics
- :param model_version: Required. This field indicates which model is used for scoring.
+ :type errors: list[~azure.ai.textanalytics.v3_0.models.DocumentError]
+ :param statistics:
+ :type statistics: ~azure.ai.textanalytics.v3_0.models.RequestStatistics
+ :param model_version: Required. This field indicates which model is used
+ for scoring.
:type model_version: str
"""
@@ -587,15 +546,7 @@ class KeyPhraseResult(msrest.serialization.Model):
'model_version': {'key': 'modelVersion', 'type': 'str'},
}
- def __init__(
- self,
- *,
- documents: List["DocumentKeyPhrases"],
- errors: List["DocumentError"],
- model_version: str,
- statistics: Optional["RequestStatistics"] = None,
- **kwargs
- ):
+ def __init__(self, *, documents, errors, model_version: str, statistics=None, **kwargs) -> None:
super(KeyPhraseResult, self).__init__(**kwargs)
self.documents = documents
self.errors = errors
@@ -603,13 +554,13 @@ def __init__(
self.model_version = model_version
-class LanguageBatchInput(msrest.serialization.Model):
+class LanguageBatchInput(Model):
"""LanguageBatchInput.
All required parameters must be populated in order to send to Azure.
:param documents: Required.
- :type documents: list[~azure.ai.textanalytics.models.LanguageInput]
+ :type documents: list[~azure.ai.textanalytics.v3_0.models.LanguageInput]
"""
_validation = {
@@ -620,17 +571,12 @@ class LanguageBatchInput(msrest.serialization.Model):
'documents': {'key': 'documents', 'type': '[LanguageInput]'},
}
- def __init__(
- self,
- *,
- documents: List["LanguageInput"],
- **kwargs
- ):
+ def __init__(self, *, documents, **kwargs) -> None:
super(LanguageBatchInput, self).__init__(**kwargs)
self.documents = documents
-class LanguageInput(msrest.serialization.Model):
+class LanguageInput(Model):
"""LanguageInput.
All required parameters must be populated in order to send to Azure.
@@ -654,33 +600,27 @@ class LanguageInput(msrest.serialization.Model):
'country_hint': {'key': 'countryHint', 'type': 'str'},
}
- def __init__(
- self,
- *,
- id: str,
- text: str,
- country_hint: Optional[str] = None,
- **kwargs
- ):
+ def __init__(self, *, id: str, text: str, country_hint: str=None, **kwargs) -> None:
super(LanguageInput, self).__init__(**kwargs)
self.id = id
self.text = text
self.country_hint = country_hint
-class LanguageResult(msrest.serialization.Model):
+class LanguageResult(Model):
"""LanguageResult.
All required parameters must be populated in order to send to Azure.
- :param documents: Required. Response by document.
- :type documents: list[~azure.ai.textanalytics.models.DocumentLanguage]
+ :param documents: Required. Response by document
+ :type documents:
+ list[~azure.ai.textanalytics.v3_0.models.DocumentLanguage]
:param errors: Required. Errors by document id.
- :type errors: list[~azure.ai.textanalytics.models.DocumentError]
- :param statistics: if showStats=true was specified in the request this field will contain
- information about the request payload.
- :type statistics: ~azure.ai.textanalytics.models.RequestStatistics
- :param model_version: Required. This field indicates which model is used for scoring.
+ :type errors: list[~azure.ai.textanalytics.v3_0.models.DocumentError]
+ :param statistics:
+ :type statistics: ~azure.ai.textanalytics.v3_0.models.RequestStatistics
+ :param model_version: Required. This field indicates which model is used
+ for scoring.
:type model_version: str
"""
@@ -697,15 +637,7 @@ class LanguageResult(msrest.serialization.Model):
'model_version': {'key': 'modelVersion', 'type': 'str'},
}
- def __init__(
- self,
- *,
- documents: List["DocumentLanguage"],
- errors: List["DocumentError"],
- model_version: str,
- statistics: Optional["RequestStatistics"] = None,
- **kwargs
- ):
+ def __init__(self, *, documents, errors, model_version: str, statistics=None, **kwargs) -> None:
super(LanguageResult, self).__init__(**kwargs)
self.documents = documents
self.errors = errors
@@ -713,23 +645,25 @@ def __init__(
self.model_version = model_version
-class LinkedEntity(msrest.serialization.Model):
+class LinkedEntity(Model):
"""LinkedEntity.
All required parameters must be populated in order to send to Azure.
:param name: Required. Entity Linking formal name.
:type name: str
- :param matches: Required. List of instances this entity appears in the text.
- :type matches: list[~azure.ai.textanalytics.models.Match]
+ :param matches: Required. List of instances this entity appears in the
+ text.
+ :type matches: list[~azure.ai.textanalytics.v3_0.models.Match]
:param language: Required. Language used in the data source.
:type language: str
- :param id: Unique identifier of the recognized entity from the data source.
+ :param id: Unique identifier of the recognized entity from the data
+ source.
:type id: str
:param url: Required. URL for the entity's page from the data source.
:type url: str
- :param data_source: Required. Data source used to extract entity linking, such as Wiki/Bing
- etc.
+ :param data_source: Required. Data source used to extract entity linking,
+ such as Wiki/Bing etc.
:type data_source: str
"""
@@ -750,17 +684,7 @@ class LinkedEntity(msrest.serialization.Model):
'data_source': {'key': 'dataSource', 'type': 'str'},
}
- def __init__(
- self,
- *,
- name: str,
- matches: List["Match"],
- language: str,
- url: str,
- data_source: str,
- id: Optional[str] = None,
- **kwargs
- ):
+ def __init__(self, *, name: str, matches, language: str, url: str, data_source: str, id: str=None, **kwargs) -> None:
super(LinkedEntity, self).__init__(**kwargs)
self.name = name
self.matches = matches
@@ -770,19 +694,22 @@ def __init__(
self.data_source = data_source
-class Match(msrest.serialization.Model):
+class Match(Model):
"""Match.
All required parameters must be populated in order to send to Azure.
- :param confidence_score: Required. If a well-known item is recognized, a decimal number
- denoting the confidence level between 0 and 1 will be returned.
+ :param confidence_score: Required. If a well-known item is recognized, a
+ decimal number denoting the confidence level between 0 and 1 will be
+ returned.
:type confidence_score: float
:param text: Required. Entity text as appears in the request.
:type text: str
- :param offset: Required. Start position (in Unicode characters) for the entity match text.
+ :param offset: Required. Start position (in Unicode characters) for the
+ entity match text.
:type offset: int
- :param length: Required. Length (in Unicode characters) for the entity match text.
+ :param length: Required. Length (in Unicode characters) for the entity
+ match text.
:type length: int
"""
@@ -800,15 +727,7 @@ class Match(msrest.serialization.Model):
'length': {'key': 'length', 'type': 'int'},
}
- def __init__(
- self,
- *,
- confidence_score: float,
- text: str,
- offset: int,
- length: int,
- **kwargs
- ):
+ def __init__(self, *, confidence_score: float, text: str, offset: int, length: int, **kwargs) -> None:
super(Match, self).__init__(**kwargs)
self.confidence_score = confidence_score
self.text = text
@@ -816,13 +735,15 @@ def __init__(
self.length = length
-class MultiLanguageBatchInput(msrest.serialization.Model):
+class MultiLanguageBatchInput(Model):
"""Contains a set of input documents to be analyzed by the service.
All required parameters must be populated in order to send to Azure.
- :param documents: Required. The set of documents to process as part of this batch.
- :type documents: list[~azure.ai.textanalytics.models.MultiLanguageInput]
+ :param documents: Required. The set of documents to process as part of
+ this batch.
+ :type documents:
+ list[~azure.ai.textanalytics.v3_0.models.MultiLanguageInput]
"""
_validation = {
@@ -833,17 +754,12 @@ class MultiLanguageBatchInput(msrest.serialization.Model):
'documents': {'key': 'documents', 'type': '[MultiLanguageInput]'},
}
- def __init__(
- self,
- *,
- documents: List["MultiLanguageInput"],
- **kwargs
- ):
+ def __init__(self, *, documents, **kwargs) -> None:
super(MultiLanguageBatchInput, self).__init__(**kwargs)
self.documents = documents
-class MultiLanguageInput(msrest.serialization.Model):
+class MultiLanguageInput(Model):
"""Contains an input document to be analyzed by the service.
All required parameters must be populated in order to send to Azure.
@@ -852,9 +768,9 @@ class MultiLanguageInput(msrest.serialization.Model):
:type id: str
:param text: Required. The input text to process.
:type text: str
- :param language: (Optional) This is the 2 letter ISO 639-1 representation of a language. For
- example, use "en" for English; "es" for Spanish etc. If not set, use "en" for English as
- default.
+ :param language: (Optional) This is the 2 letter ISO 639-1 representation
+ of a language. For example, use "en" for English; "es" for Spanish etc. If
+ not set, use "en" for English as default.
:type language: str
"""
@@ -869,34 +785,30 @@ class MultiLanguageInput(msrest.serialization.Model):
'language': {'key': 'language', 'type': 'str'},
}
- def __init__(
- self,
- *,
- id: str,
- text: str,
- language: Optional[str] = None,
- **kwargs
- ):
+ def __init__(self, *, id: str, text: str, language: str=None, **kwargs) -> None:
super(MultiLanguageInput, self).__init__(**kwargs)
self.id = id
self.text = text
self.language = language
-class RequestStatistics(msrest.serialization.Model):
- """if showStats=true was specified in the request this field will contain information about the request payload.
+class RequestStatistics(Model):
+ """if showStats=true was specified in the request this field will contain
+ information about the request payload.
All required parameters must be populated in order to send to Azure.
- :param documents_count: Required. Number of documents submitted in the request.
+ :param documents_count: Required. Number of documents submitted in the
+ request.
:type documents_count: int
- :param valid_documents_count: Required. Number of valid documents. This excludes empty, over-
- size limit or non-supported languages documents.
+ :param valid_documents_count: Required. Number of valid documents. This
+ excludes empty, over-size limit or non-supported languages documents.
:type valid_documents_count: int
- :param erroneous_documents_count: Required. Number of invalid documents. This includes empty,
- over-size limit or non-supported languages documents.
+ :param erroneous_documents_count: Required. Number of invalid documents.
+ This includes empty, over-size limit or non-supported languages documents.
:type erroneous_documents_count: int
- :param transactions_count: Required. Number of transactions for the request.
+ :param transactions_count: Required. Number of transactions for the
+ request.
:type transactions_count: long
"""
@@ -914,15 +826,7 @@ class RequestStatistics(msrest.serialization.Model):
'transactions_count': {'key': 'transactionsCount', 'type': 'long'},
}
- def __init__(
- self,
- *,
- documents_count: int,
- valid_documents_count: int,
- erroneous_documents_count: int,
- transactions_count: int,
- **kwargs
- ):
+ def __init__(self, *, documents_count: int, valid_documents_count: int, erroneous_documents_count: int, transactions_count: int, **kwargs) -> None:
super(RequestStatistics, self).__init__(**kwargs)
self.documents_count = documents_count
self.valid_documents_count = valid_documents_count
@@ -930,26 +834,30 @@ def __init__(
self.transactions_count = transactions_count
-class SentenceSentiment(msrest.serialization.Model):
+class SentenceSentiment(Model):
"""SentenceSentiment.
All required parameters must be populated in order to send to Azure.
- :param text: The sentence text.
+ :param text: Required. The sentence text.
:type text: str
- :param sentiment: Required. The predicted Sentiment for the sentence. Possible values include:
- "positive", "neutral", "negative".
- :type sentiment: str or ~azure.ai.textanalytics.models.SentenceSentimentValue
- :param confidence_scores: Required. The sentiment confidence score between 0 and 1 for the
- sentence for all classes.
- :type confidence_scores: ~azure.ai.textanalytics.models.SentimentConfidenceScorePerLabel
- :param offset: Required. The sentence offset from the start of the document.
+ :param sentiment: Required. The predicted Sentiment for the sentence.
+ Possible values include: 'positive', 'neutral', 'negative'
+ :type sentiment: str or
+ ~azure.ai.textanalytics.v3_0.models.SentenceSentimentValue
+ :param confidence_scores: Required. The sentiment confidence score between
+ 0 and 1 for the sentence for all classes.
+ :type confidence_scores:
+ ~azure.ai.textanalytics.v3_0.models.SentimentConfidenceScorePerLabel
+ :param offset: Required. The sentence offset from the start of the
+ document.
:type offset: int
:param length: Required. The length of the sentence by Unicode standard.
:type length: int
"""
_validation = {
+ 'text': {'required': True},
'sentiment': {'required': True},
'confidence_scores': {'required': True},
'offset': {'required': True},
@@ -958,22 +866,13 @@ class SentenceSentiment(msrest.serialization.Model):
_attribute_map = {
'text': {'key': 'text', 'type': 'str'},
- 'sentiment': {'key': 'sentiment', 'type': 'str'},
+ 'sentiment': {'key': 'sentiment', 'type': 'SentenceSentimentValue'},
'confidence_scores': {'key': 'confidenceScores', 'type': 'SentimentConfidenceScorePerLabel'},
'offset': {'key': 'offset', 'type': 'int'},
'length': {'key': 'length', 'type': 'int'},
}
- def __init__(
- self,
- *,
- sentiment: Union[str, "SentenceSentimentValue"],
- confidence_scores: "SentimentConfidenceScorePerLabel",
- offset: int,
- length: int,
- text: Optional[str] = None,
- **kwargs
- ):
+ def __init__(self, *, text: str, sentiment, confidence_scores, offset: int, length: int, **kwargs) -> None:
super(SentenceSentiment, self).__init__(**kwargs)
self.text = text
self.sentiment = sentiment
@@ -982,8 +881,9 @@ def __init__(
self.length = length
-class SentimentConfidenceScorePerLabel(msrest.serialization.Model):
- """Represents the confidence scores between 0 and 1 across all sentiment classes: positive, neutral, negative.
+class SentimentConfidenceScorePerLabel(Model):
+ """Represents the confidence scores between 0 and 1 across all sentiment
+ classes: positive, neutral, negative.
All required parameters must be populated in order to send to Azure.
@@ -1007,33 +907,27 @@ class SentimentConfidenceScorePerLabel(msrest.serialization.Model):
'negative': {'key': 'negative', 'type': 'float'},
}
- def __init__(
- self,
- *,
- positive: float,
- neutral: float,
- negative: float,
- **kwargs
- ):
+ def __init__(self, *, positive: float, neutral: float, negative: float, **kwargs) -> None:
super(SentimentConfidenceScorePerLabel, self).__init__(**kwargs)
self.positive = positive
self.neutral = neutral
self.negative = negative
-class SentimentResponse(msrest.serialization.Model):
+class SentimentResponse(Model):
"""SentimentResponse.
All required parameters must be populated in order to send to Azure.
:param documents: Required. Sentiment analysis per document.
- :type documents: list[~azure.ai.textanalytics.models.DocumentSentiment]
+ :type documents:
+ list[~azure.ai.textanalytics.v3_0.models.DocumentSentiment]
:param errors: Required. Errors by document id.
- :type errors: list[~azure.ai.textanalytics.models.DocumentError]
- :param statistics: if showStats=true was specified in the request this field will contain
- information about the request payload.
- :type statistics: ~azure.ai.textanalytics.models.RequestStatistics
- :param model_version: Required. This field indicates which model is used for scoring.
+ :type errors: list[~azure.ai.textanalytics.v3_0.models.DocumentError]
+ :param statistics:
+ :type statistics: ~azure.ai.textanalytics.v3_0.models.RequestStatistics
+ :param model_version: Required. This field indicates which model is used
+ for scoring.
:type model_version: str
"""
@@ -1050,15 +944,7 @@ class SentimentResponse(msrest.serialization.Model):
'model_version': {'key': 'modelVersion', 'type': 'str'},
}
- def __init__(
- self,
- *,
- documents: List["DocumentSentiment"],
- errors: List["DocumentError"],
- model_version: str,
- statistics: Optional["RequestStatistics"] = None,
- **kwargs
- ):
+ def __init__(self, *, documents, errors, model_version: str, statistics=None, **kwargs) -> None:
super(SentimentResponse, self).__init__(**kwargs)
self.documents = documents
self.errors = errors
@@ -1066,22 +952,25 @@ def __init__(
self.model_version = model_version
-class TextAnalyticsError(msrest.serialization.Model):
+class TextAnalyticsError(Model):
"""TextAnalyticsError.
All required parameters must be populated in order to send to Azure.
- :param code: Required. Error code. Possible values include: "invalidRequest",
- "invalidArgument", "internalServerError", "serviceUnavailable".
- :type code: str or ~azure.ai.textanalytics.models.ErrorCodeValue
+ :param code: Required. Error code. Possible values include:
+ 'invalidRequest', 'invalidArgument', 'internalServerError',
+ 'serviceUnavailable'
+ :type code: str or ~azure.ai.textanalytics.v3_0.models.ErrorCodeValue
:param message: Required. Error message.
:type message: str
:param target: Error target.
:type target: str
:param innererror: Inner error contains more specific information.
- :type innererror: ~azure.ai.textanalytics.models.InnerError
- :param details: Details about specific errors that led to this reported error.
- :type details: list[~azure.ai.textanalytics.models.TextAnalyticsError]
+ :type innererror: ~azure.ai.textanalytics.v3_0.models.InnerError
+ :param details: Details about specific errors that led to this reported
+ error.
+ :type details:
+ list[~azure.ai.textanalytics.v3_0.models.TextAnalyticsError]
"""
_validation = {
@@ -1090,23 +979,14 @@ class TextAnalyticsError(msrest.serialization.Model):
}
_attribute_map = {
- 'code': {'key': 'code', 'type': 'str'},
+ 'code': {'key': 'code', 'type': 'ErrorCodeValue'},
'message': {'key': 'message', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
'innererror': {'key': 'innererror', 'type': 'InnerError'},
'details': {'key': 'details', 'type': '[TextAnalyticsError]'},
}
- def __init__(
- self,
- *,
- code: Union[str, "ErrorCodeValue"],
- message: str,
- target: Optional[str] = None,
- innererror: Optional["InnerError"] = None,
- details: Optional[List["TextAnalyticsError"]] = None,
- **kwargs
- ):
+ def __init__(self, *, code, message: str, target: str=None, innererror=None, details=None, **kwargs) -> None:
super(TextAnalyticsError, self).__init__(**kwargs)
self.code = code
self.message = message
@@ -1115,14 +995,14 @@ def __init__(
self.details = details
-class TextAnalyticsWarning(msrest.serialization.Model):
+class TextAnalyticsWarning(Model):
"""TextAnalyticsWarning.
All required parameters must be populated in order to send to Azure.
- :param code: Required. Error code. Possible values include: "LongWordsInDocument",
- "DocumentTruncated".
- :type code: str or ~azure.ai.textanalytics.models.WarningCodeValue
+ :param code: Required. Error code. Possible values include:
+ 'LongWordsInDocument', 'DocumentTruncated'
+ :type code: str or ~azure.ai.textanalytics.v3_0.models.WarningCodeValue
:param message: Required. Warning message.
:type message: str
:param target_ref: A JSON pointer reference indicating the target object.
@@ -1135,19 +1015,12 @@ class TextAnalyticsWarning(msrest.serialization.Model):
}
_attribute_map = {
- 'code': {'key': 'code', 'type': 'str'},
+ 'code': {'key': 'code', 'type': 'WarningCodeValue'},
'message': {'key': 'message', 'type': 'str'},
'target_ref': {'key': 'targetRef', 'type': 'str'},
}
- def __init__(
- self,
- *,
- code: Union[str, "WarningCodeValue"],
- message: str,
- target_ref: Optional[str] = None,
- **kwargs
- ):
+ def __init__(self, *, code, message: str, target_ref: str=None, **kwargs) -> None:
super(TextAnalyticsWarning, self).__init__(**kwargs)
self.code = code
self.message = message
diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/models/_text_analytics_client_enums.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_0/models/_text_analytics_client_enums.py
similarity index 84%
rename from sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/models/_text_analytics_client_enums.py
rename to sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_0/models/_text_analytics_client_enums.py
index e96d02393a83..7329e4e2f4d5 100644
--- a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/models/_text_analytics_client_enums.py
+++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_0/models/_text_analytics_client_enums.py
@@ -1,34 +1,26 @@
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for license information.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
# --------------------------------------------------------------------------
from enum import Enum
-class DocumentSentimentValue(str, Enum):
- """Predicted sentiment for document (Negative, Neutral, Positive, or Mixed).
- """
-
- positive = "positive"
- neutral = "neutral"
- negative = "negative"
- mixed = "mixed"
class ErrorCodeValue(str, Enum):
- """Error code.
- """
invalid_request = "invalidRequest"
invalid_argument = "invalidArgument"
internal_server_error = "internalServerError"
service_unavailable = "serviceUnavailable"
+
class InnerErrorCodeValue(str, Enum):
- """Error code.
- """
invalid_parameter_value = "invalidParameterValue"
invalid_request_body_format = "invalidRequestBodyFormat"
@@ -40,17 +32,23 @@ class InnerErrorCodeValue(str, Enum):
unsupported_language_code = "unsupportedLanguageCode"
invalid_country_hint = "invalidCountryHint"
-class SentenceSentimentValue(str, Enum):
- """The predicted Sentiment for the sentence.
- """
+
+class WarningCodeValue(str, Enum):
+
+ long_words_in_document = "LongWordsInDocument"
+ document_truncated = "DocumentTruncated"
+
+
+class DocumentSentimentValue(str, Enum):
positive = "positive"
neutral = "neutral"
negative = "negative"
+ mixed = "mixed"
-class WarningCodeValue(str, Enum):
- """Error code.
- """
- long_words_in_document = "LongWordsInDocument"
- document_truncated = "DocumentTruncated"
+class SentenceSentimentValue(str, Enum):
+
+ positive = "positive"
+ neutral = "neutral"
+ negative = "negative"
diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/operations/__init__.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_0/operations/__init__.py
similarity index 90%
rename from sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/operations/__init__.py
rename to sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_0/operations/__init__.py
index 4384511c0346..e87e22b49362 100644
--- a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/operations/__init__.py
+++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_0/operations/__init__.py
@@ -1,9 +1,12 @@
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for license information.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
# --------------------------------------------------------------------------
from ._text_analytics_client_operations import TextAnalyticsClientOperationsMixin
diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_0/operations/_text_analytics_client_operations.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_0/operations/_text_analytics_client_operations.py
new file mode 100644
index 000000000000..cc8b6bf4ad8d
--- /dev/null
+++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_0/operations/_text_analytics_client_operations.py
@@ -0,0 +1,391 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from msrest.pipeline import ClientRawResponse
+from .. import models
+
+
class TextAnalyticsClientOperationsMixin(object):
    """Generated operations mixin for the Text Analytics service client.

    Each operation follows the same five-step flow: wrap the documents in a
    batch-input model, build the URL from ``self.config.endpoint``, add the
    optional ``model-version``/``showStats`` query parameters, POST the
    serialized body, then deserialize the 200 response (or raise
    ``ErrorResponseException``).

    NOTE(review): the mixing class must provide ``self.config``,
    ``self._client``, ``self._serialize`` and ``self._deserialize``
    (supplied by the SDKClient subclass) — confirm against the client class.
    """

    def entities_recognition_general(
            self, documents, model_version=None, show_stats=None, custom_headers=None, raw=False, **operation_config):
        """Named Entity Recognition.

        The API returns a list of general named entities in a given document.
        For the list of supported entity types, check Supported Entity Types in Text Analytics
        API. See the Supported languages
        in Text Analytics API for the list of enabled languages.

        :param documents: The set of documents to process as part of this
         batch.
        :type documents:
         list[~azure.ai.textanalytics.v3_0.models.MultiLanguageInput]
        :param model_version: (Optional) This value indicates which model will
         be used for scoring. If a model-version is not specified, the API
         should default to the latest, non-preview version.
        :type model_version: str
        :param show_stats: (Optional) if set to true, response will contain
         input and document level statistics.
        :type show_stats: bool
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides`.
        :return: EntitiesResult or ClientRawResponse if raw=true
        :rtype: ~azure.ai.textanalytics.v3_0.models.EntitiesResult or
         ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`ErrorResponseException`
        """
        # NOTE: shadows the `input` builtin — kept as emitted by AutoRest.
        input = models.MultiLanguageBatchInput(documents=documents)

        # Construct URL
        url = self.entities_recognition_general.metadata['url']
        path_format_arguments = {
            'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True)
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        if model_version is not None:
            query_parameters['model-version'] = self._serialize.query("model_version", model_version, 'str')
        if show_stats is not None:
            query_parameters['showStats'] = self._serialize.query("show_stats", show_stats, 'bool')

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct body
        body_content = self._serialize.body(input, 'MultiLanguageBatchInput')

        # Construct and send request
        request = self._client.post(url, query_parameters, header_parameters, body_content)
        response = self._client.send(request, stream=False, **operation_config)

        # Any non-200 status is surfaced as a typed service exception.
        if response.status_code not in [200]:
            raise models.ErrorResponseException(self._deserialize, response)

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('EntitiesResult', response)

        # raw=True wraps the model together with the raw HTTP response.
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    entities_recognition_general.metadata = {'url': '/entities/recognition/general'}

    def entities_linking(
            self, documents, model_version=None, show_stats=None, custom_headers=None, raw=False, **operation_config):
        """Linked entities from a well-known knowledge base.

        The API returns a list of recognized entities with links to a
        well-known knowledge base. See the Supported languages in Text Analytics
        API for the list of enabled languages.

        :param documents: The set of documents to process as part of this
         batch.
        :type documents:
         list[~azure.ai.textanalytics.v3_0.models.MultiLanguageInput]
        :param model_version: (Optional) This value indicates which model will
         be used for scoring. If a model-version is not specified, the API
         should default to the latest, non-preview version.
        :type model_version: str
        :param show_stats: (Optional) if set to true, response will contain
         input and document level statistics.
        :type show_stats: bool
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides`.
        :return: EntityLinkingResult or ClientRawResponse if raw=true
        :rtype: ~azure.ai.textanalytics.v3_0.models.EntityLinkingResult or
         ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`ErrorResponseException`
        """
        # NOTE: shadows the `input` builtin — kept as emitted by AutoRest.
        input = models.MultiLanguageBatchInput(documents=documents)

        # Construct URL
        url = self.entities_linking.metadata['url']
        path_format_arguments = {
            'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True)
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        if model_version is not None:
            query_parameters['model-version'] = self._serialize.query("model_version", model_version, 'str')
        if show_stats is not None:
            query_parameters['showStats'] = self._serialize.query("show_stats", show_stats, 'bool')

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct body
        body_content = self._serialize.body(input, 'MultiLanguageBatchInput')

        # Construct and send request
        request = self._client.post(url, query_parameters, header_parameters, body_content)
        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [200]:
            raise models.ErrorResponseException(self._deserialize, response)

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('EntityLinkingResult', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    entities_linking.metadata = {'url': '/entities/linking'}

    def key_phrases(
            self, documents, model_version=None, show_stats=None, custom_headers=None, raw=False, **operation_config):
        """Key Phrases.

        The API returns a list of strings denoting the key phrases in the input
        text. See the Supported languages in
        Text Analytics API for the list of enabled languages.

        :param documents: The set of documents to process as part of this
         batch.
        :type documents:
         list[~azure.ai.textanalytics.v3_0.models.MultiLanguageInput]
        :param model_version: (Optional) This value indicates which model will
         be used for scoring. If a model-version is not specified, the API
         should default to the latest, non-preview version.
        :type model_version: str
        :param show_stats: (Optional) if set to true, response will contain
         input and document level statistics.
        :type show_stats: bool
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides`.
        :return: KeyPhraseResult or ClientRawResponse if raw=true
        :rtype: ~azure.ai.textanalytics.v3_0.models.KeyPhraseResult or
         ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`ErrorResponseException`
        """
        # NOTE: shadows the `input` builtin — kept as emitted by AutoRest.
        input = models.MultiLanguageBatchInput(documents=documents)

        # Construct URL
        url = self.key_phrases.metadata['url']
        path_format_arguments = {
            'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True)
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        if model_version is not None:
            query_parameters['model-version'] = self._serialize.query("model_version", model_version, 'str')
        if show_stats is not None:
            query_parameters['showStats'] = self._serialize.query("show_stats", show_stats, 'bool')

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct body
        body_content = self._serialize.body(input, 'MultiLanguageBatchInput')

        # Construct and send request
        request = self._client.post(url, query_parameters, header_parameters, body_content)
        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [200]:
            raise models.ErrorResponseException(self._deserialize, response)

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('KeyPhraseResult', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    key_phrases.metadata = {'url': '/keyPhrases'}

    def languages(
            self, documents, model_version=None, show_stats=None, custom_headers=None, raw=False, **operation_config):
        """Detect Language.

        The API returns the detected language and a numeric score between 0 and
        1. Scores close to 1 indicate 100% certainty that the identified
        language is true. See the Supported
        languages in Text Analytics API for the list of enabled languages.

        :param documents:
        :type documents:
         list[~azure.ai.textanalytics.v3_0.models.LanguageInput]
        :param model_version: (Optional) This value indicates which model will
         be used for scoring. If a model-version is not specified, the API
         should default to the latest, non-preview version.
        :type model_version: str
        :param show_stats: (Optional) if set to true, response will contain
         input and document level statistics.
        :type show_stats: bool
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides`.
        :return: LanguageResult or ClientRawResponse if raw=true
        :rtype: ~azure.ai.textanalytics.v3_0.models.LanguageResult or
         ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`ErrorResponseException`
        """
        # This operation uses LanguageBatchInput (country hints), unlike the
        # other operations which use MultiLanguageBatchInput.
        input = models.LanguageBatchInput(documents=documents)

        # Construct URL
        url = self.languages.metadata['url']
        path_format_arguments = {
            'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True)
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        if model_version is not None:
            query_parameters['model-version'] = self._serialize.query("model_version", model_version, 'str')
        if show_stats is not None:
            query_parameters['showStats'] = self._serialize.query("show_stats", show_stats, 'bool')

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct body
        body_content = self._serialize.body(input, 'LanguageBatchInput')

        # Construct and send request
        request = self._client.post(url, query_parameters, header_parameters, body_content)
        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [200]:
            raise models.ErrorResponseException(self._deserialize, response)

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('LanguageResult', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    languages.metadata = {'url': '/languages'}

    def sentiment(
            self, documents, model_version=None, show_stats=None, custom_headers=None, raw=False, **operation_config):
        """Sentiment.

        The API returns a sentiment prediction, as well as sentiment scores for
        each sentiment class (Positive, Negative, and Neutral) for the document
        and each sentence within it. See the Supported languages in Text Analytics
        API for the list of enabled languages.

        :param documents: The set of documents to process as part of this
         batch.
        :type documents:
         list[~azure.ai.textanalytics.v3_0.models.MultiLanguageInput]
        :param model_version: (Optional) This value indicates which model will
         be used for scoring. If a model-version is not specified, the API
         should default to the latest, non-preview version.
        :type model_version: str
        :param show_stats: (Optional) if set to true, response will contain
         input and document level statistics.
        :type show_stats: bool
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides`.
        :return: SentimentResponse or ClientRawResponse if raw=true
        :rtype: ~azure.ai.textanalytics.v3_0.models.SentimentResponse or
         ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`ErrorResponseException`
        """
        # NOTE: shadows the `input` builtin — kept as emitted by AutoRest.
        input = models.MultiLanguageBatchInput(documents=documents)

        # Construct URL
        url = self.sentiment.metadata['url']
        path_format_arguments = {
            'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True)
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        if model_version is not None:
            query_parameters['model-version'] = self._serialize.query("model_version", model_version, 'str')
        if show_stats is not None:
            query_parameters['showStats'] = self._serialize.query("show_stats", show_stats, 'bool')

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct body
        body_content = self._serialize.body(input, 'MultiLanguageBatchInput')

        # Construct and send request
        request = self._client.post(url, query_parameters, header_parameters, body_content)
        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [200]:
            raise models.ErrorResponseException(self._deserialize, response)

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('SentimentResponse', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    sentiment.metadata = {'url': '/sentiment'}
diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/aio/__init__.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_0/version.py
similarity index 73%
rename from sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/aio/__init__.py
rename to sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_0/version.py
index ffe1820f1f27..b8ffb04f789f 100644
--- a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/aio/__init__.py
+++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_0/version.py
@@ -1,10 +1,13 @@
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for license information.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
# --------------------------------------------------------------------------
-from ._text_analytics_client_async import TextAnalyticsClient
-__all__ = ['TextAnalyticsClient']
+VERSION = "0.0.1"
+
diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_2/__init__.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_2/__init__.py
new file mode 100644
index 000000000000..48d59a14cb3c
--- /dev/null
+++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_2/__init__.py
@@ -0,0 +1,19 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from ._configuration import TextAnalyticsClientConfiguration
+from ._text_analytics_client import TextAnalyticsClient
+__all__ = ['TextAnalyticsClient', 'TextAnalyticsClientConfiguration']
+
+from .version import VERSION
+
+__version__ = VERSION
+
diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_2/_configuration.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_2/_configuration.py
new file mode 100644
index 000000000000..ea33ce970779
--- /dev/null
+++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_2/_configuration.py
@@ -0,0 +1,47 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from msrest import Configuration
+
+from .version import VERSION
+
+
class TextAnalyticsClientConfiguration(Configuration):
    """Configuration for TextAnalyticsClient
    Note that all parameters used to create this instance are saved as instance
    attributes.

    :param endpoint: Supported Cognitive Services endpoints (protocol and
     hostname, for example: https://westus.api.cognitive.microsoft.com).
    :type endpoint: str
    :param credentials: Subscription credentials which uniquely identify
     client subscription.
    :type credentials: None

    :raises ValueError: if ``endpoint`` or ``credentials`` is None.
    """

    def __init__(
            self, endpoint, credentials):

        # Fail fast: both values are required to build any request.
        if endpoint is None:
            raise ValueError("Parameter 'endpoint' must not be None.")
        if credentials is None:
            raise ValueError("Parameter 'credentials' must not be None.")
        # '{Endpoint}' is a path-format placeholder substituted with
        # self.endpoint on every request, not a literal URL component.
        base_url = '{Endpoint}/text/analytics/v3.1-preview.2'

        super(TextAnalyticsClientConfiguration, self).__init__(base_url)

        # Starting Autorest.Python 4.0.64, make connection pool activated by default
        self.keep_alive = True

        # Adds e.g. 'azure-ai-textanalytics/0.0.1' to the User-Agent header.
        self.add_user_agent('azure-ai-textanalytics/{}'.format(VERSION))

        self.endpoint = endpoint
        self.credentials = credentials
diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_2/_text_analytics_client.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_2/_text_analytics_client.py
new file mode 100644
index 000000000000..f7e5b9dec9e2
--- /dev/null
+++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_2/_text_analytics_client.py
@@ -0,0 +1,44 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from msrest.service_client import SDKClient
+from msrest import Serializer, Deserializer
+
+from ._configuration import TextAnalyticsClientConfiguration
+from .operations import TextAnalyticsClientOperationsMixin
+from . import models
+
+
class TextAnalyticsClient(TextAnalyticsClientOperationsMixin, SDKClient):
    """The Text Analytics API is a suite of text analytics web services built with best-in-class Microsoft machine learning algorithms. The API can be used to analyze unstructured text for tasks such as sentiment analysis, key phrase extraction and language detection. No training data is needed to use this API; just bring your text data. This API uses advanced natural language processing techniques to deliver best in class predictions. Further documentation can be found in https://docs.microsoft.com/en-us/azure/cognitive-services/text-analytics/overview

    :ivar config: Configuration for client.
    :vartype config: TextAnalyticsClientConfiguration

    :param endpoint: Supported Cognitive Services endpoints (protocol and
     hostname, for example: https://westus.api.cognitive.microsoft.com).
    :type endpoint: str
    :param credentials: Subscription credentials which uniquely identify
     client subscription.
    :type credentials: None
    """

    def __init__(
            self, endpoint, credentials):

        # The configuration validates endpoint/credentials and carries the
        # versioned base URL; SDKClient wires up the HTTP pipeline from it.
        self.config = TextAnalyticsClientConfiguration(endpoint, credentials)
        super(TextAnalyticsClient, self).__init__(self.config.credentials, self.config)

        # Register every model class from the generated models module so the
        # (de)serializers can resolve type names like 'SentimentResponse'.
        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
        self.api_version = 'v3.1-preview.2'
        self._serialize = Serializer(client_models)
        self._deserialize = Deserializer(client_models)
diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_2/models/__init__.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_2/models/__init__.py
new file mode 100644
index 000000000000..63224aec06ae
--- /dev/null
+++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_2/models/__init__.py
@@ -0,0 +1,133 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+try:
+ from ._models_py3 import AspectConfidenceScoreLabel
+ from ._models_py3 import AspectRelation
+ from ._models_py3 import DetectedLanguage
+ from ._models_py3 import DocumentEntities
+ from ._models_py3 import DocumentError
+ from ._models_py3 import DocumentKeyPhrases
+ from ._models_py3 import DocumentLanguage
+ from ._models_py3 import DocumentLinkedEntities
+ from ._models_py3 import DocumentSentiment
+ from ._models_py3 import DocumentStatistics
+ from ._models_py3 import EntitiesResult
+ from ._models_py3 import Entity
+ from ._models_py3 import EntityLinkingResult
+ from ._models_py3 import ErrorResponse, ErrorResponseException
+ from ._models_py3 import InnerError
+ from ._models_py3 import KeyPhraseResult
+ from ._models_py3 import LanguageBatchInput
+ from ._models_py3 import LanguageInput
+ from ._models_py3 import LanguageResult
+ from ._models_py3 import LinkedEntity
+ from ._models_py3 import Match
+ from ._models_py3 import MultiLanguageBatchInput
+ from ._models_py3 import MultiLanguageInput
+ from ._models_py3 import PiiDocumentEntities
+ from ._models_py3 import PiiEntitiesResult
+ from ._models_py3 import RequestStatistics
+ from ._models_py3 import SentenceAspect
+ from ._models_py3 import SentenceOpinion
+ from ._models_py3 import SentenceSentiment
+ from ._models_py3 import SentimentConfidenceScorePerLabel
+ from ._models_py3 import SentimentResponse
+ from ._models_py3 import TextAnalyticsError
+ from ._models_py3 import TextAnalyticsWarning
+except (SyntaxError, ImportError):
+ from ._models import AspectConfidenceScoreLabel
+ from ._models import AspectRelation
+ from ._models import DetectedLanguage
+ from ._models import DocumentEntities
+ from ._models import DocumentError
+ from ._models import DocumentKeyPhrases
+ from ._models import DocumentLanguage
+ from ._models import DocumentLinkedEntities
+ from ._models import DocumentSentiment
+ from ._models import DocumentStatistics
+ from ._models import EntitiesResult
+ from ._models import Entity
+ from ._models import EntityLinkingResult
+ from ._models import ErrorResponse, ErrorResponseException
+ from ._models import InnerError
+ from ._models import KeyPhraseResult
+ from ._models import LanguageBatchInput
+ from ._models import LanguageInput
+ from ._models import LanguageResult
+ from ._models import LinkedEntity
+ from ._models import Match
+ from ._models import MultiLanguageBatchInput
+ from ._models import MultiLanguageInput
+ from ._models import PiiDocumentEntities
+ from ._models import PiiEntitiesResult
+ from ._models import RequestStatistics
+ from ._models import SentenceAspect
+ from ._models import SentenceOpinion
+ from ._models import SentenceSentiment
+ from ._models import SentimentConfidenceScorePerLabel
+ from ._models import SentimentResponse
+ from ._models import TextAnalyticsError
+ from ._models import TextAnalyticsWarning
+from ._text_analytics_client_enums import (
+ AspectRelationType,
+ DocumentSentimentValue,
+ ErrorCodeValue,
+ InnerErrorCodeValue,
+ SentenceSentimentValue,
+ StringIndexType,
+ TokenSentimentValue,
+ WarningCodeValue,
+)
+
+__all__ = [
+ 'AspectConfidenceScoreLabel',
+ 'AspectRelation',
+ 'DetectedLanguage',
+ 'DocumentEntities',
+ 'DocumentError',
+ 'DocumentKeyPhrases',
+ 'DocumentLanguage',
+ 'DocumentLinkedEntities',
+ 'DocumentSentiment',
+ 'DocumentStatistics',
+ 'EntitiesResult',
+ 'Entity',
+ 'EntityLinkingResult',
+ 'ErrorResponse', 'ErrorResponseException',
+ 'InnerError',
+ 'KeyPhraseResult',
+ 'LanguageBatchInput',
+ 'LanguageInput',
+ 'LanguageResult',
+ 'LinkedEntity',
+ 'Match',
+ 'MultiLanguageBatchInput',
+ 'MultiLanguageInput',
+ 'PiiDocumentEntities',
+ 'PiiEntitiesResult',
+ 'RequestStatistics',
+ 'SentenceAspect',
+ 'SentenceOpinion',
+ 'SentenceSentiment',
+ 'SentimentConfidenceScorePerLabel',
+ 'SentimentResponse',
+ 'TextAnalyticsError',
+ 'TextAnalyticsWarning',
+ 'ErrorCodeValue',
+ 'InnerErrorCodeValue',
+ 'WarningCodeValue',
+ 'DocumentSentimentValue',
+ 'SentenceSentimentValue',
+ 'TokenSentimentValue',
+ 'AspectRelationType',
+ 'StringIndexType',
+]
diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_2/models/_models.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_2/models/_models.py
new file mode 100644
index 000000000000..7d0a6eeb0dd6
--- /dev/null
+++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_2/models/_models.py
@@ -0,0 +1,1308 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from msrest.serialization import Model
+from msrest.exceptions import HttpOperationError
+
+
+class AspectConfidenceScoreLabel(Model):
+ """Represents the confidence scores across all sentiment classes: positive,
+ neutral, negative.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param positive: Required.
+ :type positive: float
+ :param negative: Required.
+ :type negative: float
+ """
+
+ _validation = {
+ 'positive': {'required': True},
+ 'negative': {'required': True},
+ }
+
+ _attribute_map = {
+ 'positive': {'key': 'positive', 'type': 'float'},
+ 'negative': {'key': 'negative', 'type': 'float'},
+ }
+
+ def __init__(self, **kwargs):
+ super(AspectConfidenceScoreLabel, self).__init__(**kwargs)
+ self.positive = kwargs.get('positive', None)
+ self.negative = kwargs.get('negative', None)
+
+
+class AspectRelation(Model):
+ """AspectRelation.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param relation_type: Required. The type related to the aspect. Possible
+ values include: 'opinion', 'aspect'
+ :type relation_type: str or
+ ~azure.ai.textanalytics.v3_1_preview_2.models.AspectRelationType
+ :param ref: Required. The JSON pointer indicating the linked object.
+ :type ref: str
+ """
+
+ _validation = {
+ 'relation_type': {'required': True},
+ 'ref': {'required': True},
+ }
+
+ _attribute_map = {
+ 'relation_type': {'key': 'relationType', 'type': 'AspectRelationType'},
+ 'ref': {'key': 'ref', 'type': 'str'},
+ }
+
+ def __init__(self, **kwargs):
+ super(AspectRelation, self).__init__(**kwargs)
+ self.relation_type = kwargs.get('relation_type', None)
+ self.ref = kwargs.get('ref', None)
+
+
+class DetectedLanguage(Model):
+ """DetectedLanguage.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param name: Required. Long name of a detected language (e.g. English,
+ French).
+ :type name: str
+ :param iso6391_name: Required. A two letter representation of the detected
+ language according to the ISO 639-1 standard (e.g. en, fr).
+ :type iso6391_name: str
+ :param confidence_score: Required. A confidence score between 0 and 1.
+ Scores close to 1 indicate 100% certainty that the identified language is
+ true.
+ :type confidence_score: float
+ """
+
+ _validation = {
+ 'name': {'required': True},
+ 'iso6391_name': {'required': True},
+ 'confidence_score': {'required': True},
+ }
+
+ _attribute_map = {
+ 'name': {'key': 'name', 'type': 'str'},
+ 'iso6391_name': {'key': 'iso6391Name', 'type': 'str'},
+ 'confidence_score': {'key': 'confidenceScore', 'type': 'float'},
+ }
+
+ def __init__(self, **kwargs):
+ super(DetectedLanguage, self).__init__(**kwargs)
+ self.name = kwargs.get('name', None)
+ self.iso6391_name = kwargs.get('iso6391_name', None)
+ self.confidence_score = kwargs.get('confidence_score', None)
+
+
+class DocumentEntities(Model):
+ """DocumentEntities.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param id: Required. Unique, non-empty document identifier.
+ :type id: str
+ :param entities: Required. Recognized entities in the document.
+ :type entities: list[~azure.ai.textanalytics.v3_1_preview_2.models.Entity]
+ :param warnings: Required. Warnings encountered while processing document.
+ :type warnings:
+ list[~azure.ai.textanalytics.v3_1_preview_2.models.TextAnalyticsWarning]
+ :param statistics: if showStats=true was specified in the request this
+ field will contain information about the document payload.
+ :type statistics:
+ ~azure.ai.textanalytics.v3_1_preview_2.models.DocumentStatistics
+ """
+
+ _validation = {
+ 'id': {'required': True},
+ 'entities': {'required': True},
+ 'warnings': {'required': True},
+ }
+
+ _attribute_map = {
+ 'id': {'key': 'id', 'type': 'str'},
+ 'entities': {'key': 'entities', 'type': '[Entity]'},
+ 'warnings': {'key': 'warnings', 'type': '[TextAnalyticsWarning]'},
+ 'statistics': {'key': 'statistics', 'type': 'DocumentStatistics'},
+ }
+
+ def __init__(self, **kwargs):
+ super(DocumentEntities, self).__init__(**kwargs)
+ self.id = kwargs.get('id', None)
+ self.entities = kwargs.get('entities', None)
+ self.warnings = kwargs.get('warnings', None)
+ self.statistics = kwargs.get('statistics', None)
+
+
+class DocumentError(Model):
+ """DocumentError.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param id: Required. Document Id.
+ :type id: str
+ :param error: Required. Document Error.
+ :type error:
+ ~azure.ai.textanalytics.v3_1_preview_2.models.TextAnalyticsError
+ """
+
+ _validation = {
+ 'id': {'required': True},
+ 'error': {'required': True},
+ }
+
+ _attribute_map = {
+ 'id': {'key': 'id', 'type': 'str'},
+ 'error': {'key': 'error', 'type': 'TextAnalyticsError'},
+ }
+
+ def __init__(self, **kwargs):
+ super(DocumentError, self).__init__(**kwargs)
+ self.id = kwargs.get('id', None)
+ self.error = kwargs.get('error', None)
+
+
+class DocumentKeyPhrases(Model):
+ """DocumentKeyPhrases.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param id: Required. Unique, non-empty document identifier.
+ :type id: str
+ :param key_phrases: Required. A list of representative words or phrases.
+ The number of key phrases returned is proportional to the number of words
+ in the input document.
+ :type key_phrases: list[str]
+ :param warnings: Required. Warnings encountered while processing document.
+ :type warnings:
+ list[~azure.ai.textanalytics.v3_1_preview_2.models.TextAnalyticsWarning]
+ :param statistics: if showStats=true was specified in the request this
+ field will contain information about the document payload.
+ :type statistics:
+ ~azure.ai.textanalytics.v3_1_preview_2.models.DocumentStatistics
+ """
+
+ _validation = {
+ 'id': {'required': True},
+ 'key_phrases': {'required': True},
+ 'warnings': {'required': True},
+ }
+
+ _attribute_map = {
+ 'id': {'key': 'id', 'type': 'str'},
+ 'key_phrases': {'key': 'keyPhrases', 'type': '[str]'},
+ 'warnings': {'key': 'warnings', 'type': '[TextAnalyticsWarning]'},
+ 'statistics': {'key': 'statistics', 'type': 'DocumentStatistics'},
+ }
+
+ def __init__(self, **kwargs):
+ super(DocumentKeyPhrases, self).__init__(**kwargs)
+ self.id = kwargs.get('id', None)
+ self.key_phrases = kwargs.get('key_phrases', None)
+ self.warnings = kwargs.get('warnings', None)
+ self.statistics = kwargs.get('statistics', None)
+
+
+class DocumentLanguage(Model):
+ """DocumentLanguage.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param id: Required. Unique, non-empty document identifier.
+ :type id: str
+ :param detected_language: Required. Detected Language.
+ :type detected_language:
+ ~azure.ai.textanalytics.v3_1_preview_2.models.DetectedLanguage
+ :param warnings: Required. Warnings encountered while processing document.
+ :type warnings:
+ list[~azure.ai.textanalytics.v3_1_preview_2.models.TextAnalyticsWarning]
+ :param statistics: if showStats=true was specified in the request this
+ field will contain information about the document payload.
+ :type statistics:
+ ~azure.ai.textanalytics.v3_1_preview_2.models.DocumentStatistics
+ """
+
+ _validation = {
+ 'id': {'required': True},
+ 'detected_language': {'required': True},
+ 'warnings': {'required': True},
+ }
+
+ _attribute_map = {
+ 'id': {'key': 'id', 'type': 'str'},
+ 'detected_language': {'key': 'detectedLanguage', 'type': 'DetectedLanguage'},
+ 'warnings': {'key': 'warnings', 'type': '[TextAnalyticsWarning]'},
+ 'statistics': {'key': 'statistics', 'type': 'DocumentStatistics'},
+ }
+
+ def __init__(self, **kwargs):
+ super(DocumentLanguage, self).__init__(**kwargs)
+ self.id = kwargs.get('id', None)
+ self.detected_language = kwargs.get('detected_language', None)
+ self.warnings = kwargs.get('warnings', None)
+ self.statistics = kwargs.get('statistics', None)
+
+
+class DocumentLinkedEntities(Model):
+ """DocumentLinkedEntities.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param id: Required. Unique, non-empty document identifier.
+ :type id: str
+ :param entities: Required. Recognized well-known entities in the document.
+ :type entities:
+ list[~azure.ai.textanalytics.v3_1_preview_2.models.LinkedEntity]
+ :param warnings: Required. Warnings encountered while processing document.
+ :type warnings:
+ list[~azure.ai.textanalytics.v3_1_preview_2.models.TextAnalyticsWarning]
+ :param statistics: if showStats=true was specified in the request this
+ field will contain information about the document payload.
+ :type statistics:
+ ~azure.ai.textanalytics.v3_1_preview_2.models.DocumentStatistics
+ """
+
+ _validation = {
+ 'id': {'required': True},
+ 'entities': {'required': True},
+ 'warnings': {'required': True},
+ }
+
+ _attribute_map = {
+ 'id': {'key': 'id', 'type': 'str'},
+ 'entities': {'key': 'entities', 'type': '[LinkedEntity]'},
+ 'warnings': {'key': 'warnings', 'type': '[TextAnalyticsWarning]'},
+ 'statistics': {'key': 'statistics', 'type': 'DocumentStatistics'},
+ }
+
+ def __init__(self, **kwargs):
+ super(DocumentLinkedEntities, self).__init__(**kwargs)
+ self.id = kwargs.get('id', None)
+ self.entities = kwargs.get('entities', None)
+ self.warnings = kwargs.get('warnings', None)
+ self.statistics = kwargs.get('statistics', None)
+
+
+class DocumentSentiment(Model):
+ """DocumentSentiment.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param id: Required. Unique, non-empty document identifier.
+ :type id: str
+ :param sentiment: Required. Predicted sentiment for document (Negative,
+ Neutral, Positive, or Mixed). Possible values include: 'positive',
+ 'neutral', 'negative', 'mixed'
+ :type sentiment: str or
+ ~azure.ai.textanalytics.v3_1_preview_2.models.DocumentSentimentValue
+ :param statistics:
+ :type statistics:
+ ~azure.ai.textanalytics.v3_1_preview_2.models.DocumentStatistics
+ :param confidence_scores: Required. Document level sentiment confidence
+ scores between 0 and 1 for each sentiment class.
+ :type confidence_scores:
+ ~azure.ai.textanalytics.v3_1_preview_2.models.SentimentConfidenceScorePerLabel
+ :param sentences: Required. Sentence level sentiment analysis.
+ :type sentences:
+ list[~azure.ai.textanalytics.v3_1_preview_2.models.SentenceSentiment]
+ :param warnings: Required. Warnings encountered while processing document.
+ :type warnings:
+ list[~azure.ai.textanalytics.v3_1_preview_2.models.TextAnalyticsWarning]
+ """
+
+ _validation = {
+ 'id': {'required': True},
+ 'sentiment': {'required': True},
+ 'confidence_scores': {'required': True},
+ 'sentences': {'required': True},
+ 'warnings': {'required': True},
+ }
+
+ _attribute_map = {
+ 'id': {'key': 'id', 'type': 'str'},
+ 'sentiment': {'key': 'sentiment', 'type': 'DocumentSentimentValue'},
+ 'statistics': {'key': 'statistics', 'type': 'DocumentStatistics'},
+ 'confidence_scores': {'key': 'confidenceScores', 'type': 'SentimentConfidenceScorePerLabel'},
+ 'sentences': {'key': 'sentences', 'type': '[SentenceSentiment]'},
+ 'warnings': {'key': 'warnings', 'type': '[TextAnalyticsWarning]'},
+ }
+
+ def __init__(self, **kwargs):
+ super(DocumentSentiment, self).__init__(**kwargs)
+ self.id = kwargs.get('id', None)
+ self.sentiment = kwargs.get('sentiment', None)
+ self.statistics = kwargs.get('statistics', None)
+ self.confidence_scores = kwargs.get('confidence_scores', None)
+ self.sentences = kwargs.get('sentences', None)
+ self.warnings = kwargs.get('warnings', None)
+
+
+class DocumentStatistics(Model):
+ """if showStats=true was specified in the request this field will contain
+ information about the document payload.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param characters_count: Required. Number of text elements recognized in
+ the document.
+ :type characters_count: int
+ :param transactions_count: Required. Number of transactions for the
+ document.
+ :type transactions_count: int
+ """
+
+ _validation = {
+ 'characters_count': {'required': True},
+ 'transactions_count': {'required': True},
+ }
+
+ _attribute_map = {
+ 'characters_count': {'key': 'charactersCount', 'type': 'int'},
+ 'transactions_count': {'key': 'transactionsCount', 'type': 'int'},
+ }
+
+ def __init__(self, **kwargs):
+ super(DocumentStatistics, self).__init__(**kwargs)
+ self.characters_count = kwargs.get('characters_count', None)
+ self.transactions_count = kwargs.get('transactions_count', None)
+
+
+class EntitiesResult(Model):
+ """EntitiesResult.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param documents: Required. Response by document
+ :type documents:
+ list[~azure.ai.textanalytics.v3_1_preview_2.models.DocumentEntities]
+ :param errors: Required. Errors by document id.
+ :type errors:
+ list[~azure.ai.textanalytics.v3_1_preview_2.models.DocumentError]
+ :param statistics:
+ :type statistics:
+ ~azure.ai.textanalytics.v3_1_preview_2.models.RequestStatistics
+ :param model_version: Required. This field indicates which model is used
+ for scoring.
+ :type model_version: str
+ """
+
+ _validation = {
+ 'documents': {'required': True},
+ 'errors': {'required': True},
+ 'model_version': {'required': True},
+ }
+
+ _attribute_map = {
+ 'documents': {'key': 'documents', 'type': '[DocumentEntities]'},
+ 'errors': {'key': 'errors', 'type': '[DocumentError]'},
+ 'statistics': {'key': 'statistics', 'type': 'RequestStatistics'},
+ 'model_version': {'key': 'modelVersion', 'type': 'str'},
+ }
+
+ def __init__(self, **kwargs):
+ super(EntitiesResult, self).__init__(**kwargs)
+ self.documents = kwargs.get('documents', None)
+ self.errors = kwargs.get('errors', None)
+ self.statistics = kwargs.get('statistics', None)
+ self.model_version = kwargs.get('model_version', None)
+
+
+class Entity(Model):
+ """Entity.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param text: Required. Entity text as appears in the request.
+ :type text: str
+ :param category: Required. Entity type, such as Person/Location/Org/SSN
+ etc
+ :type category: str
+ :param subcategory: Entity sub type, such as Age/Year/TimeRange etc
+ :type subcategory: str
+ :param offset: Required. Start position for the entity text.
+ :type offset: int
+ :param length: Required. Length for the entity text.
+ :type length: int
+ :param confidence_score: Required. Confidence score between 0 and 1 of the
+ extracted entity.
+ :type confidence_score: float
+ """
+
+ _validation = {
+ 'text': {'required': True},
+ 'category': {'required': True},
+ 'offset': {'required': True},
+ 'length': {'required': True},
+ 'confidence_score': {'required': True},
+ }
+
+ _attribute_map = {
+ 'text': {'key': 'text', 'type': 'str'},
+ 'category': {'key': 'category', 'type': 'str'},
+ 'subcategory': {'key': 'subcategory', 'type': 'str'},
+ 'offset': {'key': 'offset', 'type': 'int'},
+ 'length': {'key': 'length', 'type': 'int'},
+ 'confidence_score': {'key': 'confidenceScore', 'type': 'float'},
+ }
+
+ def __init__(self, **kwargs):
+ super(Entity, self).__init__(**kwargs)
+ self.text = kwargs.get('text', None)
+ self.category = kwargs.get('category', None)
+ self.subcategory = kwargs.get('subcategory', None)
+ self.offset = kwargs.get('offset', None)
+ self.length = kwargs.get('length', None)
+ self.confidence_score = kwargs.get('confidence_score', None)
+
+
+class EntityLinkingResult(Model):
+ """EntityLinkingResult.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param documents: Required. Response by document
+ :type documents:
+ list[~azure.ai.textanalytics.v3_1_preview_2.models.DocumentLinkedEntities]
+ :param errors: Required. Errors by document id.
+ :type errors:
+ list[~azure.ai.textanalytics.v3_1_preview_2.models.DocumentError]
+ :param statistics:
+ :type statistics:
+ ~azure.ai.textanalytics.v3_1_preview_2.models.RequestStatistics
+ :param model_version: Required. This field indicates which model is used
+ for scoring.
+ :type model_version: str
+ """
+
+ _validation = {
+ 'documents': {'required': True},
+ 'errors': {'required': True},
+ 'model_version': {'required': True},
+ }
+
+ _attribute_map = {
+ 'documents': {'key': 'documents', 'type': '[DocumentLinkedEntities]'},
+ 'errors': {'key': 'errors', 'type': '[DocumentError]'},
+ 'statistics': {'key': 'statistics', 'type': 'RequestStatistics'},
+ 'model_version': {'key': 'modelVersion', 'type': 'str'},
+ }
+
+ def __init__(self, **kwargs):
+ super(EntityLinkingResult, self).__init__(**kwargs)
+ self.documents = kwargs.get('documents', None)
+ self.errors = kwargs.get('errors', None)
+ self.statistics = kwargs.get('statistics', None)
+ self.model_version = kwargs.get('model_version', None)
+
+
+class ErrorResponse(Model):
+ """ErrorResponse.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param error: Required. Document Error.
+ :type error:
+ ~azure.ai.textanalytics.v3_1_preview_2.models.TextAnalyticsError
+ """
+
+ _validation = {
+ 'error': {'required': True},
+ }
+
+ _attribute_map = {
+ 'error': {'key': 'error', 'type': 'TextAnalyticsError'},
+ }
+
+ def __init__(self, **kwargs):
+ super(ErrorResponse, self).__init__(**kwargs)
+ self.error = kwargs.get('error', None)
+
+
+class ErrorResponseException(HttpOperationError):
+ """Server responded with exception of type: 'ErrorResponse'.
+
+ :param deserialize: A deserializer
+ :param response: Server response to be deserialized.
+ """
+
+ def __init__(self, deserialize, response, *args):
+
+ super(ErrorResponseException, self).__init__(deserialize, response, 'ErrorResponse', *args)
+
+
+class InnerError(Model):
+ """InnerError.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param code: Required. Error code. Possible values include:
+ 'InvalidParameterValue', 'InvalidRequestBodyFormat', 'EmptyRequest',
+ 'MissingInputRecords', 'InvalidDocument', 'ModelVersionIncorrect',
+ 'InvalidDocumentBatch', 'UnsupportedLanguageCode', 'InvalidCountryHint'
+ :type code: str or
+ ~azure.ai.textanalytics.v3_1_preview_2.models.InnerErrorCodeValue
+ :param message: Required. Error message.
+ :type message: str
+ :param details: Error details.
+ :type details: dict[str, str]
+ :param target: Error target.
+ :type target: str
+ :param innererror: Inner error contains more specific information.
+ :type innererror: ~azure.ai.textanalytics.v3_1_preview_2.models.InnerError
+ """
+
+ _validation = {
+ 'code': {'required': True},
+ 'message': {'required': True},
+ }
+
+ _attribute_map = {
+ 'code': {'key': 'code', 'type': 'str'},
+ 'message': {'key': 'message', 'type': 'str'},
+ 'details': {'key': 'details', 'type': '{str}'},
+ 'target': {'key': 'target', 'type': 'str'},
+ 'innererror': {'key': 'innererror', 'type': 'InnerError'},
+ }
+
+ def __init__(self, **kwargs):
+ super(InnerError, self).__init__(**kwargs)
+ self.code = kwargs.get('code', None)
+ self.message = kwargs.get('message', None)
+ self.details = kwargs.get('details', None)
+ self.target = kwargs.get('target', None)
+ self.innererror = kwargs.get('innererror', None)
+
+
+class KeyPhraseResult(Model):
+ """KeyPhraseResult.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param documents: Required. Response by document
+ :type documents:
+ list[~azure.ai.textanalytics.v3_1_preview_2.models.DocumentKeyPhrases]
+ :param errors: Required. Errors by document id.
+ :type errors:
+ list[~azure.ai.textanalytics.v3_1_preview_2.models.DocumentError]
+ :param statistics:
+ :type statistics:
+ ~azure.ai.textanalytics.v3_1_preview_2.models.RequestStatistics
+ :param model_version: Required. This field indicates which model is used
+ for scoring.
+ :type model_version: str
+ """
+
+ _validation = {
+ 'documents': {'required': True},
+ 'errors': {'required': True},
+ 'model_version': {'required': True},
+ }
+
+ _attribute_map = {
+ 'documents': {'key': 'documents', 'type': '[DocumentKeyPhrases]'},
+ 'errors': {'key': 'errors', 'type': '[DocumentError]'},
+ 'statistics': {'key': 'statistics', 'type': 'RequestStatistics'},
+ 'model_version': {'key': 'modelVersion', 'type': 'str'},
+ }
+
+ def __init__(self, **kwargs):
+ super(KeyPhraseResult, self).__init__(**kwargs)
+ self.documents = kwargs.get('documents', None)
+ self.errors = kwargs.get('errors', None)
+ self.statistics = kwargs.get('statistics', None)
+ self.model_version = kwargs.get('model_version', None)
+
+
+class LanguageBatchInput(Model):
+ """LanguageBatchInput.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param documents: Required.
+ :type documents:
+ list[~azure.ai.textanalytics.v3_1_preview_2.models.LanguageInput]
+ """
+
+ _validation = {
+ 'documents': {'required': True},
+ }
+
+ _attribute_map = {
+ 'documents': {'key': 'documents', 'type': '[LanguageInput]'},
+ }
+
+ def __init__(self, **kwargs):
+ super(LanguageBatchInput, self).__init__(**kwargs)
+ self.documents = kwargs.get('documents', None)
+
+
+class LanguageInput(Model):
+ """LanguageInput.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param id: Required. Unique, non-empty document identifier.
+ :type id: str
+ :param text: Required.
+ :type text: str
+ :param country_hint:
+ :type country_hint: str
+ """
+
+ _validation = {
+ 'id': {'required': True},
+ 'text': {'required': True},
+ }
+
+ _attribute_map = {
+ 'id': {'key': 'id', 'type': 'str'},
+ 'text': {'key': 'text', 'type': 'str'},
+ 'country_hint': {'key': 'countryHint', 'type': 'str'},
+ }
+
+ def __init__(self, **kwargs):
+ super(LanguageInput, self).__init__(**kwargs)
+ self.id = kwargs.get('id', None)
+ self.text = kwargs.get('text', None)
+ self.country_hint = kwargs.get('country_hint', None)
+
+
+class LanguageResult(Model):
+ """LanguageResult.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param documents: Required. Response by document
+ :type documents:
+ list[~azure.ai.textanalytics.v3_1_preview_2.models.DocumentLanguage]
+ :param errors: Required. Errors by document id.
+ :type errors:
+ list[~azure.ai.textanalytics.v3_1_preview_2.models.DocumentError]
+ :param statistics:
+ :type statistics:
+ ~azure.ai.textanalytics.v3_1_preview_2.models.RequestStatistics
+ :param model_version: Required. This field indicates which model is used
+ for scoring.
+ :type model_version: str
+ """
+
+ _validation = {
+ 'documents': {'required': True},
+ 'errors': {'required': True},
+ 'model_version': {'required': True},
+ }
+
+ _attribute_map = {
+ 'documents': {'key': 'documents', 'type': '[DocumentLanguage]'},
+ 'errors': {'key': 'errors', 'type': '[DocumentError]'},
+ 'statistics': {'key': 'statistics', 'type': 'RequestStatistics'},
+ 'model_version': {'key': 'modelVersion', 'type': 'str'},
+ }
+
+ def __init__(self, **kwargs):
+ super(LanguageResult, self).__init__(**kwargs)
+ self.documents = kwargs.get('documents', None)
+ self.errors = kwargs.get('errors', None)
+ self.statistics = kwargs.get('statistics', None)
+ self.model_version = kwargs.get('model_version', None)
+
+
+class LinkedEntity(Model):
+ """LinkedEntity.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param name: Required. Entity Linking formal name.
+ :type name: str
+ :param matches: Required. List of instances this entity appears in the
+ text.
+ :type matches: list[~azure.ai.textanalytics.v3_1_preview_2.models.Match]
+ :param language: Required. Language used in the data source.
+ :type language: str
+ :param id: Unique identifier of the recognized entity from the data
+ source.
+ :type id: str
+ :param url: Required. URL for the entity's page from the data source.
+ :type url: str
+ :param data_source: Required. Data source used to extract entity linking,
+ such as Wiki/Bing etc.
+ :type data_source: str
+ :param bing_id: Bing unique identifier of the recognized entity. Use in
+ conjunction with the Bing Entity Search API to fetch additional relevant
+ information.
+ :type bing_id: str
+ """
+
+ _validation = {
+ 'name': {'required': True},
+ 'matches': {'required': True},
+ 'language': {'required': True},
+ 'url': {'required': True},
+ 'data_source': {'required': True},
+ }
+
+ _attribute_map = {
+ 'name': {'key': 'name', 'type': 'str'},
+ 'matches': {'key': 'matches', 'type': '[Match]'},
+ 'language': {'key': 'language', 'type': 'str'},
+ 'id': {'key': 'id', 'type': 'str'},
+ 'url': {'key': 'url', 'type': 'str'},
+ 'data_source': {'key': 'dataSource', 'type': 'str'},
+ 'bing_id': {'key': 'bingId', 'type': 'str'},
+ }
+
+ def __init__(self, **kwargs):
+ super(LinkedEntity, self).__init__(**kwargs)
+ self.name = kwargs.get('name', None)
+ self.matches = kwargs.get('matches', None)
+ self.language = kwargs.get('language', None)
+ self.id = kwargs.get('id', None)
+ self.url = kwargs.get('url', None)
+ self.data_source = kwargs.get('data_source', None)
+ self.bing_id = kwargs.get('bing_id', None)
+
+
+class Match(Model):
+ """Match.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param confidence_score: Required. If a well-known item is recognized, a
+ decimal number denoting the confidence level between 0 and 1 will be
+ returned.
+ :type confidence_score: float
+ :param text: Required. Entity text as appears in the request.
+ :type text: str
+ :param offset: Required. Start position for the entity match text.
+ :type offset: int
+ :param length: Required. Length for the entity match text.
+ :type length: int
+ """
+
+ _validation = {
+ 'confidence_score': {'required': True},
+ 'text': {'required': True},
+ 'offset': {'required': True},
+ 'length': {'required': True},
+ }
+
+ _attribute_map = {
+ 'confidence_score': {'key': 'confidenceScore', 'type': 'float'},
+ 'text': {'key': 'text', 'type': 'str'},
+ 'offset': {'key': 'offset', 'type': 'int'},
+ 'length': {'key': 'length', 'type': 'int'},
+ }
+
+ def __init__(self, **kwargs):
+ super(Match, self).__init__(**kwargs)
+ self.confidence_score = kwargs.get('confidence_score', None)
+ self.text = kwargs.get('text', None)
+ self.offset = kwargs.get('offset', None)
+ self.length = kwargs.get('length', None)
+
+
class MultiLanguageBatchInput(Model):
    """Contains a set of input documents to be analyzed by the service.

    All required parameters must be populated in order to send to Azure.

    :param documents: Required. The set of documents to process as part of
     this batch.
    :type documents:
     list[~azure.ai.textanalytics.v3_1_preview_2.models.MultiLanguageInput]
    """

    # msrest validation rules: attributes required at serialization time.
    _validation = {
        'documents': {'required': True},
    }

    # Maps Python attribute names to JSON wire names/types ('[X]' = list of X).
    _attribute_map = {
        'documents': {'key': 'documents', 'type': '[MultiLanguageInput]'},
    }

    def __init__(self, **kwargs):
        super(MultiLanguageBatchInput, self).__init__(**kwargs)
        self.documents = kwargs.get('documents', None)
+
+
class MultiLanguageInput(Model):
    """Contains an input document to be analyzed by the service.

    All required parameters must be populated in order to send to Azure.

    :param id: Required. A unique, non-empty document identifier.
    :type id: str
    :param text: Required. The input text to process.
    :type text: str
    :param language: (Optional) This is the 2 letter ISO 639-1 representation
     of a language. For example, use "en" for English; "es" for Spanish etc. If
     not set, use "en" for English as default.
    :type language: str
    """

    # msrest validation rules; 'language' is intentionally optional here.
    _validation = {
        'id': {'required': True},
        'text': {'required': True},
    }

    # Maps Python attribute names to JSON wire names/types.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'text': {'key': 'text', 'type': 'str'},
        'language': {'key': 'language', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(MultiLanguageInput, self).__init__(**kwargs)
        self.id = kwargs.get('id', None)
        self.text = kwargs.get('text', None)
        self.language = kwargs.get('language', None)
+
+
class PiiDocumentEntities(Model):
    """PiiDocumentEntities.

    All required parameters must be populated in order to send to Azure.

    :param id: Required. Unique, non-empty document identifier.
    :type id: str
    :param redacted_text: Required. Returns redacted text.
    :type redacted_text: str
    :param entities: Required. Recognized entities in the document.
    :type entities: list[~azure.ai.textanalytics.v3_1_preview_2.models.Entity]
    :param warnings: Required. Warnings encountered while processing document.
    :type warnings:
     list[~azure.ai.textanalytics.v3_1_preview_2.models.TextAnalyticsWarning]
    :param statistics: if showStats=true was specified in the request this
     field will contain information about the document payload.
    :type statistics:
     ~azure.ai.textanalytics.v3_1_preview_2.models.DocumentStatistics
    """

    # msrest validation rules; 'statistics' is optional (only present when
    # showStats=true was requested).
    _validation = {
        'id': {'required': True},
        'redacted_text': {'required': True},
        'entities': {'required': True},
        'warnings': {'required': True},
    }

    # Maps Python attribute names to camelCase JSON wire names/types.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'redacted_text': {'key': 'redactedText', 'type': 'str'},
        'entities': {'key': 'entities', 'type': '[Entity]'},
        'warnings': {'key': 'warnings', 'type': '[TextAnalyticsWarning]'},
        'statistics': {'key': 'statistics', 'type': 'DocumentStatistics'},
    }

    def __init__(self, **kwargs):
        super(PiiDocumentEntities, self).__init__(**kwargs)
        self.id = kwargs.get('id', None)
        self.redacted_text = kwargs.get('redacted_text', None)
        self.entities = kwargs.get('entities', None)
        self.warnings = kwargs.get('warnings', None)
        self.statistics = kwargs.get('statistics', None)
+
+
class PiiEntitiesResult(Model):
    """PiiEntitiesResult.

    All required parameters must be populated in order to send to Azure.

    :param documents: Required. Response by document
    :type documents:
     list[~azure.ai.textanalytics.v3_1_preview_2.models.PiiDocumentEntities]
    :param errors: Required. Errors by document id.
    :type errors:
     list[~azure.ai.textanalytics.v3_1_preview_2.models.DocumentError]
    :param statistics:
    :type statistics:
     ~azure.ai.textanalytics.v3_1_preview_2.models.RequestStatistics
    :param model_version: Required. This field indicates which model is used
     for scoring.
    :type model_version: str
    """

    # msrest validation rules; 'statistics' is optional.
    _validation = {
        'documents': {'required': True},
        'errors': {'required': True},
        'model_version': {'required': True},
    }

    # Maps Python attribute names to camelCase JSON wire names/types.
    _attribute_map = {
        'documents': {'key': 'documents', 'type': '[PiiDocumentEntities]'},
        'errors': {'key': 'errors', 'type': '[DocumentError]'},
        'statistics': {'key': 'statistics', 'type': 'RequestStatistics'},
        'model_version': {'key': 'modelVersion', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(PiiEntitiesResult, self).__init__(**kwargs)
        self.documents = kwargs.get('documents', None)
        self.errors = kwargs.get('errors', None)
        self.statistics = kwargs.get('statistics', None)
        self.model_version = kwargs.get('model_version', None)
+
+
class RequestStatistics(Model):
    """if showStats=true was specified in the request this field will contain
    information about the request payload.

    All required parameters must be populated in order to send to Azure.

    :param documents_count: Required. Number of documents submitted in the
     request.
    :type documents_count: int
    :param valid_documents_count: Required. Number of valid documents. This
     excludes empty, over-size limit or non-supported languages documents.
    :type valid_documents_count: int
    :param erroneous_documents_count: Required. Number of invalid documents.
     This includes empty, over-size limit or non-supported languages documents.
    :type erroneous_documents_count: int
    :param transactions_count: Required. Number of transactions for the
     request.
    :type transactions_count: long
    """

    # msrest validation rules: all counters are required.
    _validation = {
        'documents_count': {'required': True},
        'valid_documents_count': {'required': True},
        'erroneous_documents_count': {'required': True},
        'transactions_count': {'required': True},
    }

    # Maps Python attribute names to camelCase JSON wire names/types;
    # 'long' is msrest's 64-bit integer wire type.
    _attribute_map = {
        'documents_count': {'key': 'documentsCount', 'type': 'int'},
        'valid_documents_count': {'key': 'validDocumentsCount', 'type': 'int'},
        'erroneous_documents_count': {'key': 'erroneousDocumentsCount', 'type': 'int'},
        'transactions_count': {'key': 'transactionsCount', 'type': 'long'},
    }

    def __init__(self, **kwargs):
        super(RequestStatistics, self).__init__(**kwargs)
        self.documents_count = kwargs.get('documents_count', None)
        self.valid_documents_count = kwargs.get('valid_documents_count', None)
        self.erroneous_documents_count = kwargs.get('erroneous_documents_count', None)
        self.transactions_count = kwargs.get('transactions_count', None)
+
+
class SentenceAspect(Model):
    """SentenceAspect.

    All required parameters must be populated in order to send to Azure.

    :param sentiment: Required. Aspect level sentiment for the aspect in the
     sentence. Possible values include: 'positive', 'mixed', 'negative'
    :type sentiment: str or
     ~azure.ai.textanalytics.v3_1_preview_2.models.TokenSentimentValue
    :param confidence_scores: Required. Aspect level sentiment confidence
     scores for the aspect in the sentence.
    :type confidence_scores:
     ~azure.ai.textanalytics.v3_1_preview_2.models.AspectConfidenceScoreLabel
    :param offset: Required. The aspect offset from the start of the sentence.
    :type offset: int
    :param length: Required. The length of the aspect.
    :type length: int
    :param text: Required. The aspect text detected.
    :type text: str
    :param relations: Required. The array of either opinion or aspect object
     which is related to the aspect.
    :type relations:
     list[~azure.ai.textanalytics.v3_1_preview_2.models.AspectRelation]
    """

    # msrest validation rules: every field of this model is required.
    _validation = {
        'sentiment': {'required': True},
        'confidence_scores': {'required': True},
        'offset': {'required': True},
        'length': {'required': True},
        'text': {'required': True},
        'relations': {'required': True},
    }

    # Maps Python attribute names to camelCase JSON wire names/types.
    _attribute_map = {
        'sentiment': {'key': 'sentiment', 'type': 'TokenSentimentValue'},
        'confidence_scores': {'key': 'confidenceScores', 'type': 'AspectConfidenceScoreLabel'},
        'offset': {'key': 'offset', 'type': 'int'},
        'length': {'key': 'length', 'type': 'int'},
        'text': {'key': 'text', 'type': 'str'},
        'relations': {'key': 'relations', 'type': '[AspectRelation]'},
    }

    def __init__(self, **kwargs):
        super(SentenceAspect, self).__init__(**kwargs)
        self.sentiment = kwargs.get('sentiment', None)
        self.confidence_scores = kwargs.get('confidence_scores', None)
        self.offset = kwargs.get('offset', None)
        self.length = kwargs.get('length', None)
        self.text = kwargs.get('text', None)
        self.relations = kwargs.get('relations', None)
+
+
class SentenceOpinion(Model):
    """SentenceOpinion.

    All required parameters must be populated in order to send to Azure.

    :param sentiment: Required. Opinion level sentiment for the aspect in the
     sentence. Possible values include: 'positive', 'mixed', 'negative'
    :type sentiment: str or
     ~azure.ai.textanalytics.v3_1_preview_2.models.TokenSentimentValue
    :param confidence_scores: Required. Opinion level sentiment confidence
     scores for the aspect in the sentence.
    :type confidence_scores:
     ~azure.ai.textanalytics.v3_1_preview_2.models.AspectConfidenceScoreLabel
    :param offset: Required. The opinion offset from the start of the
     sentence.
    :type offset: int
    :param length: Required. The length of the opinion.
    :type length: int
    :param text: Required. The opinion text detected.
    :type text: str
    :param is_negated: Required. The indicator representing if the opinion is
     negated.
    :type is_negated: bool
    """

    # msrest validation rules: every field of this model is required.
    _validation = {
        'sentiment': {'required': True},
        'confidence_scores': {'required': True},
        'offset': {'required': True},
        'length': {'required': True},
        'text': {'required': True},
        'is_negated': {'required': True},
    }

    # Maps Python attribute names to camelCase JSON wire names/types.
    _attribute_map = {
        'sentiment': {'key': 'sentiment', 'type': 'TokenSentimentValue'},
        'confidence_scores': {'key': 'confidenceScores', 'type': 'AspectConfidenceScoreLabel'},
        'offset': {'key': 'offset', 'type': 'int'},
        'length': {'key': 'length', 'type': 'int'},
        'text': {'key': 'text', 'type': 'str'},
        'is_negated': {'key': 'isNegated', 'type': 'bool'},
    }

    def __init__(self, **kwargs):
        super(SentenceOpinion, self).__init__(**kwargs)
        self.sentiment = kwargs.get('sentiment', None)
        self.confidence_scores = kwargs.get('confidence_scores', None)
        self.offset = kwargs.get('offset', None)
        self.length = kwargs.get('length', None)
        self.text = kwargs.get('text', None)
        self.is_negated = kwargs.get('is_negated', None)
+
+
class SentenceSentiment(Model):
    """SentenceSentiment.

    All required parameters must be populated in order to send to Azure.

    :param text: Required. The sentence text.
    :type text: str
    :param sentiment: Required. The predicted Sentiment for the sentence.
     Possible values include: 'positive', 'neutral', 'negative'
    :type sentiment: str or
     ~azure.ai.textanalytics.v3_1_preview_2.models.SentenceSentimentValue
    :param confidence_scores: Required. The sentiment confidence score between
     0 and 1 for the sentence for all classes.
    :type confidence_scores:
     ~azure.ai.textanalytics.v3_1_preview_2.models.SentimentConfidenceScorePerLabel
    :param offset: Required. The sentence offset from the start of the
     document.
    :type offset: int
    :param length: Required. The length of the sentence.
    :type length: int
    :param aspects: The array of aspect object for the sentence.
    :type aspects:
     list[~azure.ai.textanalytics.v3_1_preview_2.models.SentenceAspect]
    :param opinions: The array of opinion object for the sentence.
    :type opinions:
     list[~azure.ai.textanalytics.v3_1_preview_2.models.SentenceOpinion]
    """

    # msrest validation rules; 'aspects' and 'opinions' are optional
    # (only returned when opinion mining was requested).
    _validation = {
        'text': {'required': True},
        'sentiment': {'required': True},
        'confidence_scores': {'required': True},
        'offset': {'required': True},
        'length': {'required': True},
    }

    # Maps Python attribute names to camelCase JSON wire names/types.
    _attribute_map = {
        'text': {'key': 'text', 'type': 'str'},
        'sentiment': {'key': 'sentiment', 'type': 'SentenceSentimentValue'},
        'confidence_scores': {'key': 'confidenceScores', 'type': 'SentimentConfidenceScorePerLabel'},
        'offset': {'key': 'offset', 'type': 'int'},
        'length': {'key': 'length', 'type': 'int'},
        'aspects': {'key': 'aspects', 'type': '[SentenceAspect]'},
        'opinions': {'key': 'opinions', 'type': '[SentenceOpinion]'},
    }

    def __init__(self, **kwargs):
        super(SentenceSentiment, self).__init__(**kwargs)
        self.text = kwargs.get('text', None)
        self.sentiment = kwargs.get('sentiment', None)
        self.confidence_scores = kwargs.get('confidence_scores', None)
        self.offset = kwargs.get('offset', None)
        self.length = kwargs.get('length', None)
        self.aspects = kwargs.get('aspects', None)
        self.opinions = kwargs.get('opinions', None)
+
+
class SentimentConfidenceScorePerLabel(Model):
    """Represents the confidence scores between 0 and 1 across all sentiment
    classes: positive, neutral, negative.

    All required parameters must be populated in order to send to Azure.

    :param positive: Required.
    :type positive: float
    :param neutral: Required.
    :type neutral: float
    :param negative: Required.
    :type negative: float
    """

    # msrest validation rules: all three class scores are required.
    _validation = {
        'positive': {'required': True},
        'neutral': {'required': True},
        'negative': {'required': True},
    }

    # Maps Python attribute names to JSON wire names/types.
    _attribute_map = {
        'positive': {'key': 'positive', 'type': 'float'},
        'neutral': {'key': 'neutral', 'type': 'float'},
        'negative': {'key': 'negative', 'type': 'float'},
    }

    def __init__(self, **kwargs):
        super(SentimentConfidenceScorePerLabel, self).__init__(**kwargs)
        self.positive = kwargs.get('positive', None)
        self.neutral = kwargs.get('neutral', None)
        self.negative = kwargs.get('negative', None)
+
+
class SentimentResponse(Model):
    """SentimentResponse.

    All required parameters must be populated in order to send to Azure.

    :param documents: Required. Sentiment analysis per document.
    :type documents:
     list[~azure.ai.textanalytics.v3_1_preview_2.models.DocumentSentiment]
    :param errors: Required. Errors by document id.
    :type errors:
     list[~azure.ai.textanalytics.v3_1_preview_2.models.DocumentError]
    :param statistics:
    :type statistics:
     ~azure.ai.textanalytics.v3_1_preview_2.models.RequestStatistics
    :param model_version: Required. This field indicates which model is used
     for scoring.
    :type model_version: str
    """

    # msrest validation rules; 'statistics' is optional.
    _validation = {
        'documents': {'required': True},
        'errors': {'required': True},
        'model_version': {'required': True},
    }

    # Maps Python attribute names to camelCase JSON wire names/types.
    _attribute_map = {
        'documents': {'key': 'documents', 'type': '[DocumentSentiment]'},
        'errors': {'key': 'errors', 'type': '[DocumentError]'},
        'statistics': {'key': 'statistics', 'type': 'RequestStatistics'},
        'model_version': {'key': 'modelVersion', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(SentimentResponse, self).__init__(**kwargs)
        self.documents = kwargs.get('documents', None)
        self.errors = kwargs.get('errors', None)
        self.statistics = kwargs.get('statistics', None)
        self.model_version = kwargs.get('model_version', None)
+
+
class TextAnalyticsError(Model):
    """TextAnalyticsError.

    All required parameters must be populated in order to send to Azure.

    :param code: Required. Error code. Possible values include:
     'InvalidRequest', 'InvalidArgument', 'InternalServerError',
     'ServiceUnavailable'
    :type code: str or
     ~azure.ai.textanalytics.v3_1_preview_2.models.ErrorCodeValue
    :param message: Required. Error message.
    :type message: str
    :param target: Error target.
    :type target: str
    :param innererror: Inner error contains more specific information.
    :type innererror: ~azure.ai.textanalytics.v3_1_preview_2.models.InnerError
    :param details: Details about specific errors that led to this reported
     error.
    :type details:
     list[~azure.ai.textanalytics.v3_1_preview_2.models.TextAnalyticsError]
    """

    # msrest validation rules; only code and message are guaranteed present.
    _validation = {
        'code': {'required': True},
        'message': {'required': True},
    }

    # Maps Python attribute names to JSON wire names/types. Note 'details'
    # is self-referential: errors can nest further TextAnalyticsError items.
    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'target': {'key': 'target', 'type': 'str'},
        'innererror': {'key': 'innererror', 'type': 'InnerError'},
        'details': {'key': 'details', 'type': '[TextAnalyticsError]'},
    }

    def __init__(self, **kwargs):
        super(TextAnalyticsError, self).__init__(**kwargs)
        self.code = kwargs.get('code', None)
        self.message = kwargs.get('message', None)
        self.target = kwargs.get('target', None)
        self.innererror = kwargs.get('innererror', None)
        self.details = kwargs.get('details', None)
+
+
class TextAnalyticsWarning(Model):
    """TextAnalyticsWarning.

    All required parameters must be populated in order to send to Azure.

    :param code: Required. Error code. Possible values include:
     'LongWordsInDocument', 'DocumentTruncated'
    :type code: str or
     ~azure.ai.textanalytics.v3_1_preview_2.models.WarningCodeValue
    :param message: Required. Warning message.
    :type message: str
    :param target_ref: A JSON pointer reference indicating the target object.
    :type target_ref: str
    """

    # msrest validation rules; 'target_ref' is optional.
    _validation = {
        'code': {'required': True},
        'message': {'required': True},
    }

    # Maps Python attribute names to camelCase JSON wire names/types.
    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'target_ref': {'key': 'targetRef', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(TextAnalyticsWarning, self).__init__(**kwargs)
        self.code = kwargs.get('code', None)
        self.message = kwargs.get('message', None)
        self.target_ref = kwargs.get('target_ref', None)
diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_2/models/_models_py3.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_2/models/_models_py3.py
new file mode 100644
index 000000000000..d2810fe5a619
--- /dev/null
+++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_2/models/_models_py3.py
@@ -0,0 +1,1308 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from msrest.serialization import Model
+from msrest.exceptions import HttpOperationError
+
+
class AspectConfidenceScoreLabel(Model):
    """Represents the confidence scores across all sentiment classes: positive,
    neutral, negative.

    All required parameters must be populated in order to send to Azure.

    :param positive: Required.
    :type positive: float
    :param negative: Required.
    :type negative: float
    """

    # msrest validation rules: both scores are required.
    _validation = {
        'positive': {'required': True},
        'negative': {'required': True},
    }

    # Maps Python attribute names to JSON wire names/types.
    _attribute_map = {
        'positive': {'key': 'positive', 'type': 'float'},
        'negative': {'key': 'negative', 'type': 'float'},
    }

    def __init__(self, *, positive: float, negative: float, **kwargs) -> None:
        super(AspectConfidenceScoreLabel, self).__init__(**kwargs)
        self.positive = positive
        self.negative = negative
+
+
class AspectRelation(Model):
    """AspectRelation.

    All required parameters must be populated in order to send to Azure.

    :param relation_type: Required. The type related to the aspect. Possible
     values include: 'opinion', 'aspect'
    :type relation_type: str or
     ~azure.ai.textanalytics.v3_1_preview_2.models.AspectRelationType
    :param ref: Required. The JSON pointer indicating the linked object.
    :type ref: str
    """

    # msrest validation rules: both fields are required.
    _validation = {
        'relation_type': {'required': True},
        'ref': {'required': True},
    }

    # Maps Python attribute names to camelCase JSON wire names/types.
    _attribute_map = {
        'relation_type': {'key': 'relationType', 'type': 'AspectRelationType'},
        'ref': {'key': 'ref', 'type': 'str'},
    }

    def __init__(self, *, relation_type, ref: str, **kwargs) -> None:
        super(AspectRelation, self).__init__(**kwargs)
        self.relation_type = relation_type
        self.ref = ref
+
+
class DetectedLanguage(Model):
    """DetectedLanguage.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. Long name of a detected language (e.g. English,
     French).
    :type name: str
    :param iso6391_name: Required. A two letter representation of the detected
     language according to the ISO 639-1 standard (e.g. en, fr).
    :type iso6391_name: str
    :param confidence_score: Required. A confidence score between 0 and 1.
     Scores close to 1 indicate 100% certainty that the identified language is
     true.
    :type confidence_score: float
    """

    # msrest validation rules: all fields are required.
    _validation = {
        'name': {'required': True},
        'iso6391_name': {'required': True},
        'confidence_score': {'required': True},
    }

    # Maps Python attribute names to camelCase JSON wire names/types.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'iso6391_name': {'key': 'iso6391Name', 'type': 'str'},
        'confidence_score': {'key': 'confidenceScore', 'type': 'float'},
    }

    def __init__(self, *, name: str, iso6391_name: str, confidence_score: float, **kwargs) -> None:
        super(DetectedLanguage, self).__init__(**kwargs)
        self.name = name
        self.iso6391_name = iso6391_name
        self.confidence_score = confidence_score
+
+
class DocumentEntities(Model):
    """DocumentEntities.

    All required parameters must be populated in order to send to Azure.

    :param id: Required. Unique, non-empty document identifier.
    :type id: str
    :param entities: Required. Recognized entities in the document.
    :type entities: list[~azure.ai.textanalytics.v3_1_preview_2.models.Entity]
    :param warnings: Required. Warnings encountered while processing document.
    :type warnings:
     list[~azure.ai.textanalytics.v3_1_preview_2.models.TextAnalyticsWarning]
    :param statistics: if showStats=true was specified in the request this
     field will contain information about the document payload.
    :type statistics:
     ~azure.ai.textanalytics.v3_1_preview_2.models.DocumentStatistics
    """

    # msrest validation rules; 'statistics' is optional.
    _validation = {
        'id': {'required': True},
        'entities': {'required': True},
        'warnings': {'required': True},
    }

    # Maps Python attribute names to JSON wire names/types.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'entities': {'key': 'entities', 'type': '[Entity]'},
        'warnings': {'key': 'warnings', 'type': '[TextAnalyticsWarning]'},
        'statistics': {'key': 'statistics', 'type': 'DocumentStatistics'},
    }

    # Note: the generated parameter 'id' shadows the builtin; kept as-is for
    # wire-format fidelity.
    def __init__(self, *, id: str, entities, warnings, statistics=None, **kwargs) -> None:
        super(DocumentEntities, self).__init__(**kwargs)
        self.id = id
        self.entities = entities
        self.warnings = warnings
        self.statistics = statistics
+
+
class DocumentError(Model):
    """DocumentError.

    All required parameters must be populated in order to send to Azure.

    :param id: Required. Document Id.
    :type id: str
    :param error: Required. Document Error.
    :type error:
     ~azure.ai.textanalytics.v3_1_preview_2.models.TextAnalyticsError
    """

    # msrest validation rules: both fields are required.
    _validation = {
        'id': {'required': True},
        'error': {'required': True},
    }

    # Maps Python attribute names to JSON wire names/types.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'error': {'key': 'error', 'type': 'TextAnalyticsError'},
    }

    def __init__(self, *, id: str, error, **kwargs) -> None:
        super(DocumentError, self).__init__(**kwargs)
        self.id = id
        self.error = error
+
+
class DocumentKeyPhrases(Model):
    """DocumentKeyPhrases.

    All required parameters must be populated in order to send to Azure.

    :param id: Required. Unique, non-empty document identifier.
    :type id: str
    :param key_phrases: Required. A list of representative words or phrases.
     The number of key phrases returned is proportional to the number of words
     in the input document.
    :type key_phrases: list[str]
    :param warnings: Required. Warnings encountered while processing document.
    :type warnings:
     list[~azure.ai.textanalytics.v3_1_preview_2.models.TextAnalyticsWarning]
    :param statistics: if showStats=true was specified in the request this
     field will contain information about the document payload.
    :type statistics:
     ~azure.ai.textanalytics.v3_1_preview_2.models.DocumentStatistics
    """

    # msrest validation rules; 'statistics' is optional.
    _validation = {
        'id': {'required': True},
        'key_phrases': {'required': True},
        'warnings': {'required': True},
    }

    # Maps Python attribute names to camelCase JSON wire names/types.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'key_phrases': {'key': 'keyPhrases', 'type': '[str]'},
        'warnings': {'key': 'warnings', 'type': '[TextAnalyticsWarning]'},
        'statistics': {'key': 'statistics', 'type': 'DocumentStatistics'},
    }

    def __init__(self, *, id: str, key_phrases, warnings, statistics=None, **kwargs) -> None:
        super(DocumentKeyPhrases, self).__init__(**kwargs)
        self.id = id
        self.key_phrases = key_phrases
        self.warnings = warnings
        self.statistics = statistics
+
+
class DocumentLanguage(Model):
    """DocumentLanguage.

    All required parameters must be populated in order to send to Azure.

    :param id: Required. Unique, non-empty document identifier.
    :type id: str
    :param detected_language: Required. Detected Language.
    :type detected_language:
     ~azure.ai.textanalytics.v3_1_preview_2.models.DetectedLanguage
    :param warnings: Required. Warnings encountered while processing document.
    :type warnings:
     list[~azure.ai.textanalytics.v3_1_preview_2.models.TextAnalyticsWarning]
    :param statistics: if showStats=true was specified in the request this
     field will contain information about the document payload.
    :type statistics:
     ~azure.ai.textanalytics.v3_1_preview_2.models.DocumentStatistics
    """

    # msrest validation rules; 'statistics' is optional.
    _validation = {
        'id': {'required': True},
        'detected_language': {'required': True},
        'warnings': {'required': True},
    }

    # Maps Python attribute names to camelCase JSON wire names/types.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'detected_language': {'key': 'detectedLanguage', 'type': 'DetectedLanguage'},
        'warnings': {'key': 'warnings', 'type': '[TextAnalyticsWarning]'},
        'statistics': {'key': 'statistics', 'type': 'DocumentStatistics'},
    }

    def __init__(self, *, id: str, detected_language, warnings, statistics=None, **kwargs) -> None:
        super(DocumentLanguage, self).__init__(**kwargs)
        self.id = id
        self.detected_language = detected_language
        self.warnings = warnings
        self.statistics = statistics
+
+
class DocumentLinkedEntities(Model):
    """DocumentLinkedEntities.

    All required parameters must be populated in order to send to Azure.

    :param id: Required. Unique, non-empty document identifier.
    :type id: str
    :param entities: Required. Recognized well-known entities in the document.
    :type entities:
     list[~azure.ai.textanalytics.v3_1_preview_2.models.LinkedEntity]
    :param warnings: Required. Warnings encountered while processing document.
    :type warnings:
     list[~azure.ai.textanalytics.v3_1_preview_2.models.TextAnalyticsWarning]
    :param statistics: if showStats=true was specified in the request this
     field will contain information about the document payload.
    :type statistics:
     ~azure.ai.textanalytics.v3_1_preview_2.models.DocumentStatistics
    """

    # msrest validation rules; 'statistics' is optional.
    _validation = {
        'id': {'required': True},
        'entities': {'required': True},
        'warnings': {'required': True},
    }

    # Maps Python attribute names to JSON wire names/types.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'entities': {'key': 'entities', 'type': '[LinkedEntity]'},
        'warnings': {'key': 'warnings', 'type': '[TextAnalyticsWarning]'},
        'statistics': {'key': 'statistics', 'type': 'DocumentStatistics'},
    }

    def __init__(self, *, id: str, entities, warnings, statistics=None, **kwargs) -> None:
        super(DocumentLinkedEntities, self).__init__(**kwargs)
        self.id = id
        self.entities = entities
        self.warnings = warnings
        self.statistics = statistics
+
+
class DocumentSentiment(Model):
    """DocumentSentiment.

    All required parameters must be populated in order to send to Azure.

    :param id: Required. Unique, non-empty document identifier.
    :type id: str
    :param sentiment: Required. Predicted sentiment for document (Negative,
     Neutral, Positive, or Mixed). Possible values include: 'positive',
     'neutral', 'negative', 'mixed'
    :type sentiment: str or
     ~azure.ai.textanalytics.v3_1_preview_2.models.DocumentSentimentValue
    :param statistics:
    :type statistics:
     ~azure.ai.textanalytics.v3_1_preview_2.models.DocumentStatistics
    :param confidence_scores: Required. Document level sentiment confidence
     scores between 0 and 1 for each sentiment class.
    :type confidence_scores:
     ~azure.ai.textanalytics.v3_1_preview_2.models.SentimentConfidenceScorePerLabel
    :param sentences: Required. Sentence level sentiment analysis.
    :type sentences:
     list[~azure.ai.textanalytics.v3_1_preview_2.models.SentenceSentiment]
    :param warnings: Required. Warnings encountered while processing document.
    :type warnings:
     list[~azure.ai.textanalytics.v3_1_preview_2.models.TextAnalyticsWarning]
    """

    # msrest validation rules; 'statistics' is optional.
    _validation = {
        'id': {'required': True},
        'sentiment': {'required': True},
        'confidence_scores': {'required': True},
        'sentences': {'required': True},
        'warnings': {'required': True},
    }

    # Maps Python attribute names to camelCase JSON wire names/types.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'sentiment': {'key': 'sentiment', 'type': 'DocumentSentimentValue'},
        'statistics': {'key': 'statistics', 'type': 'DocumentStatistics'},
        'confidence_scores': {'key': 'confidenceScores', 'type': 'SentimentConfidenceScorePerLabel'},
        'sentences': {'key': 'sentences', 'type': '[SentenceSentiment]'},
        'warnings': {'key': 'warnings', 'type': '[TextAnalyticsWarning]'},
    }

    def __init__(self, *, id: str, sentiment, confidence_scores, sentences, warnings, statistics=None, **kwargs) -> None:
        super(DocumentSentiment, self).__init__(**kwargs)
        self.id = id
        self.sentiment = sentiment
        self.statistics = statistics
        self.confidence_scores = confidence_scores
        self.sentences = sentences
        self.warnings = warnings
+
+
class DocumentStatistics(Model):
    """if showStats=true was specified in the request this field will contain
    information about the document payload.

    All required parameters must be populated in order to send to Azure.

    :param characters_count: Required. Number of text elements recognized in
     the document.
    :type characters_count: int
    :param transactions_count: Required. Number of transactions for the
     document.
    :type transactions_count: int
    """

    # msrest validation rules: both counters are required.
    _validation = {
        'characters_count': {'required': True},
        'transactions_count': {'required': True},
    }

    # Maps Python attribute names to camelCase JSON wire names/types.
    _attribute_map = {
        'characters_count': {'key': 'charactersCount', 'type': 'int'},
        'transactions_count': {'key': 'transactionsCount', 'type': 'int'},
    }

    def __init__(self, *, characters_count: int, transactions_count: int, **kwargs) -> None:
        super(DocumentStatistics, self).__init__(**kwargs)
        self.characters_count = characters_count
        self.transactions_count = transactions_count
+
+
class EntitiesResult(Model):
    """EntitiesResult.

    All required parameters must be populated in order to send to Azure.

    :param documents: Required. Response by document
    :type documents:
     list[~azure.ai.textanalytics.v3_1_preview_2.models.DocumentEntities]
    :param errors: Required. Errors by document id.
    :type errors:
     list[~azure.ai.textanalytics.v3_1_preview_2.models.DocumentError]
    :param statistics:
    :type statistics:
     ~azure.ai.textanalytics.v3_1_preview_2.models.RequestStatistics
    :param model_version: Required. This field indicates which model is used
     for scoring.
    :type model_version: str
    """

    # msrest validation rules; 'statistics' is optional.
    _validation = {
        'documents': {'required': True},
        'errors': {'required': True},
        'model_version': {'required': True},
    }

    # Maps Python attribute names to camelCase JSON wire names/types.
    _attribute_map = {
        'documents': {'key': 'documents', 'type': '[DocumentEntities]'},
        'errors': {'key': 'errors', 'type': '[DocumentError]'},
        'statistics': {'key': 'statistics', 'type': 'RequestStatistics'},
        'model_version': {'key': 'modelVersion', 'type': 'str'},
    }

    def __init__(self, *, documents, errors, model_version: str, statistics=None, **kwargs) -> None:
        super(EntitiesResult, self).__init__(**kwargs)
        self.documents = documents
        self.errors = errors
        self.statistics = statistics
        self.model_version = model_version
+
+
class Entity(Model):
    """Entity.

    All required parameters must be populated in order to send to Azure.

    :param text: Required. Entity text as appears in the request.
    :type text: str
    :param category: Required. Entity type, such as Person/Location/Org/SSN
     etc
    :type category: str
    :param subcategory: Entity sub type, such as Age/Year/TimeRange etc
    :type subcategory: str
    :param offset: Required. Start position for the entity text.
    :type offset: int
    :param length: Required. Length for the entity text.
    :type length: int
    :param confidence_score: Required. Confidence score between 0 and 1 of the
     extracted entity.
    :type confidence_score: float
    """

    # msrest validation rules; 'subcategory' is optional.
    _validation = {
        'text': {'required': True},
        'category': {'required': True},
        'offset': {'required': True},
        'length': {'required': True},
        'confidence_score': {'required': True},
    }

    # Maps Python attribute names to camelCase JSON wire names/types.
    _attribute_map = {
        'text': {'key': 'text', 'type': 'str'},
        'category': {'key': 'category', 'type': 'str'},
        'subcategory': {'key': 'subcategory', 'type': 'str'},
        'offset': {'key': 'offset', 'type': 'int'},
        'length': {'key': 'length', 'type': 'int'},
        'confidence_score': {'key': 'confidenceScore', 'type': 'float'},
    }

    def __init__(self, *, text: str, category: str, offset: int, length: int, confidence_score: float, subcategory: str=None, **kwargs) -> None:
        super(Entity, self).__init__(**kwargs)
        self.text = text
        self.category = category
        self.subcategory = subcategory
        self.offset = offset
        self.length = length
        self.confidence_score = confidence_score
+
+
+class EntityLinkingResult(Model):
+ """EntityLinkingResult.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param documents: Required. Response by document
+ :type documents:
+ list[~azure.ai.textanalytics.v3_1_preview_2.models.DocumentLinkedEntities]
+ :param errors: Required. Errors by document id.
+ :type errors:
+ list[~azure.ai.textanalytics.v3_1_preview_2.models.DocumentError]
+ :param statistics:
+ :type statistics:
+ ~azure.ai.textanalytics.v3_1_preview_2.models.RequestStatistics
+ :param model_version: Required. This field indicates which model is used
+ for scoring.
+ :type model_version: str
+ """
+
+ _validation = {
+ 'documents': {'required': True},
+ 'errors': {'required': True},
+ 'model_version': {'required': True},
+ }
+
+ _attribute_map = {
+ 'documents': {'key': 'documents', 'type': '[DocumentLinkedEntities]'},
+ 'errors': {'key': 'errors', 'type': '[DocumentError]'},
+ 'statistics': {'key': 'statistics', 'type': 'RequestStatistics'},
+ 'model_version': {'key': 'modelVersion', 'type': 'str'},
+ }
+
+ def __init__(self, *, documents, errors, model_version: str, statistics=None, **kwargs) -> None:
+ super(EntityLinkingResult, self).__init__(**kwargs)
+ self.documents = documents
+ self.errors = errors
+ self.statistics = statistics
+ self.model_version = model_version
+
+
+class ErrorResponse(Model):
+ """ErrorResponse.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param error: Required. Document Error.
+ :type error:
+ ~azure.ai.textanalytics.v3_1_preview_2.models.TextAnalyticsError
+ """
+
+ _validation = {
+ 'error': {'required': True},
+ }
+
+ _attribute_map = {
+ 'error': {'key': 'error', 'type': 'TextAnalyticsError'},
+ }
+
+ def __init__(self, *, error, **kwargs) -> None:
+ super(ErrorResponse, self).__init__(**kwargs)
+ self.error = error
+
+
+class ErrorResponseException(HttpOperationError):
+ """Server responded with exception of type: 'ErrorResponse'.
+
+ :param deserialize: A deserializer
+ :param response: Server response to be deserialized.
+ """
+
+ def __init__(self, deserialize, response, *args):
+
+ super(ErrorResponseException, self).__init__(deserialize, response, 'ErrorResponse', *args)
+
+
+class InnerError(Model):
+ """InnerError.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param code: Required. Error code. Possible values include:
+ 'InvalidParameterValue', 'InvalidRequestBodyFormat', 'EmptyRequest',
+ 'MissingInputRecords', 'InvalidDocument', 'ModelVersionIncorrect',
+ 'InvalidDocumentBatch', 'UnsupportedLanguageCode', 'InvalidCountryHint'
+ :type code: str or
+ ~azure.ai.textanalytics.v3_1_preview_2.models.InnerErrorCodeValue
+ :param message: Required. Error message.
+ :type message: str
+ :param details: Error details.
+ :type details: dict[str, str]
+ :param target: Error target.
+ :type target: str
+ :param innererror: Inner error contains more specific information.
+ :type innererror: ~azure.ai.textanalytics.v3_1_preview_2.models.InnerError
+ """
+
+ _validation = {
+ 'code': {'required': True},
+ 'message': {'required': True},
+ }
+
+ _attribute_map = {
+ 'code': {'key': 'code', 'type': 'str'},
+ 'message': {'key': 'message', 'type': 'str'},
+ 'details': {'key': 'details', 'type': '{str}'},
+ 'target': {'key': 'target', 'type': 'str'},
+ 'innererror': {'key': 'innererror', 'type': 'InnerError'},
+ }
+
+ def __init__(self, *, code, message: str, details=None, target: str=None, innererror=None, **kwargs) -> None:
+ super(InnerError, self).__init__(**kwargs)
+ self.code = code
+ self.message = message
+ self.details = details
+ self.target = target
+ self.innererror = innererror
+
+
+class KeyPhraseResult(Model):
+ """KeyPhraseResult.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param documents: Required. Response by document
+ :type documents:
+ list[~azure.ai.textanalytics.v3_1_preview_2.models.DocumentKeyPhrases]
+ :param errors: Required. Errors by document id.
+ :type errors:
+ list[~azure.ai.textanalytics.v3_1_preview_2.models.DocumentError]
+ :param statistics:
+ :type statistics:
+ ~azure.ai.textanalytics.v3_1_preview_2.models.RequestStatistics
+ :param model_version: Required. This field indicates which model is used
+ for scoring.
+ :type model_version: str
+ """
+
+ _validation = {
+ 'documents': {'required': True},
+ 'errors': {'required': True},
+ 'model_version': {'required': True},
+ }
+
+ _attribute_map = {
+ 'documents': {'key': 'documents', 'type': '[DocumentKeyPhrases]'},
+ 'errors': {'key': 'errors', 'type': '[DocumentError]'},
+ 'statistics': {'key': 'statistics', 'type': 'RequestStatistics'},
+ 'model_version': {'key': 'modelVersion', 'type': 'str'},
+ }
+
+ def __init__(self, *, documents, errors, model_version: str, statistics=None, **kwargs) -> None:
+ super(KeyPhraseResult, self).__init__(**kwargs)
+ self.documents = documents
+ self.errors = errors
+ self.statistics = statistics
+ self.model_version = model_version
+
+
+class LanguageBatchInput(Model):
+ """LanguageBatchInput.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param documents: Required.
+ :type documents:
+ list[~azure.ai.textanalytics.v3_1_preview_2.models.LanguageInput]
+ """
+
+ _validation = {
+ 'documents': {'required': True},
+ }
+
+ _attribute_map = {
+ 'documents': {'key': 'documents', 'type': '[LanguageInput]'},
+ }
+
+ def __init__(self, *, documents, **kwargs) -> None:
+ super(LanguageBatchInput, self).__init__(**kwargs)
+ self.documents = documents
+
+
+class LanguageInput(Model):
+ """LanguageInput.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param id: Required. Unique, non-empty document identifier.
+ :type id: str
+ :param text: Required.
+ :type text: str
+ :param country_hint:
+ :type country_hint: str
+ """
+
+ _validation = {
+ 'id': {'required': True},
+ 'text': {'required': True},
+ }
+
+ _attribute_map = {
+ 'id': {'key': 'id', 'type': 'str'},
+ 'text': {'key': 'text', 'type': 'str'},
+ 'country_hint': {'key': 'countryHint', 'type': 'str'},
+ }
+
+ def __init__(self, *, id: str, text: str, country_hint: str=None, **kwargs) -> None:
+ super(LanguageInput, self).__init__(**kwargs)
+ self.id = id
+ self.text = text
+ self.country_hint = country_hint
+
+
+class LanguageResult(Model):
+ """LanguageResult.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param documents: Required. Response by document
+ :type documents:
+ list[~azure.ai.textanalytics.v3_1_preview_2.models.DocumentLanguage]
+ :param errors: Required. Errors by document id.
+ :type errors:
+ list[~azure.ai.textanalytics.v3_1_preview_2.models.DocumentError]
+ :param statistics:
+ :type statistics:
+ ~azure.ai.textanalytics.v3_1_preview_2.models.RequestStatistics
+ :param model_version: Required. This field indicates which model is used
+ for scoring.
+ :type model_version: str
+ """
+
+ _validation = {
+ 'documents': {'required': True},
+ 'errors': {'required': True},
+ 'model_version': {'required': True},
+ }
+
+ _attribute_map = {
+ 'documents': {'key': 'documents', 'type': '[DocumentLanguage]'},
+ 'errors': {'key': 'errors', 'type': '[DocumentError]'},
+ 'statistics': {'key': 'statistics', 'type': 'RequestStatistics'},
+ 'model_version': {'key': 'modelVersion', 'type': 'str'},
+ }
+
+ def __init__(self, *, documents, errors, model_version: str, statistics=None, **kwargs) -> None:
+ super(LanguageResult, self).__init__(**kwargs)
+ self.documents = documents
+ self.errors = errors
+ self.statistics = statistics
+ self.model_version = model_version
+
+
+class LinkedEntity(Model):
+ """LinkedEntity.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param name: Required. Entity Linking formal name.
+ :type name: str
+ :param matches: Required. List of instances this entity appears in the
+ text.
+ :type matches: list[~azure.ai.textanalytics.v3_1_preview_2.models.Match]
+ :param language: Required. Language used in the data source.
+ :type language: str
+ :param id: Unique identifier of the recognized entity from the data
+ source.
+ :type id: str
+ :param url: Required. URL for the entity's page from the data source.
+ :type url: str
+ :param data_source: Required. Data source used to extract entity linking,
+ such as Wiki/Bing etc.
+ :type data_source: str
+ :param bing_id: Bing unique identifier of the recognized entity. Use in
+ conjunction with the Bing Entity Search API to fetch additional relevant
+ information.
+ :type bing_id: str
+ """
+
+ _validation = {
+ 'name': {'required': True},
+ 'matches': {'required': True},
+ 'language': {'required': True},
+ 'url': {'required': True},
+ 'data_source': {'required': True},
+ }
+
+ _attribute_map = {
+ 'name': {'key': 'name', 'type': 'str'},
+ 'matches': {'key': 'matches', 'type': '[Match]'},
+ 'language': {'key': 'language', 'type': 'str'},
+ 'id': {'key': 'id', 'type': 'str'},
+ 'url': {'key': 'url', 'type': 'str'},
+ 'data_source': {'key': 'dataSource', 'type': 'str'},
+ 'bing_id': {'key': 'bingId', 'type': 'str'},
+ }
+
+ def __init__(self, *, name: str, matches, language: str, url: str, data_source: str, id: str=None, bing_id: str=None, **kwargs) -> None:
+ super(LinkedEntity, self).__init__(**kwargs)
+ self.name = name
+ self.matches = matches
+ self.language = language
+ self.id = id
+ self.url = url
+ self.data_source = data_source
+ self.bing_id = bing_id
+
+
+class Match(Model):
+ """Match.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param confidence_score: Required. If a well-known item is recognized, a
+ decimal number denoting the confidence level between 0 and 1 will be
+ returned.
+ :type confidence_score: float
+ :param text: Required. Entity text as appears in the request.
+ :type text: str
+ :param offset: Required. Start position for the entity match text.
+ :type offset: int
+ :param length: Required. Length for the entity match text.
+ :type length: int
+ """
+
+ _validation = {
+ 'confidence_score': {'required': True},
+ 'text': {'required': True},
+ 'offset': {'required': True},
+ 'length': {'required': True},
+ }
+
+ _attribute_map = {
+ 'confidence_score': {'key': 'confidenceScore', 'type': 'float'},
+ 'text': {'key': 'text', 'type': 'str'},
+ 'offset': {'key': 'offset', 'type': 'int'},
+ 'length': {'key': 'length', 'type': 'int'},
+ }
+
+ def __init__(self, *, confidence_score: float, text: str, offset: int, length: int, **kwargs) -> None:
+ super(Match, self).__init__(**kwargs)
+ self.confidence_score = confidence_score
+ self.text = text
+ self.offset = offset
+ self.length = length
+
+
+class MultiLanguageBatchInput(Model):
+ """Contains a set of input documents to be analyzed by the service.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param documents: Required. The set of documents to process as part of
+ this batch.
+ :type documents:
+ list[~azure.ai.textanalytics.v3_1_preview_2.models.MultiLanguageInput]
+ """
+
+ _validation = {
+ 'documents': {'required': True},
+ }
+
+ _attribute_map = {
+ 'documents': {'key': 'documents', 'type': '[MultiLanguageInput]'},
+ }
+
+ def __init__(self, *, documents, **kwargs) -> None:
+ super(MultiLanguageBatchInput, self).__init__(**kwargs)
+ self.documents = documents
+
+
+class MultiLanguageInput(Model):
+ """Contains an input document to be analyzed by the service.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param id: Required. A unique, non-empty document identifier.
+ :type id: str
+ :param text: Required. The input text to process.
+ :type text: str
+ :param language: (Optional) This is the 2 letter ISO 639-1 representation
+ of a language. For example, use "en" for English; "es" for Spanish etc. If
+ not set, use "en" for English as default.
+ :type language: str
+ """
+
+ _validation = {
+ 'id': {'required': True},
+ 'text': {'required': True},
+ }
+
+ _attribute_map = {
+ 'id': {'key': 'id', 'type': 'str'},
+ 'text': {'key': 'text', 'type': 'str'},
+ 'language': {'key': 'language', 'type': 'str'},
+ }
+
+ def __init__(self, *, id: str, text: str, language: str=None, **kwargs) -> None:
+ super(MultiLanguageInput, self).__init__(**kwargs)
+ self.id = id
+ self.text = text
+ self.language = language
+
+
+class PiiDocumentEntities(Model):
+ """PiiDocumentEntities.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param id: Required. Unique, non-empty document identifier.
+ :type id: str
+ :param redacted_text: Required. Returns redacted text.
+ :type redacted_text: str
+ :param entities: Required. Recognized entities in the document.
+ :type entities: list[~azure.ai.textanalytics.v3_1_preview_2.models.Entity]
+ :param warnings: Required. Warnings encountered while processing document.
+ :type warnings:
+ list[~azure.ai.textanalytics.v3_1_preview_2.models.TextAnalyticsWarning]
+ :param statistics: if showStats=true was specified in the request this
+ field will contain information about the document payload.
+ :type statistics:
+ ~azure.ai.textanalytics.v3_1_preview_2.models.DocumentStatistics
+ """
+
+ _validation = {
+ 'id': {'required': True},
+ 'redacted_text': {'required': True},
+ 'entities': {'required': True},
+ 'warnings': {'required': True},
+ }
+
+ _attribute_map = {
+ 'id': {'key': 'id', 'type': 'str'},
+ 'redacted_text': {'key': 'redactedText', 'type': 'str'},
+ 'entities': {'key': 'entities', 'type': '[Entity]'},
+ 'warnings': {'key': 'warnings', 'type': '[TextAnalyticsWarning]'},
+ 'statistics': {'key': 'statistics', 'type': 'DocumentStatistics'},
+ }
+
+ def __init__(self, *, id: str, redacted_text: str, entities, warnings, statistics=None, **kwargs) -> None:
+ super(PiiDocumentEntities, self).__init__(**kwargs)
+ self.id = id
+ self.redacted_text = redacted_text
+ self.entities = entities
+ self.warnings = warnings
+ self.statistics = statistics
+
+
+class PiiEntitiesResult(Model):
+ """PiiEntitiesResult.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param documents: Required. Response by document
+ :type documents:
+ list[~azure.ai.textanalytics.v3_1_preview_2.models.PiiDocumentEntities]
+ :param errors: Required. Errors by document id.
+ :type errors:
+ list[~azure.ai.textanalytics.v3_1_preview_2.models.DocumentError]
+ :param statistics:
+ :type statistics:
+ ~azure.ai.textanalytics.v3_1_preview_2.models.RequestStatistics
+ :param model_version: Required. This field indicates which model is used
+ for scoring.
+ :type model_version: str
+ """
+
+ _validation = {
+ 'documents': {'required': True},
+ 'errors': {'required': True},
+ 'model_version': {'required': True},
+ }
+
+ _attribute_map = {
+ 'documents': {'key': 'documents', 'type': '[PiiDocumentEntities]'},
+ 'errors': {'key': 'errors', 'type': '[DocumentError]'},
+ 'statistics': {'key': 'statistics', 'type': 'RequestStatistics'},
+ 'model_version': {'key': 'modelVersion', 'type': 'str'},
+ }
+
+ def __init__(self, *, documents, errors, model_version: str, statistics=None, **kwargs) -> None:
+ super(PiiEntitiesResult, self).__init__(**kwargs)
+ self.documents = documents
+ self.errors = errors
+ self.statistics = statistics
+ self.model_version = model_version
+
+
+class RequestStatistics(Model):
+ """if showStats=true was specified in the request this field will contain
+ information about the request payload.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param documents_count: Required. Number of documents submitted in the
+ request.
+ :type documents_count: int
+ :param valid_documents_count: Required. Number of valid documents. This
+ excludes empty, over-size limit or non-supported languages documents.
+ :type valid_documents_count: int
+ :param erroneous_documents_count: Required. Number of invalid documents.
+ This includes empty, over-size limit or non-supported languages documents.
+ :type erroneous_documents_count: int
+ :param transactions_count: Required. Number of transactions for the
+ request.
+ :type transactions_count: long
+ """
+
+ _validation = {
+ 'documents_count': {'required': True},
+ 'valid_documents_count': {'required': True},
+ 'erroneous_documents_count': {'required': True},
+ 'transactions_count': {'required': True},
+ }
+
+ _attribute_map = {
+ 'documents_count': {'key': 'documentsCount', 'type': 'int'},
+ 'valid_documents_count': {'key': 'validDocumentsCount', 'type': 'int'},
+ 'erroneous_documents_count': {'key': 'erroneousDocumentsCount', 'type': 'int'},
+ 'transactions_count': {'key': 'transactionsCount', 'type': 'long'},
+ }
+
+ def __init__(self, *, documents_count: int, valid_documents_count: int, erroneous_documents_count: int, transactions_count: int, **kwargs) -> None:
+ super(RequestStatistics, self).__init__(**kwargs)
+ self.documents_count = documents_count
+ self.valid_documents_count = valid_documents_count
+ self.erroneous_documents_count = erroneous_documents_count
+ self.transactions_count = transactions_count
+
+
+class SentenceAspect(Model):
+ """SentenceAspect.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param sentiment: Required. Aspect level sentiment for the aspect in the
+ sentence. Possible values include: 'positive', 'mixed', 'negative'
+ :type sentiment: str or
+ ~azure.ai.textanalytics.v3_1_preview_2.models.TokenSentimentValue
+ :param confidence_scores: Required. Aspect level sentiment confidence
+ scores for the aspect in the sentence.
+ :type confidence_scores:
+ ~azure.ai.textanalytics.v3_1_preview_2.models.AspectConfidenceScoreLabel
+ :param offset: Required. The aspect offset from the start of the sentence.
+ :type offset: int
+ :param length: Required. The length of the aspect.
+ :type length: int
+ :param text: Required. The aspect text detected.
+ :type text: str
+ :param relations: Required. The array of either opinion or aspect object
+ which is related to the aspect.
+ :type relations:
+ list[~azure.ai.textanalytics.v3_1_preview_2.models.AspectRelation]
+ """
+
+ _validation = {
+ 'sentiment': {'required': True},
+ 'confidence_scores': {'required': True},
+ 'offset': {'required': True},
+ 'length': {'required': True},
+ 'text': {'required': True},
+ 'relations': {'required': True},
+ }
+
+ _attribute_map = {
+ 'sentiment': {'key': 'sentiment', 'type': 'TokenSentimentValue'},
+ 'confidence_scores': {'key': 'confidenceScores', 'type': 'AspectConfidenceScoreLabel'},
+ 'offset': {'key': 'offset', 'type': 'int'},
+ 'length': {'key': 'length', 'type': 'int'},
+ 'text': {'key': 'text', 'type': 'str'},
+ 'relations': {'key': 'relations', 'type': '[AspectRelation]'},
+ }
+
+ def __init__(self, *, sentiment, confidence_scores, offset: int, length: int, text: str, relations, **kwargs) -> None:
+ super(SentenceAspect, self).__init__(**kwargs)
+ self.sentiment = sentiment
+ self.confidence_scores = confidence_scores
+ self.offset = offset
+ self.length = length
+ self.text = text
+ self.relations = relations
+
+
+class SentenceOpinion(Model):
+ """SentenceOpinion.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param sentiment: Required. Opinion level sentiment for the aspect in the
+ sentence. Possible values include: 'positive', 'mixed', 'negative'
+ :type sentiment: str or
+ ~azure.ai.textanalytics.v3_1_preview_2.models.TokenSentimentValue
+ :param confidence_scores: Required. Opinion level sentiment confidence
+ scores for the aspect in the sentence.
+ :type confidence_scores:
+ ~azure.ai.textanalytics.v3_1_preview_2.models.AspectConfidenceScoreLabel
+ :param offset: Required. The opinion offset from the start of the
+ sentence.
+ :type offset: int
+ :param length: Required. The length of the opinion.
+ :type length: int
+ :param text: Required. The aspect text detected.
+ :type text: str
+ :param is_negated: Required. The indicator representing if the opinion is
+ negated.
+ :type is_negated: bool
+ """
+
+ _validation = {
+ 'sentiment': {'required': True},
+ 'confidence_scores': {'required': True},
+ 'offset': {'required': True},
+ 'length': {'required': True},
+ 'text': {'required': True},
+ 'is_negated': {'required': True},
+ }
+
+ _attribute_map = {
+ 'sentiment': {'key': 'sentiment', 'type': 'TokenSentimentValue'},
+ 'confidence_scores': {'key': 'confidenceScores', 'type': 'AspectConfidenceScoreLabel'},
+ 'offset': {'key': 'offset', 'type': 'int'},
+ 'length': {'key': 'length', 'type': 'int'},
+ 'text': {'key': 'text', 'type': 'str'},
+ 'is_negated': {'key': 'isNegated', 'type': 'bool'},
+ }
+
+ def __init__(self, *, sentiment, confidence_scores, offset: int, length: int, text: str, is_negated: bool, **kwargs) -> None:
+ super(SentenceOpinion, self).__init__(**kwargs)
+ self.sentiment = sentiment
+ self.confidence_scores = confidence_scores
+ self.offset = offset
+ self.length = length
+ self.text = text
+ self.is_negated = is_negated
+
+
+class SentenceSentiment(Model):
+ """SentenceSentiment.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param text: Required. The sentence text.
+ :type text: str
+ :param sentiment: Required. The predicted Sentiment for the sentence.
+ Possible values include: 'positive', 'neutral', 'negative'
+ :type sentiment: str or
+ ~azure.ai.textanalytics.v3_1_preview_2.models.SentenceSentimentValue
+ :param confidence_scores: Required. The sentiment confidence score between
+ 0 and 1 for the sentence for all classes.
+ :type confidence_scores:
+ ~azure.ai.textanalytics.v3_1_preview_2.models.SentimentConfidenceScorePerLabel
+ :param offset: Required. The sentence offset from the start of the
+ document.
+ :type offset: int
+ :param length: Required. The length of the sentence.
+ :type length: int
+ :param aspects: The array of aspect objects for the sentence.
+ :type aspects:
+ list[~azure.ai.textanalytics.v3_1_preview_2.models.SentenceAspect]
+ :param opinions: The array of opinion objects for the sentence.
+ :type opinions:
+ list[~azure.ai.textanalytics.v3_1_preview_2.models.SentenceOpinion]
+ """
+
+ _validation = {
+ 'text': {'required': True},
+ 'sentiment': {'required': True},
+ 'confidence_scores': {'required': True},
+ 'offset': {'required': True},
+ 'length': {'required': True},
+ }
+
+ _attribute_map = {
+ 'text': {'key': 'text', 'type': 'str'},
+ 'sentiment': {'key': 'sentiment', 'type': 'SentenceSentimentValue'},
+ 'confidence_scores': {'key': 'confidenceScores', 'type': 'SentimentConfidenceScorePerLabel'},
+ 'offset': {'key': 'offset', 'type': 'int'},
+ 'length': {'key': 'length', 'type': 'int'},
+ 'aspects': {'key': 'aspects', 'type': '[SentenceAspect]'},
+ 'opinions': {'key': 'opinions', 'type': '[SentenceOpinion]'},
+ }
+
+ def __init__(self, *, text: str, sentiment, confidence_scores, offset: int, length: int, aspects=None, opinions=None, **kwargs) -> None:
+ super(SentenceSentiment, self).__init__(**kwargs)
+ self.text = text
+ self.sentiment = sentiment
+ self.confidence_scores = confidence_scores
+ self.offset = offset
+ self.length = length
+ self.aspects = aspects
+ self.opinions = opinions
+
+
+class SentimentConfidenceScorePerLabel(Model):
+ """Represents the confidence scores between 0 and 1 across all sentiment
+ classes: positive, neutral, negative.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param positive: Required.
+ :type positive: float
+ :param neutral: Required.
+ :type neutral: float
+ :param negative: Required.
+ :type negative: float
+ """
+
+ _validation = {
+ 'positive': {'required': True},
+ 'neutral': {'required': True},
+ 'negative': {'required': True},
+ }
+
+ _attribute_map = {
+ 'positive': {'key': 'positive', 'type': 'float'},
+ 'neutral': {'key': 'neutral', 'type': 'float'},
+ 'negative': {'key': 'negative', 'type': 'float'},
+ }
+
+ def __init__(self, *, positive: float, neutral: float, negative: float, **kwargs) -> None:
+ super(SentimentConfidenceScorePerLabel, self).__init__(**kwargs)
+ self.positive = positive
+ self.neutral = neutral
+ self.negative = negative
+
+
+class SentimentResponse(Model):
+ """SentimentResponse.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param documents: Required. Sentiment analysis per document.
+ :type documents:
+ list[~azure.ai.textanalytics.v3_1_preview_2.models.DocumentSentiment]
+ :param errors: Required. Errors by document id.
+ :type errors:
+ list[~azure.ai.textanalytics.v3_1_preview_2.models.DocumentError]
+ :param statistics:
+ :type statistics:
+ ~azure.ai.textanalytics.v3_1_preview_2.models.RequestStatistics
+ :param model_version: Required. This field indicates which model is used
+ for scoring.
+ :type model_version: str
+ """
+
+ _validation = {
+ 'documents': {'required': True},
+ 'errors': {'required': True},
+ 'model_version': {'required': True},
+ }
+
+ _attribute_map = {
+ 'documents': {'key': 'documents', 'type': '[DocumentSentiment]'},
+ 'errors': {'key': 'errors', 'type': '[DocumentError]'},
+ 'statistics': {'key': 'statistics', 'type': 'RequestStatistics'},
+ 'model_version': {'key': 'modelVersion', 'type': 'str'},
+ }
+
+ def __init__(self, *, documents, errors, model_version: str, statistics=None, **kwargs) -> None:
+ super(SentimentResponse, self).__init__(**kwargs)
+ self.documents = documents
+ self.errors = errors
+ self.statistics = statistics
+ self.model_version = model_version
+
+
+class TextAnalyticsError(Model):
+ """TextAnalyticsError.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param code: Required. Error code. Possible values include:
+ 'InvalidRequest', 'InvalidArgument', 'InternalServerError',
+ 'ServiceUnavailable'
+ :type code: str or
+ ~azure.ai.textanalytics.v3_1_preview_2.models.ErrorCodeValue
+ :param message: Required. Error message.
+ :type message: str
+ :param target: Error target.
+ :type target: str
+ :param innererror: Inner error contains more specific information.
+ :type innererror: ~azure.ai.textanalytics.v3_1_preview_2.models.InnerError
+ :param details: Details about specific errors that led to this reported
+ error.
+ :type details:
+ list[~azure.ai.textanalytics.v3_1_preview_2.models.TextAnalyticsError]
+ """
+
+ _validation = {
+ 'code': {'required': True},
+ 'message': {'required': True},
+ }
+
+ _attribute_map = {
+ 'code': {'key': 'code', 'type': 'str'},
+ 'message': {'key': 'message', 'type': 'str'},
+ 'target': {'key': 'target', 'type': 'str'},
+ 'innererror': {'key': 'innererror', 'type': 'InnerError'},
+ 'details': {'key': 'details', 'type': '[TextAnalyticsError]'},
+ }
+
+ def __init__(self, *, code, message: str, target: str=None, innererror=None, details=None, **kwargs) -> None:
+ super(TextAnalyticsError, self).__init__(**kwargs)
+ self.code = code
+ self.message = message
+ self.target = target
+ self.innererror = innererror
+ self.details = details
+
+
+class TextAnalyticsWarning(Model):
+ """TextAnalyticsWarning.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param code: Required. Error code. Possible values include:
+ 'LongWordsInDocument', 'DocumentTruncated'
+ :type code: str or
+ ~azure.ai.textanalytics.v3_1_preview_2.models.WarningCodeValue
+ :param message: Required. Warning message.
+ :type message: str
+ :param target_ref: A JSON pointer reference indicating the target object.
+ :type target_ref: str
+ """
+
+ _validation = {
+ 'code': {'required': True},
+ 'message': {'required': True},
+ }
+
+ _attribute_map = {
+ 'code': {'key': 'code', 'type': 'str'},
+ 'message': {'key': 'message', 'type': 'str'},
+ 'target_ref': {'key': 'targetRef', 'type': 'str'},
+ }
+
+ def __init__(self, *, code, message: str, target_ref: str=None, **kwargs) -> None:
+ super(TextAnalyticsWarning, self).__init__(**kwargs)
+ self.code = code
+ self.message = message
+ self.target_ref = target_ref
diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_2/models/_text_analytics_client_enums.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_2/models/_text_analytics_client_enums.py
new file mode 100644
index 000000000000..3d0d462d878e
--- /dev/null
+++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_2/models/_text_analytics_client_enums.py
@@ -0,0 +1,74 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from enum import Enum
+
+
+class ErrorCodeValue(str, Enum):
+
+ invalid_request = "InvalidRequest"
+ invalid_argument = "InvalidArgument"
+ internal_server_error = "InternalServerError"
+ service_unavailable = "ServiceUnavailable"
+
+
+class InnerErrorCodeValue(str, Enum):
+
+ invalid_parameter_value = "InvalidParameterValue"
+ invalid_request_body_format = "InvalidRequestBodyFormat"
+ empty_request = "EmptyRequest"
+ missing_input_records = "MissingInputRecords"
+ invalid_document = "InvalidDocument"
+ model_version_incorrect = "ModelVersionIncorrect"
+ invalid_document_batch = "InvalidDocumentBatch"
+ unsupported_language_code = "UnsupportedLanguageCode"
+ invalid_country_hint = "InvalidCountryHint"
+
+
+class WarningCodeValue(str, Enum):
+
+ long_words_in_document = "LongWordsInDocument"
+ document_truncated = "DocumentTruncated"
+
+
+class DocumentSentimentValue(str, Enum):
+
+ positive = "positive"
+ neutral = "neutral"
+ negative = "negative"
+ mixed = "mixed"
+
+
+class SentenceSentimentValue(str, Enum):
+
+ positive = "positive"
+ neutral = "neutral"
+ negative = "negative"
+
+
+class TokenSentimentValue(str, Enum):
+
+ positive = "positive"
+ mixed = "mixed"
+ negative = "negative"
+
+
+class AspectRelationType(str, Enum):
+
+ opinion = "opinion"
+ aspect = "aspect"
+
+
+class StringIndexType(str, Enum):
+
+ text_elements_v8 = "TextElements_v8" #: Returned offset and length values will correspond to TextElements (Graphemes and Grapheme clusters) conforming to the Unicode 8.0.0 standard. Use this option if your application is written in .Net Framework or .Net Core and you will be using StringInfo.
+ unicode_code_point = "UnicodeCodePoint" #: Returned offset and length values will correspond to Unicode code points. Use this option if your application is written in a language that supports Unicode, for example Python.
+ utf16_code_unit = "Utf16CodeUnit" #: Returned offset and length values will correspond to UTF-16 code units. Use this option if your application is written in a language that supports Unicode, for example Java, JavaScript.
diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/aio/operations_async/__init__.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_2/operations/__init__.py
similarity index 77%
rename from sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/aio/operations_async/__init__.py
rename to sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_2/operations/__init__.py
index e6429ee824b7..e87e22b49362 100644
--- a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/aio/operations_async/__init__.py
+++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_2/operations/__init__.py
@@ -1,12 +1,15 @@
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for license information.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
# --------------------------------------------------------------------------
-from ._text_analytics_client_operations_async import TextAnalyticsClientOperationsMixin
+from ._text_analytics_client_operations import TextAnalyticsClientOperationsMixin
__all__ = [
'TextAnalyticsClientOperationsMixin',
diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_2/operations/_text_analytics_client_operations.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_2/operations/_text_analytics_client_operations.py
new file mode 100644
index 000000000000..42b8dbac5b25
--- /dev/null
+++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_2/operations/_text_analytics_client_operations.py
@@ -0,0 +1,517 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from msrest.pipeline import ClientRawResponse
+from .. import models
+
+
+class TextAnalyticsClientOperationsMixin(object):
+ """Mixin implementing the Text Analytics REST operations.
+
+ Every operation follows the same AutoRest-generated pattern: wrap the
+ ``documents`` list in a batch-input model, build URL/query/headers, POST
+ the serialized body, raise ``models.ErrorResponseException`` on any
+ non-200 status, and return the deserialized result (or a
+ ``ClientRawResponse`` when ``raw=True``).
+ """
+
+ def entities_recognition_general(
+ self, documents, model_version=None, show_stats=None, string_index_type="TextElements_v8", custom_headers=None, raw=False, **operation_config):
+ """Named Entity Recognition.
+
+ The API returns a list of general named entities in a given document.
+ For the list of supported entity types, check Supported Entity Types in Text Analytics
+ API. See the Supported languages
+ in Text Analytics API for the list of enabled languages.
+
+ :param documents: The set of documents to process as part of this
+ batch.
+ :type documents:
+ list[~azure.ai.textanalytics.v3_1_preview_2.models.MultiLanguageInput]
+ :param model_version: (Optional) This value indicates which model will
+ be used for scoring. If a model-version is not specified, the API
+ should default to the latest, non-preview version.
+ :type model_version: str
+ :param show_stats: (Optional) if set to true, response will contain
+ request and document level statistics.
+ :type show_stats: bool
+ :param string_index_type: (Optional) Specifies the method used to
+ interpret string offsets. Defaults to Text Elements (Graphemes)
+ according to Unicode v8.0.0. For additional information see
+ https://aka.ms/text-analytics-offsets. Possible values include:
+ 'TextElements_v8', 'UnicodeCodePoint', 'Utf16CodeUnit'
+ :type string_index_type: str or
+ ~azure.ai.textanalytics.v3_1_preview_2.models.StringIndexType
+ :param dict custom_headers: headers that will be added to the request
+ :param bool raw: returns the direct response alongside the
+ deserialized response
+ :param operation_config: :ref:`Operation configuration
+ overrides`.
+ :return: EntitiesResult or ClientRawResponse if raw=true
+ :rtype: ~azure.ai.textanalytics.v3_1_preview_2.models.EntitiesResult
+ or ~msrest.pipeline.ClientRawResponse
+ :raises:
+ :class:`ErrorResponseException`
+ """
+ # NOTE(review): 'input' shadows the builtin name; kept as emitted by
+ # AutoRest (same pattern in every operation below).
+ input = models.MultiLanguageBatchInput(documents=documents)
+
+ # Construct URL
+ url = self.entities_recognition_general.metadata['url']
+ path_format_arguments = {
+ 'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if model_version is not None:
+ query_parameters['model-version'] = self._serialize.query("model_version", model_version, 'str')
+ if show_stats is not None:
+ query_parameters['showStats'] = self._serialize.query("show_stats", show_stats, 'bool')
+ if string_index_type is not None:
+ query_parameters['stringIndexType'] = self._serialize.query("string_index_type", string_index_type, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Accept'] = 'application/json'
+ header_parameters['Content-Type'] = 'application/json; charset=utf-8'
+ if custom_headers:
+ header_parameters.update(custom_headers)
+
+ # Construct body
+ body_content = self._serialize.body(input, 'MultiLanguageBatchInput')
+
+ # Construct and send request
+ request = self._client.post(url, query_parameters, header_parameters, body_content)
+ response = self._client.send(request, stream=False, **operation_config)
+
+ if response.status_code not in [200]:
+ raise models.ErrorResponseException(self._deserialize, response)
+
+ deserialized = None
+ if response.status_code == 200:
+ deserialized = self._deserialize('EntitiesResult', response)
+
+ if raw:
+ client_raw_response = ClientRawResponse(deserialized, response)
+ return client_raw_response
+
+ return deserialized
+ entities_recognition_general.metadata = {'url': '/entities/recognition/general'}
+
+ def entities_recognition_pii(
+ self, documents, model_version=None, show_stats=None, domain=None, string_index_type="TextElements_v8", custom_headers=None, raw=False, **operation_config):
+ """Entities containing personal information.
+
+ The API returns a list of entities with personal information (\"SSN\",
+ \"Bank Account\" etc) in the document. For the list of supported entity
+ types, check Supported Entity Types
+ in Text Analytics API. See the Supported languages in Text Analytics
+ API for the list of enabled languages.
+
+ :param documents: The set of documents to process as part of this
+ batch.
+ :type documents:
+ list[~azure.ai.textanalytics.v3_1_preview_2.models.MultiLanguageInput]
+ :param model_version: (Optional) This value indicates which model will
+ be used for scoring. If a model-version is not specified, the API
+ should default to the latest, non-preview version.
+ :type model_version: str
+ :param show_stats: (Optional) if set to true, response will contain
+ request and document level statistics.
+ :type show_stats: bool
+ :param domain: (Optional) if set to 'PHI', response will contain only
+ PHI entities.
+ :type domain: str
+ :param string_index_type: (Optional) Specifies the method used to
+ interpret string offsets. Defaults to Text Elements (Graphemes)
+ according to Unicode v8.0.0. For additional information see
+ https://aka.ms/text-analytics-offsets. Possible values include:
+ 'TextElements_v8', 'UnicodeCodePoint', 'Utf16CodeUnit'
+ :type string_index_type: str or
+ ~azure.ai.textanalytics.v3_1_preview_2.models.StringIndexType
+ :param dict custom_headers: headers that will be added to the request
+ :param bool raw: returns the direct response alongside the
+ deserialized response
+ :param operation_config: :ref:`Operation configuration
+ overrides`.
+ :return: PiiEntitiesResult or ClientRawResponse if raw=true
+ :rtype:
+ ~azure.ai.textanalytics.v3_1_preview_2.models.PiiEntitiesResult or
+ ~msrest.pipeline.ClientRawResponse
+ :raises:
+ :class:`ErrorResponseException`
+ """
+ input = models.MultiLanguageBatchInput(documents=documents)
+
+ # Construct URL
+ url = self.entities_recognition_pii.metadata['url']
+ path_format_arguments = {
+ 'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if model_version is not None:
+ query_parameters['model-version'] = self._serialize.query("model_version", model_version, 'str')
+ if show_stats is not None:
+ query_parameters['showStats'] = self._serialize.query("show_stats", show_stats, 'bool')
+ if domain is not None:
+ query_parameters['domain'] = self._serialize.query("domain", domain, 'str')
+ if string_index_type is not None:
+ query_parameters['stringIndexType'] = self._serialize.query("string_index_type", string_index_type, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Accept'] = 'application/json'
+ header_parameters['Content-Type'] = 'application/json; charset=utf-8'
+ if custom_headers:
+ header_parameters.update(custom_headers)
+
+ # Construct body
+ body_content = self._serialize.body(input, 'MultiLanguageBatchInput')
+
+ # Construct and send request
+ request = self._client.post(url, query_parameters, header_parameters, body_content)
+ response = self._client.send(request, stream=False, **operation_config)
+
+ if response.status_code not in [200]:
+ raise models.ErrorResponseException(self._deserialize, response)
+
+ deserialized = None
+ if response.status_code == 200:
+ deserialized = self._deserialize('PiiEntitiesResult', response)
+
+ if raw:
+ client_raw_response = ClientRawResponse(deserialized, response)
+ return client_raw_response
+
+ return deserialized
+ entities_recognition_pii.metadata = {'url': '/entities/recognition/pii'}
+
+ def entities_linking(
+ self, documents, model_version=None, show_stats=None, string_index_type="TextElements_v8", custom_headers=None, raw=False, **operation_config):
+ """Linked entities from a well-known knowledge base.
+
+ The API returns a list of recognized entities with links to a
+ well-known knowledge base. See the Supported languages in Text Analytics
+ API for the list of enabled languages.
+
+ :param documents: The set of documents to process as part of this
+ batch.
+ :type documents:
+ list[~azure.ai.textanalytics.v3_1_preview_2.models.MultiLanguageInput]
+ :param model_version: (Optional) This value indicates which model will
+ be used for scoring. If a model-version is not specified, the API
+ should default to the latest, non-preview version.
+ :type model_version: str
+ :param show_stats: (Optional) if set to true, response will contain
+ request and document level statistics.
+ :type show_stats: bool
+ :param string_index_type: (Optional) Specifies the method used to
+ interpret string offsets. Defaults to Text Elements (Graphemes)
+ according to Unicode v8.0.0. For additional information see
+ https://aka.ms/text-analytics-offsets. Possible values include:
+ 'TextElements_v8', 'UnicodeCodePoint', 'Utf16CodeUnit'
+ :type string_index_type: str or
+ ~azure.ai.textanalytics.v3_1_preview_2.models.StringIndexType
+ :param dict custom_headers: headers that will be added to the request
+ :param bool raw: returns the direct response alongside the
+ deserialized response
+ :param operation_config: :ref:`Operation configuration
+ overrides`.
+ :return: EntityLinkingResult or ClientRawResponse if raw=true
+ :rtype:
+ ~azure.ai.textanalytics.v3_1_preview_2.models.EntityLinkingResult or
+ ~msrest.pipeline.ClientRawResponse
+ :raises:
+ :class:`ErrorResponseException`
+ """
+ input = models.MultiLanguageBatchInput(documents=documents)
+
+ # Construct URL
+ url = self.entities_linking.metadata['url']
+ path_format_arguments = {
+ 'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if model_version is not None:
+ query_parameters['model-version'] = self._serialize.query("model_version", model_version, 'str')
+ if show_stats is not None:
+ query_parameters['showStats'] = self._serialize.query("show_stats", show_stats, 'bool')
+ if string_index_type is not None:
+ query_parameters['stringIndexType'] = self._serialize.query("string_index_type", string_index_type, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Accept'] = 'application/json'
+ header_parameters['Content-Type'] = 'application/json; charset=utf-8'
+ if custom_headers:
+ header_parameters.update(custom_headers)
+
+ # Construct body
+ body_content = self._serialize.body(input, 'MultiLanguageBatchInput')
+
+ # Construct and send request
+ request = self._client.post(url, query_parameters, header_parameters, body_content)
+ response = self._client.send(request, stream=False, **operation_config)
+
+ if response.status_code not in [200]:
+ raise models.ErrorResponseException(self._deserialize, response)
+
+ deserialized = None
+ if response.status_code == 200:
+ deserialized = self._deserialize('EntityLinkingResult', response)
+
+ if raw:
+ client_raw_response = ClientRawResponse(deserialized, response)
+ return client_raw_response
+
+ return deserialized
+ entities_linking.metadata = {'url': '/entities/linking'}
+
+ def key_phrases(
+ self, documents, model_version=None, show_stats=None, custom_headers=None, raw=False, **operation_config):
+ """Key Phrases.
+
+ The API returns a list of strings denoting the key phrases in the input
+ text. See the Supported languages in
+ Text Analytics API for the list of enabled languages.
+
+ :param documents: The set of documents to process as part of this
+ batch.
+ :type documents:
+ list[~azure.ai.textanalytics.v3_1_preview_2.models.MultiLanguageInput]
+ :param model_version: (Optional) This value indicates which model will
+ be used for scoring. If a model-version is not specified, the API
+ should default to the latest, non-preview version.
+ :type model_version: str
+ :param show_stats: (Optional) if set to true, response will contain
+ request and document level statistics.
+ :type show_stats: bool
+ :param dict custom_headers: headers that will be added to the request
+ :param bool raw: returns the direct response alongside the
+ deserialized response
+ :param operation_config: :ref:`Operation configuration
+ overrides`.
+ :return: KeyPhraseResult or ClientRawResponse if raw=true
+ :rtype: ~azure.ai.textanalytics.v3_1_preview_2.models.KeyPhraseResult
+ or ~msrest.pipeline.ClientRawResponse
+ :raises:
+ :class:`ErrorResponseException`
+ """
+ input = models.MultiLanguageBatchInput(documents=documents)
+
+ # Construct URL
+ url = self.key_phrases.metadata['url']
+ path_format_arguments = {
+ 'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if model_version is not None:
+ query_parameters['model-version'] = self._serialize.query("model_version", model_version, 'str')
+ if show_stats is not None:
+ query_parameters['showStats'] = self._serialize.query("show_stats", show_stats, 'bool')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Accept'] = 'application/json'
+ header_parameters['Content-Type'] = 'application/json; charset=utf-8'
+ if custom_headers:
+ header_parameters.update(custom_headers)
+
+ # Construct body
+ body_content = self._serialize.body(input, 'MultiLanguageBatchInput')
+
+ # Construct and send request
+ request = self._client.post(url, query_parameters, header_parameters, body_content)
+ response = self._client.send(request, stream=False, **operation_config)
+
+ if response.status_code not in [200]:
+ raise models.ErrorResponseException(self._deserialize, response)
+
+ deserialized = None
+ if response.status_code == 200:
+ deserialized = self._deserialize('KeyPhraseResult', response)
+
+ if raw:
+ client_raw_response = ClientRawResponse(deserialized, response)
+ return client_raw_response
+
+ return deserialized
+ key_phrases.metadata = {'url': '/keyPhrases'}
+
+ def languages(
+ self, documents, model_version=None, show_stats=None, custom_headers=None, raw=False, **operation_config):
+ """Detect Language.
+
+ The API returns the detected language and a numeric score between 0 and
+ 1. Scores close to 1 indicate 100% certainty that the identified
+ language is true. See the Supported
+ languages in Text Analytics API for the list of enabled languages.
+
+ :param documents:
+ :type documents:
+ list[~azure.ai.textanalytics.v3_1_preview_2.models.LanguageInput]
+ :param model_version: (Optional) This value indicates which model will
+ be used for scoring. If a model-version is not specified, the API
+ should default to the latest, non-preview version.
+ :type model_version: str
+ :param show_stats: (Optional) if set to true, response will contain
+ request and document level statistics.
+ :type show_stats: bool
+ :param dict custom_headers: headers that will be added to the request
+ :param bool raw: returns the direct response alongside the
+ deserialized response
+ :param operation_config: :ref:`Operation configuration
+ overrides`.
+ :return: LanguageResult or ClientRawResponse if raw=true
+ :rtype: ~azure.ai.textanalytics.v3_1_preview_2.models.LanguageResult
+ or ~msrest.pipeline.ClientRawResponse
+ :raises:
+ :class:`ErrorResponseException`
+ """
+ # Unlike the other operations, language detection takes LanguageInput
+ # documents and wraps them in a LanguageBatchInput.
+ input = models.LanguageBatchInput(documents=documents)
+
+ # Construct URL
+ url = self.languages.metadata['url']
+ path_format_arguments = {
+ 'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if model_version is not None:
+ query_parameters['model-version'] = self._serialize.query("model_version", model_version, 'str')
+ if show_stats is not None:
+ query_parameters['showStats'] = self._serialize.query("show_stats", show_stats, 'bool')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Accept'] = 'application/json'
+ header_parameters['Content-Type'] = 'application/json; charset=utf-8'
+ if custom_headers:
+ header_parameters.update(custom_headers)
+
+ # Construct body
+ body_content = self._serialize.body(input, 'LanguageBatchInput')
+
+ # Construct and send request
+ request = self._client.post(url, query_parameters, header_parameters, body_content)
+ response = self._client.send(request, stream=False, **operation_config)
+
+ if response.status_code not in [200]:
+ raise models.ErrorResponseException(self._deserialize, response)
+
+ deserialized = None
+ if response.status_code == 200:
+ deserialized = self._deserialize('LanguageResult', response)
+
+ if raw:
+ client_raw_response = ClientRawResponse(deserialized, response)
+ return client_raw_response
+
+ return deserialized
+ languages.metadata = {'url': '/languages'}
+
+ def sentiment(
+ self, documents, model_version=None, show_stats=None, opinion_mining=None, string_index_type="TextElements_v8", custom_headers=None, raw=False, **operation_config):
+ """Sentiment.
+
+ The API returns a detailed sentiment analysis for the input text. The
+ analysis is done in multiple levels of granularity, starting from the
+ document level, down to sentence and key terms (aspects) and opinions.
+
+ :param documents: The set of documents to process as part of this
+ batch.
+ :type documents:
+ list[~azure.ai.textanalytics.v3_1_preview_2.models.MultiLanguageInput]
+ :param model_version: (Optional) This value indicates which model will
+ be used for scoring. If a model-version is not specified, the API
+ should default to the latest, non-preview version.
+ :type model_version: str
+ :param show_stats: (Optional) if set to true, response will contain
+ request and document level statistics.
+ :type show_stats: bool
+ :param opinion_mining: (Optional) if set to true, response will
+ contain input and document level statistics including aspect-based
+ sentiment analysis results.
+ :type opinion_mining: bool
+ :param string_index_type: (Optional) Specifies the method used to
+ interpret string offsets. Defaults to Text Elements (Graphemes)
+ according to Unicode v8.0.0. For additional information see
+ https://aka.ms/text-analytics-offsets. Possible values include:
+ 'TextElements_v8', 'UnicodeCodePoint', 'Utf16CodeUnit'
+ :type string_index_type: str or
+ ~azure.ai.textanalytics.v3_1_preview_2.models.StringIndexType
+ :param dict custom_headers: headers that will be added to the request
+ :param bool raw: returns the direct response alongside the
+ deserialized response
+ :param operation_config: :ref:`Operation configuration
+ overrides`.
+ :return: SentimentResponse or ClientRawResponse if raw=true
+ :rtype:
+ ~azure.ai.textanalytics.v3_1_preview_2.models.SentimentResponse or
+ ~msrest.pipeline.ClientRawResponse
+ :raises:
+ :class:`ErrorResponseException`
+ """
+ input = models.MultiLanguageBatchInput(documents=documents)
+
+ # Construct URL
+ url = self.sentiment.metadata['url']
+ path_format_arguments = {
+ 'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if model_version is not None:
+ query_parameters['model-version'] = self._serialize.query("model_version", model_version, 'str')
+ if show_stats is not None:
+ query_parameters['showStats'] = self._serialize.query("show_stats", show_stats, 'bool')
+ if opinion_mining is not None:
+ query_parameters['opinionMining'] = self._serialize.query("opinion_mining", opinion_mining, 'bool')
+ if string_index_type is not None:
+ query_parameters['stringIndexType'] = self._serialize.query("string_index_type", string_index_type, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Accept'] = 'application/json'
+ header_parameters['Content-Type'] = 'application/json; charset=utf-8'
+ if custom_headers:
+ header_parameters.update(custom_headers)
+
+ # Construct body
+ body_content = self._serialize.body(input, 'MultiLanguageBatchInput')
+
+ # Construct and send request
+ request = self._client.post(url, query_parameters, header_parameters, body_content)
+ response = self._client.send(request, stream=False, **operation_config)
+
+ if response.status_code not in [200]:
+ raise models.ErrorResponseException(self._deserialize, response)
+
+ deserialized = None
+ if response.status_code == 200:
+ deserialized = self._deserialize('SentimentResponse', response)
+
+ if raw:
+ client_raw_response = ClientRawResponse(deserialized, response)
+ return client_raw_response
+
+ return deserialized
+ sentiment.metadata = {'url': '/sentiment'}
diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_2/version.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_2/version.py
new file mode 100644
index 000000000000..b8ffb04f789f
--- /dev/null
+++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_2/version.py
@@ -0,0 +1,13 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+VERSION = "0.0.1"  # AutoRest-generated placeholder; NOTE(review): confirm release tooling stamps the real package version.
+
diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_3/__init__.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_3/__init__.py
new file mode 100644
index 000000000000..48d59a14cb3c
--- /dev/null
+++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_3/__init__.py
@@ -0,0 +1,19 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+# Public surface of the generated v3.1-preview.3 sub-package.
+from ._configuration import TextAnalyticsClientConfiguration
+from ._text_analytics_client import TextAnalyticsClient
+__all__ = ['TextAnalyticsClient', 'TextAnalyticsClientConfiguration']
+
+from .version import VERSION
+
+__version__ = VERSION
+
diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_3/_configuration.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_3/_configuration.py
new file mode 100644
index 000000000000..f3c85f323d16
--- /dev/null
+++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_3/_configuration.py
@@ -0,0 +1,47 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from msrest import Configuration
+
+from .version import VERSION
+
+
+class TextAnalyticsClientConfiguration(Configuration):
+ """Configuration for TextAnalyticsClient
+ Note that all parameters used to create this instance are saved as instance
+ attributes.
+
+ :param endpoint: Supported Cognitive Services endpoints (protocol and
+ hostname, for example: https://westus.api.cognitive.microsoft.com).
+ :type endpoint: str
+ :param credentials: Subscription credentials which uniquely identify
+ client subscription.
+ :type credentials: None
+ :raises ValueError: if ``endpoint`` or ``credentials`` is None.
+ """
+
+ def __init__(
+ self, endpoint, credentials):
+
+ if endpoint is None:
+ raise ValueError("Parameter 'endpoint' must not be None.")
+ if credentials is None:
+ raise ValueError("Parameter 'credentials' must not be None.")
+ # '{Endpoint}' is a format placeholder resolved per-request by the
+ # operations via format_url, not here.
+ base_url = '{Endpoint}/text/analytics/v3.1-preview.3'
+
+ super(TextAnalyticsClientConfiguration, self).__init__(base_url)
+
+ # Starting Autorest.Python 4.0.64, make connection pool activated by default
+ self.keep_alive = True
+
+ self.add_user_agent('azure-ai-textanalytics/{}'.format(VERSION))
+
+ self.endpoint = endpoint
+ self.credentials = credentials
diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_3/_text_analytics_client.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_3/_text_analytics_client.py
new file mode 100644
index 000000000000..633299e0717b
--- /dev/null
+++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_3/_text_analytics_client.py
@@ -0,0 +1,45 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from msrest.service_client import SDKClient
+from msrest import Serializer, Deserializer
+
+from ._configuration import TextAnalyticsClientConfiguration
+from .operations import TextAnalyticsClientOperationsMixin
+from msrest.exceptions import HttpOperationError
+from . import models
+
+
+class TextAnalyticsClient(TextAnalyticsClientOperationsMixin, SDKClient):
+ """The Text Analytics API is a suite of natural language processing (NLP) services built with best-in-class Microsoft machine learning algorithms. The API can be used to analyze unstructured text for tasks such as sentiment analysis, key phrase extraction and language detection. Further documentation can be found in <a href="https://docs.microsoft.com/en-us/azure/cognitive-services/text-analytics/overview">https://docs.microsoft.com/en-us/azure/cognitive-services/text-analytics/overview</a>
+
+ :ivar config: Configuration for client.
+ :vartype config: TextAnalyticsClientConfiguration
+
+ :param endpoint: Supported Cognitive Services endpoints (protocol and
+ hostname, for example: https://westus.api.cognitive.microsoft.com).
+ :type endpoint: str
+ :param credentials: Subscription credentials which uniquely identify
+ client subscription.
+ :type credentials: None
+ """
+
+ def __init__(
+ self, endpoint, credentials):
+
+ self.config = TextAnalyticsClientConfiguration(endpoint, credentials)
+ super(TextAnalyticsClient, self).__init__(self.config.credentials, self.config)
+
+ # Collect every model class for msrest (de)serialization lookup.
+ client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
+ # Matches the 'v3.1-preview.3' segment baked into the configuration's base_url.
+ self.api_version = 'v3.1-preview.3'
+ self._serialize = Serializer(client_models)
+ self._deserialize = Deserializer(client_models)
+
+
diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_3/models/__init__.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_3/models/__init__.py
new file mode 100644
index 000000000000..a044b77e5b63
--- /dev/null
+++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_3/models/__init__.py
@@ -0,0 +1,218 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+try:
+ from ._models_py3 import AnalyzeBatchInput
+ from ._models_py3 import AnalyzeJobMetadata
+ from ._models_py3 import AnalyzeJobState
+ from ._models_py3 import AnalyzeJobStateTasks
+ from ._models_py3 import AnalyzeJobStateTasksDetails
+ from ._models_py3 import AnalyzeJobStateTasksEntityRecognitionPiiTasksItem
+ from ._models_py3 import AnalyzeJobStateTasksEntityRecognitionTasksItem
+ from ._models_py3 import AnalyzeJobStateTasksKeyPhraseExtractionTasksItem
+ from ._models_py3 import AspectConfidenceScoreLabel
+ from ._models_py3 import AspectRelation
+ from ._models_py3 import DetectedLanguage
+ from ._models_py3 import DocumentEntities
+ from ._models_py3 import DocumentError
+ from ._models_py3 import DocumentHealthcareEntities
+ from ._models_py3 import DocumentKeyPhrases
+ from ._models_py3 import DocumentLanguage
+ from ._models_py3 import DocumentLinkedEntities
+ from ._models_py3 import DocumentSentiment
+ from ._models_py3 import DocumentStatistics
+ from ._models_py3 import EntitiesResult
+ from ._models_py3 import EntitiesTask
+ from ._models_py3 import EntitiesTaskParameters
+ from ._models_py3 import Entity
+ from ._models_py3 import EntityLinkingResult
+ from ._models_py3 import ErrorResponse
+ from ._models_py3 import HealthcareEntity
+ from ._models_py3 import HealthcareEntityLink
+ from ._models_py3 import HealthcareJobState
+ from ._models_py3 import HealthcareRelation
+ from ._models_py3 import HealthcareResult
+ from ._models_py3 import InnerError
+ from ._models_py3 import JobDescriptor
+ from ._models_py3 import JobManifest
+ from ._models_py3 import JobManifestTasks
+ from ._models_py3 import JobMetadata
+ from ._models_py3 import KeyPhraseResult
+ from ._models_py3 import KeyPhrasesTask
+ from ._models_py3 import KeyPhrasesTaskParameters
+ from ._models_py3 import LanguageBatchInput
+ from ._models_py3 import LanguageInput
+ from ._models_py3 import LanguageResult
+ from ._models_py3 import LinkedEntity
+ from ._models_py3 import Match
+ from ._models_py3 import MultiLanguageBatchInput
+ from ._models_py3 import MultiLanguageInput
+ from ._models_py3 import Pagination
+ from ._models_py3 import PiiDocumentEntities
+ from ._models_py3 import PiiResult
+ from ._models_py3 import PiiTask
+ from ._models_py3 import PiiTaskParameters
+ from ._models_py3 import RequestStatistics
+ from ._models_py3 import SentenceAspect
+ from ._models_py3 import SentenceOpinion
+ from ._models_py3 import SentenceSentiment
+ from ._models_py3 import SentimentConfidenceScorePerLabel
+ from ._models_py3 import SentimentResponse
+ from ._models_py3 import TasksState
+ from ._models_py3 import TaskState
+ from ._models_py3 import TextAnalyticsError
+ from ._models_py3 import TextAnalyticsWarning
+except (SyntaxError, ImportError):
+ from ._models import AnalyzeBatchInput
+ from ._models import AnalyzeJobMetadata
+ from ._models import AnalyzeJobState
+ from ._models import AnalyzeJobStateTasks
+ from ._models import AnalyzeJobStateTasksDetails
+ from ._models import AnalyzeJobStateTasksEntityRecognitionPiiTasksItem
+ from ._models import AnalyzeJobStateTasksEntityRecognitionTasksItem
+ from ._models import AnalyzeJobStateTasksKeyPhraseExtractionTasksItem
+ from ._models import AspectConfidenceScoreLabel
+ from ._models import AspectRelation
+ from ._models import DetectedLanguage
+ from ._models import DocumentEntities
+ from ._models import DocumentError
+ from ._models import DocumentHealthcareEntities
+ from ._models import DocumentKeyPhrases
+ from ._models import DocumentLanguage
+ from ._models import DocumentLinkedEntities
+ from ._models import DocumentSentiment
+ from ._models import DocumentStatistics
+ from ._models import EntitiesResult
+ from ._models import EntitiesTask
+ from ._models import EntitiesTaskParameters
+ from ._models import Entity
+ from ._models import EntityLinkingResult
+ from ._models import ErrorResponse
+ from ._models import HealthcareEntity
+ from ._models import HealthcareEntityLink
+ from ._models import HealthcareJobState
+ from ._models import HealthcareRelation
+ from ._models import HealthcareResult
+ from ._models import InnerError
+ from ._models import JobDescriptor
+ from ._models import JobManifest
+ from ._models import JobManifestTasks
+ from ._models import JobMetadata
+ from ._models import KeyPhraseResult
+ from ._models import KeyPhrasesTask
+ from ._models import KeyPhrasesTaskParameters
+ from ._models import LanguageBatchInput
+ from ._models import LanguageInput
+ from ._models import LanguageResult
+ from ._models import LinkedEntity
+ from ._models import Match
+ from ._models import MultiLanguageBatchInput
+ from ._models import MultiLanguageInput
+ from ._models import Pagination
+ from ._models import PiiDocumentEntities
+ from ._models import PiiResult
+ from ._models import PiiTask
+ from ._models import PiiTaskParameters
+ from ._models import RequestStatistics
+ from ._models import SentenceAspect
+ from ._models import SentenceOpinion
+ from ._models import SentenceSentiment
+ from ._models import SentimentConfidenceScorePerLabel
+ from ._models import SentimentResponse
+ from ._models import TasksState
+ from ._models import TaskState
+ from ._models import TextAnalyticsError
+ from ._models import TextAnalyticsWarning
+from ._text_analytics_client_enums import (
+ AspectRelationType,
+ DocumentSentimentValue,
+ ErrorCodeValue,
+ InnerErrorCodeValue,
+ SentenceSentimentValue,
+ State,
+ StringIndexType,
+ StringIndexTypeResponse,
+ TokenSentimentValue,
+ WarningCodeValue,
+)
+
+# Explicit public surface of this generated models package: the model classes
+# (py3 or py2 variants, whichever import above succeeded) plus the enums.
+__all__ = [
+ 'AnalyzeBatchInput',
+ 'AnalyzeJobMetadata',
+ 'AnalyzeJobState',
+ 'AnalyzeJobStateTasks',
+ 'AnalyzeJobStateTasksDetails',
+ 'AnalyzeJobStateTasksEntityRecognitionPiiTasksItem',
+ 'AnalyzeJobStateTasksEntityRecognitionTasksItem',
+ 'AnalyzeJobStateTasksKeyPhraseExtractionTasksItem',
+ 'AspectConfidenceScoreLabel',
+ 'AspectRelation',
+ 'DetectedLanguage',
+ 'DocumentEntities',
+ 'DocumentError',
+ 'DocumentHealthcareEntities',
+ 'DocumentKeyPhrases',
+ 'DocumentLanguage',
+ 'DocumentLinkedEntities',
+ 'DocumentSentiment',
+ 'DocumentStatistics',
+ 'EntitiesResult',
+ 'EntitiesTask',
+ 'EntitiesTaskParameters',
+ 'Entity',
+ 'EntityLinkingResult',
+ 'ErrorResponse',
+ 'HealthcareEntity',
+ 'HealthcareEntityLink',
+ 'HealthcareJobState',
+ 'HealthcareRelation',
+ 'HealthcareResult',
+ 'InnerError',
+ 'JobDescriptor',
+ 'JobManifest',
+ 'JobManifestTasks',
+ 'JobMetadata',
+ 'KeyPhraseResult',
+ 'KeyPhrasesTask',
+ 'KeyPhrasesTaskParameters',
+ 'LanguageBatchInput',
+ 'LanguageInput',
+ 'LanguageResult',
+ 'LinkedEntity',
+ 'Match',
+ 'MultiLanguageBatchInput',
+ 'MultiLanguageInput',
+ 'Pagination',
+ 'PiiDocumentEntities',
+ 'PiiResult',
+ 'PiiTask',
+ 'PiiTaskParameters',
+ 'RequestStatistics',
+ 'SentenceAspect',
+ 'SentenceOpinion',
+ 'SentenceSentiment',
+ 'SentimentConfidenceScorePerLabel',
+ 'SentimentResponse',
+ 'TasksState',
+ 'TaskState',
+ 'TextAnalyticsError',
+ 'TextAnalyticsWarning',
+ 'StringIndexTypeResponse',
+ 'ErrorCodeValue',
+ 'InnerErrorCodeValue',
+ 'WarningCodeValue',
+ 'DocumentSentimentValue',
+ 'SentenceSentimentValue',
+ 'TokenSentimentValue',
+ 'AspectRelationType',
+ 'State',
+ 'StringIndexType',
+]
diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_3/models/_models.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_3/models/_models.py
new file mode 100644
index 000000000000..d69d23092589
--- /dev/null
+++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_3/models/_models.py
@@ -0,0 +1,2155 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from msrest.serialization import Model
+
+
+class AnalyzeBatchInput(Model):
+ """AnalyzeBatchInput.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param display_name: Optional display name for the analysis job.
+ :type display_name: str
+ :param analysis_input: Required.
+ :type analysis_input:
+ ~azure.ai.textanalytics.v3_1_preview_3.models.MultiLanguageBatchInput
+ :param tasks: Required. The set of tasks to execute on the input
+ documents. Cannot specify the same task more than once.
+ :type tasks:
+ ~azure.ai.textanalytics.v3_1_preview_3.models.JobManifestTasks
+ """
+
+ # msrest metadata: attributes the serializer rejects when missing.
+ _validation = {
+ 'analysis_input': {'required': True},
+ 'tasks': {'required': True},
+ }
+
+ # Python attribute -> (wire key, wire type) mapping used by msrest.
+ _attribute_map = {
+ 'display_name': {'key': 'displayName', 'type': 'str'},
+ 'analysis_input': {'key': 'analysisInput', 'type': 'MultiLanguageBatchInput'},
+ 'tasks': {'key': 'tasks', 'type': 'JobManifestTasks'},
+ }
+
+ def __init__(self, **kwargs):
+ super(AnalyzeBatchInput, self).__init__(**kwargs)
+ self.display_name = kwargs.get('display_name', None)
+ self.analysis_input = kwargs.get('analysis_input', None)
+ self.tasks = kwargs.get('tasks', None)
+
+
+class JobMetadata(Model):
+ """JobMetadata.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param created_date_time: Required.
+ :type created_date_time: datetime
+ :param expiration_date_time:
+ :type expiration_date_time: datetime
+ :param job_id: Required.
+ :type job_id: str
+ :param last_update_date_time: Required.
+ :type last_update_date_time: datetime
+ :param status: Required. Possible values include: 'notStarted', 'running',
+ 'succeeded', 'failed', 'cancelled', 'cancelling', 'partiallyCompleted'
+ :type status: str or ~azure.ai.textanalytics.v3_1_preview_3.models.State
+ """
+
+ _validation = {
+ 'created_date_time': {'required': True},
+ 'job_id': {'required': True},
+ 'last_update_date_time': {'required': True},
+ 'status': {'required': True},
+ }
+
+ # Datetimes travel as ISO-8601 strings on the wire; 'status' uses the State enum.
+ _attribute_map = {
+ 'created_date_time': {'key': 'createdDateTime', 'type': 'iso-8601'},
+ 'expiration_date_time': {'key': 'expirationDateTime', 'type': 'iso-8601'},
+ 'job_id': {'key': 'jobId', 'type': 'str'},
+ 'last_update_date_time': {'key': 'lastUpdateDateTime', 'type': 'iso-8601'},
+ 'status': {'key': 'status', 'type': 'State'},
+ }
+
+ def __init__(self, **kwargs):
+ super(JobMetadata, self).__init__(**kwargs)
+ self.created_date_time = kwargs.get('created_date_time', None)
+ self.expiration_date_time = kwargs.get('expiration_date_time', None)
+ self.job_id = kwargs.get('job_id', None)
+ self.last_update_date_time = kwargs.get('last_update_date_time', None)
+ self.status = kwargs.get('status', None)
+
+
+class AnalyzeJobMetadata(JobMetadata):
+ """AnalyzeJobMetadata.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param created_date_time: Required.
+ :type created_date_time: datetime
+ :param expiration_date_time:
+ :type expiration_date_time: datetime
+ :param job_id: Required.
+ :type job_id: str
+ :param last_update_date_time: Required.
+ :type last_update_date_time: datetime
+ :param status: Required. Possible values include: 'notStarted', 'running',
+ 'succeeded', 'failed', 'cancelled', 'cancelling', 'partiallyCompleted'
+ :type status: str or ~azure.ai.textanalytics.v3_1_preview_3.models.State
+ :param display_name:
+ :type display_name: str
+ """
+
+ # AutoRest re-declares the inherited validation/attribute maps verbatim;
+ # the only addition over JobMetadata is the optional display_name.
+ _validation = {
+ 'created_date_time': {'required': True},
+ 'job_id': {'required': True},
+ 'last_update_date_time': {'required': True},
+ 'status': {'required': True},
+ }
+
+ _attribute_map = {
+ 'created_date_time': {'key': 'createdDateTime', 'type': 'iso-8601'},
+ 'expiration_date_time': {'key': 'expirationDateTime', 'type': 'iso-8601'},
+ 'job_id': {'key': 'jobId', 'type': 'str'},
+ 'last_update_date_time': {'key': 'lastUpdateDateTime', 'type': 'iso-8601'},
+ 'status': {'key': 'status', 'type': 'State'},
+ 'display_name': {'key': 'displayName', 'type': 'str'},
+ }
+
+ def __init__(self, **kwargs):
+ super(AnalyzeJobMetadata, self).__init__(**kwargs)
+ self.display_name = kwargs.get('display_name', None)
+
+
+class AnalyzeJobState(Model):
+ """AnalyzeJobState.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param display_name:
+ :type display_name: str
+ :param tasks: Required.
+ :type tasks:
+ ~azure.ai.textanalytics.v3_1_preview_3.models.AnalyzeJobStateTasks
+ :param errors:
+ :type errors:
+ list[~azure.ai.textanalytics.v3_1_preview_3.models.TextAnalyticsError]
+ :param statistics:
+ :type statistics:
+ ~azure.ai.textanalytics.v3_1_preview_3.models.RequestStatistics
+ :param next_link:
+ :type next_link: str
+ """
+
+ _validation = {
+ 'tasks': {'required': True},
+ }
+
+ # NOTE: the pagination link is serialized under the literal wire key
+ # '@nextLink' (leading '@'), not 'nextLink'.
+ _attribute_map = {
+ 'display_name': {'key': 'displayName', 'type': 'str'},
+ 'tasks': {'key': 'tasks', 'type': 'AnalyzeJobStateTasks'},
+ 'errors': {'key': 'errors', 'type': '[TextAnalyticsError]'},
+ 'statistics': {'key': 'statistics', 'type': 'RequestStatistics'},
+ 'next_link': {'key': '@nextLink', 'type': 'str'},
+ }
+
+ def __init__(self, **kwargs):
+ super(AnalyzeJobState, self).__init__(**kwargs)
+ self.display_name = kwargs.get('display_name', None)
+ self.tasks = kwargs.get('tasks', None)
+ self.errors = kwargs.get('errors', None)
+ self.statistics = kwargs.get('statistics', None)
+ self.next_link = kwargs.get('next_link', None)
+
+
+class AnalyzeJobStateTasks(Model):
+ """AnalyzeJobStateTasks.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param details:
+ :type details:
+ ~azure.ai.textanalytics.v3_1_preview_3.models.AnalyzeJobStateTasksDetails
+ :param completed: Required.
+ :type completed: int
+ :param failed: Required.
+ :type failed: int
+ :param in_progress: Required.
+ :type in_progress: int
+ :param total: Required.
+ :type total: int
+ :param entity_recognition_tasks:
+ :type entity_recognition_tasks:
+ list[~azure.ai.textanalytics.v3_1_preview_3.models.AnalyzeJobStateTasksEntityRecognitionTasksItem]
+ :param entity_recognition_pii_tasks:
+ :type entity_recognition_pii_tasks:
+ list[~azure.ai.textanalytics.v3_1_preview_3.models.AnalyzeJobStateTasksEntityRecognitionPiiTasksItem]
+ :param key_phrase_extraction_tasks:
+ :type key_phrase_extraction_tasks:
+ list[~azure.ai.textanalytics.v3_1_preview_3.models.AnalyzeJobStateTasksKeyPhraseExtractionTasksItem]
+ """
+
+ # Counters summarize job progress; the three lists hold per-task-type states.
+ _validation = {
+ 'completed': {'required': True},
+ 'failed': {'required': True},
+ 'in_progress': {'required': True},
+ 'total': {'required': True},
+ }
+
+ _attribute_map = {
+ 'details': {'key': 'details', 'type': 'AnalyzeJobStateTasksDetails'},
+ 'completed': {'key': 'completed', 'type': 'int'},
+ 'failed': {'key': 'failed', 'type': 'int'},
+ 'in_progress': {'key': 'inProgress', 'type': 'int'},
+ 'total': {'key': 'total', 'type': 'int'},
+ 'entity_recognition_tasks': {'key': 'entityRecognitionTasks', 'type': '[AnalyzeJobStateTasksEntityRecognitionTasksItem]'},
+ 'entity_recognition_pii_tasks': {'key': 'entityRecognitionPiiTasks', 'type': '[AnalyzeJobStateTasksEntityRecognitionPiiTasksItem]'},
+ 'key_phrase_extraction_tasks': {'key': 'keyPhraseExtractionTasks', 'type': '[AnalyzeJobStateTasksKeyPhraseExtractionTasksItem]'},
+ }
+
+ def __init__(self, **kwargs):
+ super(AnalyzeJobStateTasks, self).__init__(**kwargs)
+ self.details = kwargs.get('details', None)
+ self.completed = kwargs.get('completed', None)
+ self.failed = kwargs.get('failed', None)
+ self.in_progress = kwargs.get('in_progress', None)
+ self.total = kwargs.get('total', None)
+ self.entity_recognition_tasks = kwargs.get('entity_recognition_tasks', None)
+ self.entity_recognition_pii_tasks = kwargs.get('entity_recognition_pii_tasks', None)
+ self.key_phrase_extraction_tasks = kwargs.get('key_phrase_extraction_tasks', None)
+
+
+class TaskState(Model):
+ """TaskState.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param last_update_date_time: Required.
+ :type last_update_date_time: datetime
+ :param name:
+ :type name: str
+ :param status: Required.
+ :type status: object
+ """
+
+ _validation = {
+ 'last_update_date_time': {'required': True},
+ 'status': {'required': True},
+ }
+
+ # Unlike JobMetadata.status (State enum), 'status' here is an untyped
+ # object on the wire, as declared by the generated schema.
+ _attribute_map = {
+ 'last_update_date_time': {'key': 'lastUpdateDateTime', 'type': 'iso-8601'},
+ 'name': {'key': 'name', 'type': 'str'},
+ 'status': {'key': 'status', 'type': 'object'},
+ }
+
+ def __init__(self, **kwargs):
+ super(TaskState, self).__init__(**kwargs)
+ self.last_update_date_time = kwargs.get('last_update_date_time', None)
+ self.name = kwargs.get('name', None)
+ self.status = kwargs.get('status', None)
+
+
+class AnalyzeJobStateTasksDetails(TaskState):
+ """AnalyzeJobStateTasksDetails.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param last_update_date_time: Required.
+ :type last_update_date_time: datetime
+ :param name:
+ :type name: str
+ :param status: Required.
+ :type status: object
+ """
+
+ # Adds nothing beyond TaskState; generated so the swagger schema name
+ # round-trips as a distinct Python type.
+ _validation = {
+ 'last_update_date_time': {'required': True},
+ 'status': {'required': True},
+ }
+
+ _attribute_map = {
+ 'last_update_date_time': {'key': 'lastUpdateDateTime', 'type': 'iso-8601'},
+ 'name': {'key': 'name', 'type': 'str'},
+ 'status': {'key': 'status', 'type': 'object'},
+ }
+
+ def __init__(self, **kwargs):
+ super(AnalyzeJobStateTasksDetails, self).__init__(**kwargs)
+
+
+class AnalyzeJobStateTasksEntityRecognitionPiiTasksItem(TaskState):
+ """AnalyzeJobStateTasksEntityRecognitionPiiTasksItem.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param last_update_date_time: Required.
+ :type last_update_date_time: datetime
+ :param name:
+ :type name: str
+ :param status: Required.
+ :type status: object
+ :param results: Required.
+ :type results: ~azure.ai.textanalytics.v3_1_preview_3.models.PiiResult
+ """
+
+ # TaskState plus the completed PII-entity-recognition task's PiiResult payload.
+ _validation = {
+ 'last_update_date_time': {'required': True},
+ 'status': {'required': True},
+ 'results': {'required': True},
+ }
+
+ _attribute_map = {
+ 'last_update_date_time': {'key': 'lastUpdateDateTime', 'type': 'iso-8601'},
+ 'name': {'key': 'name', 'type': 'str'},
+ 'status': {'key': 'status', 'type': 'object'},
+ 'results': {'key': 'results', 'type': 'PiiResult'},
+ }
+
+ def __init__(self, **kwargs):
+ super(AnalyzeJobStateTasksEntityRecognitionPiiTasksItem, self).__init__(**kwargs)
+ self.results = kwargs.get('results', None)
+
+
+class AnalyzeJobStateTasksEntityRecognitionTasksItem(TaskState):
+ """AnalyzeJobStateTasksEntityRecognitionTasksItem.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param last_update_date_time: Required.
+ :type last_update_date_time: datetime
+ :param name:
+ :type name: str
+ :param status: Required.
+ :type status: object
+ :param results: Required.
+ :type results:
+ ~azure.ai.textanalytics.v3_1_preview_3.models.EntitiesResult
+ """
+
+ # TaskState plus the completed entity-recognition task's EntitiesResult payload.
+ _validation = {
+ 'last_update_date_time': {'required': True},
+ 'status': {'required': True},
+ 'results': {'required': True},
+ }
+
+ _attribute_map = {
+ 'last_update_date_time': {'key': 'lastUpdateDateTime', 'type': 'iso-8601'},
+ 'name': {'key': 'name', 'type': 'str'},
+ 'status': {'key': 'status', 'type': 'object'},
+ 'results': {'key': 'results', 'type': 'EntitiesResult'},
+ }
+
+ def __init__(self, **kwargs):
+ super(AnalyzeJobStateTasksEntityRecognitionTasksItem, self).__init__(**kwargs)
+ self.results = kwargs.get('results', None)
+
+
+class AnalyzeJobStateTasksKeyPhraseExtractionTasksItem(TaskState):
+ """AnalyzeJobStateTasksKeyPhraseExtractionTasksItem.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param last_update_date_time: Required.
+ :type last_update_date_time: datetime
+ :param name:
+ :type name: str
+ :param status: Required.
+ :type status: object
+ :param results: Required.
+ :type results:
+ ~azure.ai.textanalytics.v3_1_preview_3.models.KeyPhraseResult
+ """
+
+ # TaskState plus the completed key-phrase-extraction task's KeyPhraseResult payload.
+ _validation = {
+ 'last_update_date_time': {'required': True},
+ 'status': {'required': True},
+ 'results': {'required': True},
+ }
+
+ _attribute_map = {
+ 'last_update_date_time': {'key': 'lastUpdateDateTime', 'type': 'iso-8601'},
+ 'name': {'key': 'name', 'type': 'str'},
+ 'status': {'key': 'status', 'type': 'object'},
+ 'results': {'key': 'results', 'type': 'KeyPhraseResult'},
+ }
+
+ def __init__(self, **kwargs):
+ super(AnalyzeJobStateTasksKeyPhraseExtractionTasksItem, self).__init__(**kwargs)
+ self.results = kwargs.get('results', None)
+
+
+class AspectConfidenceScoreLabel(Model):
+ """Represents the aspect-level sentiment confidence scores: positive and
+ negative (this aspect-level model does not carry a neutral score).
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param positive: Required.
+ :type positive: float
+ :param negative: Required.
+ :type negative: float
+ """
+
+ _validation = {
+ 'positive': {'required': True},
+ 'negative': {'required': True},
+ }
+
+ _attribute_map = {
+ 'positive': {'key': 'positive', 'type': 'float'},
+ 'negative': {'key': 'negative', 'type': 'float'},
+ }
+
+ def __init__(self, **kwargs):
+ super(AspectConfidenceScoreLabel, self).__init__(**kwargs)
+ self.positive = kwargs.get('positive', None)
+ self.negative = kwargs.get('negative', None)
+
+
+class AspectRelation(Model):
+ """AspectRelation.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param relation_type: Required. The type related to the aspect. Possible
+ values include: 'opinion', 'aspect'
+ :type relation_type: str or
+ ~azure.ai.textanalytics.v3_1_preview_3.models.AspectRelationType
+ :param ref: Required. The JSON pointer indicating the linked object.
+ :type ref: str
+ """
+
+ # 'ref' carries a JSON pointer locating the related opinion/aspect in the response.
+ _validation = {
+ 'relation_type': {'required': True},
+ 'ref': {'required': True},
+ }
+
+ _attribute_map = {
+ 'relation_type': {'key': 'relationType', 'type': 'AspectRelationType'},
+ 'ref': {'key': 'ref', 'type': 'str'},
+ }
+
+ def __init__(self, **kwargs):
+ super(AspectRelation, self).__init__(**kwargs)
+ self.relation_type = kwargs.get('relation_type', None)
+ self.ref = kwargs.get('ref', None)
+
+
+class DetectedLanguage(Model):
+ """DetectedLanguage.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param name: Required. Long name of a detected language (e.g. English,
+ French).
+ :type name: str
+ :param iso6391_name: Required. A two letter representation of the detected
+ language according to the ISO 639-1 standard (e.g. en, fr).
+ :type iso6391_name: str
+ :param confidence_score: Required. A confidence score between 0 and 1.
+ Scores close to 1 indicate 100% certainty that the identified language is
+ true.
+ :type confidence_score: float
+ """
+
+ _validation = {
+ 'name': {'required': True},
+ 'iso6391_name': {'required': True},
+ 'confidence_score': {'required': True},
+ }
+
+ # Wire keys are camelCase ('iso6391Name', 'confidenceScore').
+ _attribute_map = {
+ 'name': {'key': 'name', 'type': 'str'},
+ 'iso6391_name': {'key': 'iso6391Name', 'type': 'str'},
+ 'confidence_score': {'key': 'confidenceScore', 'type': 'float'},
+ }
+
+ def __init__(self, **kwargs):
+ super(DetectedLanguage, self).__init__(**kwargs)
+ self.name = kwargs.get('name', None)
+ self.iso6391_name = kwargs.get('iso6391_name', None)
+ self.confidence_score = kwargs.get('confidence_score', None)
+
+
+class DocumentEntities(Model):
+ """DocumentEntities.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param id: Required. Unique, non-empty document identifier.
+ :type id: str
+ :param entities: Required. Recognized entities in the document.
+ :type entities: list[~azure.ai.textanalytics.v3_1_preview_3.models.Entity]
+ :param warnings: Required. Warnings encountered while processing document.
+ :type warnings:
+ list[~azure.ai.textanalytics.v3_1_preview_3.models.TextAnalyticsWarning]
+ :param statistics: if showStats=true was specified in the request this
+ field will contain information about the document payload.
+ :type statistics:
+ ~azure.ai.textanalytics.v3_1_preview_3.models.DocumentStatistics
+ """
+
+ # statistics is optional: per the docstring it is only populated when the
+ # request was sent with showStats=true.
+ _validation = {
+ 'id': {'required': True},
+ 'entities': {'required': True},
+ 'warnings': {'required': True},
+ }
+
+ _attribute_map = {
+ 'id': {'key': 'id', 'type': 'str'},
+ 'entities': {'key': 'entities', 'type': '[Entity]'},
+ 'warnings': {'key': 'warnings', 'type': '[TextAnalyticsWarning]'},
+ 'statistics': {'key': 'statistics', 'type': 'DocumentStatistics'},
+ }
+
+ def __init__(self, **kwargs):
+ super(DocumentEntities, self).__init__(**kwargs)
+ self.id = kwargs.get('id', None)
+ self.entities = kwargs.get('entities', None)
+ self.warnings = kwargs.get('warnings', None)
+ self.statistics = kwargs.get('statistics', None)
+
+
+class DocumentError(Model):
+ """Pairs a document id with the TextAnalyticsError that prevented it from
+ being processed.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param id: Required. Document Id.
+ :type id: str
+ :param error: Required. Document Error.
+ :type error:
+ ~azure.ai.textanalytics.v3_1_preview_3.models.TextAnalyticsError
+ """
+
+ _validation = {
+ 'id': {'required': True},
+ 'error': {'required': True},
+ }
+
+ _attribute_map = {
+ 'id': {'key': 'id', 'type': 'str'},
+ 'error': {'key': 'error', 'type': 'TextAnalyticsError'},
+ }
+
+ def __init__(self, **kwargs):
+ super(DocumentError, self).__init__(**kwargs)
+ self.id = kwargs.get('id', None)
+ self.error = kwargs.get('error', None)
+
+
+class DocumentHealthcareEntities(Model):
+ """DocumentHealthcareEntities.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param id: Required. Unique, non-empty document identifier.
+ :type id: str
+ :param entities: Required. Healthcare entities.
+ :type entities:
+ list[~azure.ai.textanalytics.v3_1_preview_3.models.HealthcareEntity]
+ :param relations: Required. Healthcare entity relations.
+ :type relations:
+ list[~azure.ai.textanalytics.v3_1_preview_3.models.HealthcareRelation]
+ :param warnings: Required. Warnings encountered while processing document.
+ :type warnings:
+ list[~azure.ai.textanalytics.v3_1_preview_3.models.TextAnalyticsWarning]
+ :param statistics: if showStats=true was specified in the request this
+ field will contain information about the document payload.
+ :type statistics:
+ ~azure.ai.textanalytics.v3_1_preview_3.models.DocumentStatistics
+ """
+
+ # Optional statistics appear only for showStats=true requests (see docstring).
+ _validation = {
+ 'id': {'required': True},
+ 'entities': {'required': True},
+ 'relations': {'required': True},
+ 'warnings': {'required': True},
+ }
+
+ _attribute_map = {
+ 'id': {'key': 'id', 'type': 'str'},
+ 'entities': {'key': 'entities', 'type': '[HealthcareEntity]'},
+ 'relations': {'key': 'relations', 'type': '[HealthcareRelation]'},
+ 'warnings': {'key': 'warnings', 'type': '[TextAnalyticsWarning]'},
+ 'statistics': {'key': 'statistics', 'type': 'DocumentStatistics'},
+ }
+
+ def __init__(self, **kwargs):
+ super(DocumentHealthcareEntities, self).__init__(**kwargs)
+ self.id = kwargs.get('id', None)
+ self.entities = kwargs.get('entities', None)
+ self.relations = kwargs.get('relations', None)
+ self.warnings = kwargs.get('warnings', None)
+ self.statistics = kwargs.get('statistics', None)
+
+
+class DocumentKeyPhrases(Model):
+ """DocumentKeyPhrases.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param id: Required. Unique, non-empty document identifier.
+ :type id: str
+ :param key_phrases: Required. A list of representative words or phrases.
+ The number of key phrases returned is proportional to the number of words
+ in the input document.
+ :type key_phrases: list[str]
+ :param warnings: Required. Warnings encountered while processing document.
+ :type warnings:
+ list[~azure.ai.textanalytics.v3_1_preview_3.models.TextAnalyticsWarning]
+ :param statistics: if showStats=true was specified in the request this
+ field will contain information about the document payload.
+ :type statistics:
+ ~azure.ai.textanalytics.v3_1_preview_3.models.DocumentStatistics
+ """
+
+ # Optional statistics appear only for showStats=true requests (see docstring).
+ _validation = {
+ 'id': {'required': True},
+ 'key_phrases': {'required': True},
+ 'warnings': {'required': True},
+ }
+
+ _attribute_map = {
+ 'id': {'key': 'id', 'type': 'str'},
+ 'key_phrases': {'key': 'keyPhrases', 'type': '[str]'},
+ 'warnings': {'key': 'warnings', 'type': '[TextAnalyticsWarning]'},
+ 'statistics': {'key': 'statistics', 'type': 'DocumentStatistics'},
+ }
+
+ def __init__(self, **kwargs):
+ super(DocumentKeyPhrases, self).__init__(**kwargs)
+ self.id = kwargs.get('id', None)
+ self.key_phrases = kwargs.get('key_phrases', None)
+ self.warnings = kwargs.get('warnings', None)
+ self.statistics = kwargs.get('statistics', None)
+
+
+class DocumentLanguage(Model):
+ """DocumentLanguage.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param id: Required. Unique, non-empty document identifier.
+ :type id: str
+ :param detected_language: Required. Detected Language.
+ :type detected_language:
+ ~azure.ai.textanalytics.v3_1_preview_3.models.DetectedLanguage
+ :param warnings: Required. Warnings encountered while processing document.
+ :type warnings:
+ list[~azure.ai.textanalytics.v3_1_preview_3.models.TextAnalyticsWarning]
+ :param statistics: if showStats=true was specified in the request this
+ field will contain information about the document payload.
+ :type statistics:
+ ~azure.ai.textanalytics.v3_1_preview_3.models.DocumentStatistics
+ """
+
+ # Optional statistics appear only for showStats=true requests (see docstring).
+ _validation = {
+ 'id': {'required': True},
+ 'detected_language': {'required': True},
+ 'warnings': {'required': True},
+ }
+
+ _attribute_map = {
+ 'id': {'key': 'id', 'type': 'str'},
+ 'detected_language': {'key': 'detectedLanguage', 'type': 'DetectedLanguage'},
+ 'warnings': {'key': 'warnings', 'type': '[TextAnalyticsWarning]'},
+ 'statistics': {'key': 'statistics', 'type': 'DocumentStatistics'},
+ }
+
+ def __init__(self, **kwargs):
+ super(DocumentLanguage, self).__init__(**kwargs)
+ self.id = kwargs.get('id', None)
+ self.detected_language = kwargs.get('detected_language', None)
+ self.warnings = kwargs.get('warnings', None)
+ self.statistics = kwargs.get('statistics', None)
+
+
+class DocumentLinkedEntities(Model):
+ """DocumentLinkedEntities.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param id: Required. Unique, non-empty document identifier.
+ :type id: str
+ :param entities: Required. Recognized well-known entities in the document.
+ :type entities:
+ list[~azure.ai.textanalytics.v3_1_preview_3.models.LinkedEntity]
+ :param warnings: Required. Warnings encountered while processing document.
+ :type warnings:
+ list[~azure.ai.textanalytics.v3_1_preview_3.models.TextAnalyticsWarning]
+ :param statistics: if showStats=true was specified in the request this
+ field will contain information about the document payload.
+ :type statistics:
+ ~azure.ai.textanalytics.v3_1_preview_3.models.DocumentStatistics
+ """
+
+ # Optional statistics appear only for showStats=true requests (see docstring).
+ _validation = {
+ 'id': {'required': True},
+ 'entities': {'required': True},
+ 'warnings': {'required': True},
+ }
+
+ _attribute_map = {
+ 'id': {'key': 'id', 'type': 'str'},
+ 'entities': {'key': 'entities', 'type': '[LinkedEntity]'},
+ 'warnings': {'key': 'warnings', 'type': '[TextAnalyticsWarning]'},
+ 'statistics': {'key': 'statistics', 'type': 'DocumentStatistics'},
+ }
+
+ def __init__(self, **kwargs):
+ super(DocumentLinkedEntities, self).__init__(**kwargs)
+ self.id = kwargs.get('id', None)
+ self.entities = kwargs.get('entities', None)
+ self.warnings = kwargs.get('warnings', None)
+ self.statistics = kwargs.get('statistics', None)
+
+
class DocumentSentiment(Model):
    """Sentiment analysis outcome for a single document.

    All required parameters must be populated in order to send to Azure.

    :param id: Required. Unique, non-empty document identifier.
    :type id: str
    :param sentiment: Required. Predicted document-level sentiment. Possible
     values include: 'positive', 'neutral', 'negative', 'mixed'
    :type sentiment: str or
     ~azure.ai.textanalytics.v3_1_preview_3.models.DocumentSentimentValue
    :param statistics: Document-payload statistics when requested.
    :type statistics:
     ~azure.ai.textanalytics.v3_1_preview_3.models.DocumentStatistics
    :param confidence_scores: Required. Confidence scores in [0, 1] for each
     sentiment class at document level.
    :type confidence_scores:
     ~azure.ai.textanalytics.v3_1_preview_3.models.SentimentConfidenceScorePerLabel
    :param sentences: Required. Per-sentence sentiment analysis.
    :type sentences:
     list[~azure.ai.textanalytics.v3_1_preview_3.models.SentenceSentiment]
    :param warnings: Required. Warnings raised while processing the document.
    :type warnings:
     list[~azure.ai.textanalytics.v3_1_preview_3.models.TextAnalyticsWarning]
    """

    _validation = {
        'id': {'required': True},
        'sentiment': {'required': True},
        'confidence_scores': {'required': True},
        'sentences': {'required': True},
        'warnings': {'required': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'sentiment': {'key': 'sentiment', 'type': 'DocumentSentimentValue'},
        'statistics': {'key': 'statistics', 'type': 'DocumentStatistics'},
        'confidence_scores': {'key': 'confidenceScores', 'type': 'SentimentConfidenceScorePerLabel'},
        'sentences': {'key': 'sentences', 'type': '[SentenceSentiment]'},
        'warnings': {'key': 'warnings', 'type': '[TextAnalyticsWarning]'},
    }

    def __init__(self, **kwargs):
        super(DocumentSentiment, self).__init__(**kwargs)
        # Absent keyword arguments become None attributes.
        for _attr in ('id', 'sentiment', 'statistics', 'confidence_scores',
                      'sentences', 'warnings'):
            setattr(self, _attr, kwargs.get(_attr))
+
+
class DocumentStatistics(Model):
    """Statistics about a single document payload; returned only when
    showStats=true was specified in the request.

    All required parameters must be populated in order to send to Azure.

    :param characters_count: Required. Number of text elements recognized in
     the document.
    :type characters_count: int
    :param transactions_count: Required. Number of transactions counted for
     the document.
    :type transactions_count: int
    """

    _validation = {
        'characters_count': {'required': True},
        'transactions_count': {'required': True},
    }

    _attribute_map = {
        'characters_count': {'key': 'charactersCount', 'type': 'int'},
        'transactions_count': {'key': 'transactionsCount', 'type': 'int'},
    }

    def __init__(self, **kwargs):
        super(DocumentStatistics, self).__init__(**kwargs)
        self.characters_count = kwargs.get('characters_count')
        self.transactions_count = kwargs.get('transactions_count')
+
+
class EntitiesResult(Model):
    """Batch result of an entity-recognition request.

    All required parameters must be populated in order to send to Azure.

    :param documents: Required. One response entry per input document.
    :type documents:
     list[~azure.ai.textanalytics.v3_1_preview_3.models.DocumentEntities]
    :param errors: Required. Errors keyed by document id.
    :type errors:
     list[~azure.ai.textanalytics.v3_1_preview_3.models.DocumentError]
    :param statistics: Request-level statistics when requested.
    :type statistics:
     ~azure.ai.textanalytics.v3_1_preview_3.models.RequestStatistics
    :param model_version: Required. The model version used for scoring.
    :type model_version: str
    """

    _validation = {
        'documents': {'required': True},
        'errors': {'required': True},
        'model_version': {'required': True},
    }

    _attribute_map = {
        'documents': {'key': 'documents', 'type': '[DocumentEntities]'},
        'errors': {'key': 'errors', 'type': '[DocumentError]'},
        'statistics': {'key': 'statistics', 'type': 'RequestStatistics'},
        'model_version': {'key': 'modelVersion', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(EntitiesResult, self).__init__(**kwargs)
        # All attributes default to None when not supplied.
        for _attr in ('documents', 'errors', 'statistics', 'model_version'):
            setattr(self, _attr, kwargs.get(_attr))
+
+
class EntitiesTask(Model):
    """An entity-recognition task to run as part of an analysis job.

    :param parameters: Optional task configuration.
    :type parameters:
     ~azure.ai.textanalytics.v3_1_preview_3.models.EntitiesTaskParameters
    """

    _attribute_map = {
        'parameters': {'key': 'parameters', 'type': 'EntitiesTaskParameters'},
    }

    def __init__(self, **kwargs):
        super(EntitiesTask, self).__init__(**kwargs)
        self.parameters = kwargs.get('parameters')
+
+
class EntitiesTaskParameters(Model):
    """Configuration for an entity-recognition task.

    :param model_version: Model version to use. Default value: "latest" .
    :type model_version: str
    :param string_index_type: Unit used for string offsets/lengths. Possible
     values include: 'TextElements_v8', 'UnicodeCodePoint', 'Utf16CodeUnit'.
     Default value: "TextElements_v8" .
    :type string_index_type: str or
     ~azure.ai.textanalytics.v3_1_preview_3.models.StringIndexTypeResponse
    """

    _attribute_map = {
        'model_version': {'key': 'model-version', 'type': 'str'},
        'string_index_type': {'key': 'stringIndexType', 'type': 'StringIndexTypeResponse'},
    }

    def __init__(self, **kwargs):
        super(EntitiesTaskParameters, self).__init__(**kwargs)
        # Both parameters carry service-side defaults, mirrored here.
        self.model_version = kwargs.get('model_version', "latest")
        self.string_index_type = kwargs.get('string_index_type', "TextElements_v8")
+
+
class Entity(Model):
    """A single entity recognized in a document.

    All required parameters must be populated in order to send to Azure.

    :param text: Required. Entity text exactly as it appears in the request.
    :type text: str
    :param category: Required. Entity type.
    :type category: str
    :param subcategory: (Optional) Entity sub type.
    :type subcategory: str
    :param offset: Required. Start position of the entity text; affected by
     the 'stringIndexType' in use.
    :type offset: int
    :param length: Required. Length of the entity text; affected by the
     'stringIndexType' in use.
    :type length: int
    :param confidence_score: Required. Extraction confidence in [0, 1].
    :type confidence_score: float
    """

    _validation = {
        'text': {'required': True},
        'category': {'required': True},
        'offset': {'required': True},
        'length': {'required': True},
        'confidence_score': {'required': True},
    }

    _attribute_map = {
        'text': {'key': 'text', 'type': 'str'},
        'category': {'key': 'category', 'type': 'str'},
        'subcategory': {'key': 'subcategory', 'type': 'str'},
        'offset': {'key': 'offset', 'type': 'int'},
        'length': {'key': 'length', 'type': 'int'},
        'confidence_score': {'key': 'confidenceScore', 'type': 'float'},
    }

    def __init__(self, **kwargs):
        super(Entity, self).__init__(**kwargs)
        # Missing keyword arguments default to None.
        for _attr in ('text', 'category', 'subcategory', 'offset', 'length',
                      'confidence_score'):
            setattr(self, _attr, kwargs.get(_attr))
+
+
class EntityLinkingResult(Model):
    """Batch result of an entity-linking request.

    All required parameters must be populated in order to send to Azure.

    :param documents: Required. One response entry per input document.
    :type documents:
     list[~azure.ai.textanalytics.v3_1_preview_3.models.DocumentLinkedEntities]
    :param errors: Required. Errors keyed by document id.
    :type errors:
     list[~azure.ai.textanalytics.v3_1_preview_3.models.DocumentError]
    :param statistics: Request-level statistics when requested.
    :type statistics:
     ~azure.ai.textanalytics.v3_1_preview_3.models.RequestStatistics
    :param model_version: Required. The model version used for scoring.
    :type model_version: str
    """

    _validation = {
        'documents': {'required': True},
        'errors': {'required': True},
        'model_version': {'required': True},
    }

    _attribute_map = {
        'documents': {'key': 'documents', 'type': '[DocumentLinkedEntities]'},
        'errors': {'key': 'errors', 'type': '[DocumentError]'},
        'statistics': {'key': 'statistics', 'type': 'RequestStatistics'},
        'model_version': {'key': 'modelVersion', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(EntityLinkingResult, self).__init__(**kwargs)
        # All attributes default to None when not supplied.
        for _attr in ('documents', 'errors', 'statistics', 'model_version'):
            setattr(self, _attr, kwargs.get(_attr))
+
+
class ErrorResponse(Model):
    """Top-level error envelope returned by the service.

    All required parameters must be populated in order to send to Azure.

    :param error: Required. The document error.
    :type error:
     ~azure.ai.textanalytics.v3_1_preview_3.models.TextAnalyticsError
    """

    _validation = {
        'error': {'required': True},
    }

    _attribute_map = {
        'error': {'key': 'error', 'type': 'TextAnalyticsError'},
    }

    def __init__(self, **kwargs):
        super(ErrorResponse, self).__init__(**kwargs)
        self.error = kwargs.get('error')
+
+
class HealthcareEntity(Entity):
    """An entity recognized by the healthcare model; extends
    :class:`Entity` with negation and data-source links.

    All required parameters must be populated in order to send to Azure.

    :param text: Required. Entity text exactly as it appears in the request.
    :type text: str
    :param category: Required. Entity type.
    :type category: str
    :param subcategory: (Optional) Entity sub type.
    :type subcategory: str
    :param offset: Required. Start position of the entity text; affected by
     the 'stringIndexType' in use.
    :type offset: int
    :param length: Required. Length of the entity text; affected by the
     'stringIndexType' in use.
    :type length: int
    :param confidence_score: Required. Extraction confidence in [0, 1].
    :type confidence_score: float
    :param is_negated: Required. Whether the entity is negated in context.
    :type is_negated: bool
    :param links: References to the entity in known data sources.
    :type links:
     list[~azure.ai.textanalytics.v3_1_preview_3.models.HealthcareEntityLink]
    """

    _validation = {
        'text': {'required': True},
        'category': {'required': True},
        'offset': {'required': True},
        'length': {'required': True},
        'confidence_score': {'required': True},
        'is_negated': {'required': True},
    }

    _attribute_map = {
        'text': {'key': 'text', 'type': 'str'},
        'category': {'key': 'category', 'type': 'str'},
        'subcategory': {'key': 'subcategory', 'type': 'str'},
        'offset': {'key': 'offset', 'type': 'int'},
        'length': {'key': 'length', 'type': 'int'},
        'confidence_score': {'key': 'confidenceScore', 'type': 'float'},
        'is_negated': {'key': 'isNegated', 'type': 'bool'},
        'links': {'key': 'links', 'type': '[HealthcareEntityLink]'},
    }

    def __init__(self, **kwargs):
        # Base-class fields (text, category, ...) are set by Entity.__init__.
        super(HealthcareEntity, self).__init__(**kwargs)
        self.is_negated = kwargs.get('is_negated')
        self.links = kwargs.get('links')
+
+
class HealthcareEntityLink(Model):
    """Reference to a healthcare entity in an external catalog.

    All required parameters must be populated in order to send to Azure.

    :param data_source: Required. Entity catalog, e.g. UMLS, CHV, MSH.
    :type data_source: str
    :param id: Required. Entity id within the given source catalog.
    :type id: str
    """

    _validation = {
        'data_source': {'required': True},
        'id': {'required': True},
    }

    _attribute_map = {
        'data_source': {'key': 'dataSource', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(HealthcareEntityLink, self).__init__(**kwargs)
        self.data_source = kwargs.get('data_source')
        self.id = kwargs.get('id')
+
+
class HealthcareJobState(Model):
    """State of a long-running healthcare analysis job.

    All required parameters must be populated in order to send to Azure.

    :param created_date_time: Required. Job creation time.
    :type created_date_time: datetime
    :param expiration_date_time: Time after which the job expires.
    :type expiration_date_time: datetime
    :param job_id: Required. Job identifier.
    :type job_id: str
    :param last_update_date_time: Required. Time of the last status change.
    :type last_update_date_time: datetime
    :param status: Required. Possible values include: 'notStarted',
     'running', 'succeeded', 'failed', 'cancelled', 'cancelling',
     'partiallyCompleted'
    :type status: str or ~azure.ai.textanalytics.v3_1_preview_3.models.State
    :param results: Job results, when available.
    :type results:
     ~azure.ai.textanalytics.v3_1_preview_3.models.HealthcareResult
    :param errors: Errors encountered by the job.
    :type errors:
     list[~azure.ai.textanalytics.v3_1_preview_3.models.TextAnalyticsError]
    :param next_link: Continuation link for paged results.
    :type next_link: str
    """

    _validation = {
        'created_date_time': {'required': True},
        'job_id': {'required': True},
        'last_update_date_time': {'required': True},
        'status': {'required': True},
    }

    _attribute_map = {
        'created_date_time': {'key': 'createdDateTime', 'type': 'iso-8601'},
        'expiration_date_time': {'key': 'expirationDateTime', 'type': 'iso-8601'},
        'job_id': {'key': 'jobId', 'type': 'str'},
        'last_update_date_time': {'key': 'lastUpdateDateTime', 'type': 'iso-8601'},
        'status': {'key': 'status', 'type': 'State'},
        'results': {'key': 'results', 'type': 'HealthcareResult'},
        'errors': {'key': 'errors', 'type': '[TextAnalyticsError]'},
        'next_link': {'key': '@nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(HealthcareJobState, self).__init__(**kwargs)
        # Absent keyword arguments become None attributes.
        for _attr in ('created_date_time', 'expiration_date_time', 'job_id',
                      'last_update_date_time', 'status', 'results', 'errors',
                      'next_link'):
            setattr(self, _attr, kwargs.get(_attr))
+
+
class HealthcareRelation(Model):
    """A relation between two healthcare entities.

    All required parameters must be populated in order to send to Azure.

    :param relation_type: Required. Relation type, e.g. `DosageOfMedication`
     or 'FrequencyOfMedication'.
    :type relation_type: str
    :param bidirectional: Required. True when the relation holds in both
     directions; otherwise directionality is source to target.
    :type bidirectional: bool
    :param source: Required. Reference link to the source entity.
    :type source: str
    :param target: Required. Reference link to the target entity.
    :type target: str
    """

    _validation = {
        'relation_type': {'required': True},
        'bidirectional': {'required': True},
        'source': {'required': True},
        'target': {'required': True},
    }

    _attribute_map = {
        'relation_type': {'key': 'relationType', 'type': 'str'},
        'bidirectional': {'key': 'bidirectional', 'type': 'bool'},
        'source': {'key': 'source', 'type': 'str'},
        'target': {'key': 'target', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(HealthcareRelation, self).__init__(**kwargs)
        for _attr in ('relation_type', 'bidirectional', 'source', 'target'):
            setattr(self, _attr, kwargs.get(_attr))
+
+
class HealthcareResult(Model):
    """Batch result of a healthcare-entities request.

    All required parameters must be populated in order to send to Azure.

    :param documents: Required. One response entry per input document.
    :type documents:
     list[~azure.ai.textanalytics.v3_1_preview_3.models.DocumentHealthcareEntities]
    :param errors: Required. Errors keyed by document id.
    :type errors:
     list[~azure.ai.textanalytics.v3_1_preview_3.models.DocumentError]
    :param statistics: Request-level statistics when requested.
    :type statistics:
     ~azure.ai.textanalytics.v3_1_preview_3.models.RequestStatistics
    :param model_version: Required. The model version used for scoring.
    :type model_version: str
    """

    _validation = {
        'documents': {'required': True},
        'errors': {'required': True},
        'model_version': {'required': True},
    }

    _attribute_map = {
        'documents': {'key': 'documents', 'type': '[DocumentHealthcareEntities]'},
        'errors': {'key': 'errors', 'type': '[DocumentError]'},
        'statistics': {'key': 'statistics', 'type': 'RequestStatistics'},
        'model_version': {'key': 'modelVersion', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(HealthcareResult, self).__init__(**kwargs)
        # All attributes default to None when not supplied.
        for _attr in ('documents', 'errors', 'statistics', 'model_version'):
            setattr(self, _attr, kwargs.get(_attr))
+
+
class InnerError(Model):
    """A nested, more specific error description.

    All required parameters must be populated in order to send to Azure.

    :param code: Required. Error code. Possible values include:
     'InvalidParameterValue', 'InvalidRequestBodyFormat', 'EmptyRequest',
     'MissingInputRecords', 'InvalidDocument', 'ModelVersionIncorrect',
     'InvalidDocumentBatch', 'UnsupportedLanguageCode', 'InvalidCountryHint'
    :type code: str or
     ~azure.ai.textanalytics.v3_1_preview_3.models.InnerErrorCodeValue
    :param message: Required. Error message.
    :type message: str
    :param details: Additional error details.
    :type details: dict[str, str]
    :param target: Error target.
    :type target: str
    :param innererror: A further-nested inner error, if any.
    :type innererror: ~azure.ai.textanalytics.v3_1_preview_3.models.InnerError
    """

    _validation = {
        'code': {'required': True},
        'message': {'required': True},
    }

    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'details': {'key': 'details', 'type': '{str}'},
        'target': {'key': 'target', 'type': 'str'},
        'innererror': {'key': 'innererror', 'type': 'InnerError'},
    }

    def __init__(self, **kwargs):
        super(InnerError, self).__init__(**kwargs)
        for _attr in ('code', 'message', 'details', 'target', 'innererror'):
            setattr(self, _attr, kwargs.get(_attr))
+
+
class JobDescriptor(Model):
    """Descriptive metadata for an analysis job.

    :param display_name: Optional display name for the analysis job.
    :type display_name: str
    """

    _attribute_map = {
        'display_name': {'key': 'displayName', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(JobDescriptor, self).__init__(**kwargs)
        self.display_name = kwargs.get('display_name')
+
+
class JobManifest(Model):
    """The set of tasks an analysis job should run.

    All required parameters must be populated in order to send to Azure.

    :param tasks: Required. Tasks to execute on the input documents; the same
     task may not be specified more than once.
    :type tasks:
     ~azure.ai.textanalytics.v3_1_preview_3.models.JobManifestTasks
    """

    _validation = {
        'tasks': {'required': True},
    }

    _attribute_map = {
        'tasks': {'key': 'tasks', 'type': 'JobManifestTasks'},
    }

    def __init__(self, **kwargs):
        super(JobManifest, self).__init__(**kwargs)
        self.tasks = kwargs.get('tasks')
+
+
class JobManifestTasks(Model):
    """Container for the tasks to execute on the input documents. The same
    task cannot be specified more than once.

    :param entity_recognition_tasks: Entity-recognition tasks.
    :type entity_recognition_tasks:
     list[~azure.ai.textanalytics.v3_1_preview_3.models.EntitiesTask]
    :param entity_recognition_pii_tasks: PII entity-recognition tasks.
    :type entity_recognition_pii_tasks:
     list[~azure.ai.textanalytics.v3_1_preview_3.models.PiiTask]
    :param key_phrase_extraction_tasks: Key-phrase extraction tasks.
    :type key_phrase_extraction_tasks:
     list[~azure.ai.textanalytics.v3_1_preview_3.models.KeyPhrasesTask]
    """

    _attribute_map = {
        'entity_recognition_tasks': {'key': 'entityRecognitionTasks', 'type': '[EntitiesTask]'},
        'entity_recognition_pii_tasks': {'key': 'entityRecognitionPiiTasks', 'type': '[PiiTask]'},
        'key_phrase_extraction_tasks': {'key': 'keyPhraseExtractionTasks', 'type': '[KeyPhrasesTask]'},
    }

    def __init__(self, **kwargs):
        super(JobManifestTasks, self).__init__(**kwargs)
        for _attr in ('entity_recognition_tasks', 'entity_recognition_pii_tasks',
                      'key_phrase_extraction_tasks'):
            setattr(self, _attr, kwargs.get(_attr))
+
+
class KeyPhraseResult(Model):
    """Batch result of a key-phrase extraction request.

    All required parameters must be populated in order to send to Azure.

    :param documents: Required. One response entry per input document.
    :type documents:
     list[~azure.ai.textanalytics.v3_1_preview_3.models.DocumentKeyPhrases]
    :param errors: Required. Errors keyed by document id.
    :type errors:
     list[~azure.ai.textanalytics.v3_1_preview_3.models.DocumentError]
    :param statistics: Request-level statistics when requested.
    :type statistics:
     ~azure.ai.textanalytics.v3_1_preview_3.models.RequestStatistics
    :param model_version: Required. The model version used for scoring.
    :type model_version: str
    """

    _validation = {
        'documents': {'required': True},
        'errors': {'required': True},
        'model_version': {'required': True},
    }

    _attribute_map = {
        'documents': {'key': 'documents', 'type': '[DocumentKeyPhrases]'},
        'errors': {'key': 'errors', 'type': '[DocumentError]'},
        'statistics': {'key': 'statistics', 'type': 'RequestStatistics'},
        'model_version': {'key': 'modelVersion', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(KeyPhraseResult, self).__init__(**kwargs)
        # All attributes default to None when not supplied.
        for _attr in ('documents', 'errors', 'statistics', 'model_version'):
            setattr(self, _attr, kwargs.get(_attr))
+
+
class KeyPhrasesTask(Model):
    """A key-phrase extraction task to run as part of an analysis job.

    :param parameters: Optional task configuration.
    :type parameters:
     ~azure.ai.textanalytics.v3_1_preview_3.models.KeyPhrasesTaskParameters
    """

    _attribute_map = {
        'parameters': {'key': 'parameters', 'type': 'KeyPhrasesTaskParameters'},
    }

    def __init__(self, **kwargs):
        super(KeyPhrasesTask, self).__init__(**kwargs)
        self.parameters = kwargs.get('parameters')
+
+
class KeyPhrasesTaskParameters(Model):
    """Configuration for a key-phrase extraction task.

    :param model_version: Model version to use. Default value: "latest" .
    :type model_version: str
    """

    _attribute_map = {
        'model_version': {'key': 'model-version', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(KeyPhrasesTaskParameters, self).__init__(**kwargs)
        # Service-side default mirrored client-side.
        self.model_version = kwargs.get('model_version', "latest")
+
+
class LanguageBatchInput(Model):
    """Batch of documents submitted for language detection.

    All required parameters must be populated in order to send to Azure.

    :param documents: Required. The documents to analyze.
    :type documents:
     list[~azure.ai.textanalytics.v3_1_preview_3.models.LanguageInput]
    """

    _validation = {
        'documents': {'required': True},
    }

    _attribute_map = {
        'documents': {'key': 'documents', 'type': '[LanguageInput]'},
    }

    def __init__(self, **kwargs):
        super(LanguageBatchInput, self).__init__(**kwargs)
        self.documents = kwargs.get('documents')
+
+
class LanguageInput(Model):
    """A single document submitted for language detection.

    All required parameters must be populated in order to send to Azure.

    :param id: Required. Unique, non-empty document identifier.
    :type id: str
    :param text: Required. The document text.
    :type text: str
    :param country_hint: Optional hint about the document's country of origin.
    :type country_hint: str
    """

    _validation = {
        'id': {'required': True},
        'text': {'required': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'text': {'key': 'text', 'type': 'str'},
        'country_hint': {'key': 'countryHint', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(LanguageInput, self).__init__(**kwargs)
        for _attr in ('id', 'text', 'country_hint'):
            setattr(self, _attr, kwargs.get(_attr))
+
+
class LanguageResult(Model):
    """Batch result of a language-detection request.

    All required parameters must be populated in order to send to Azure.

    :param documents: Required. One response entry per input document.
    :type documents:
     list[~azure.ai.textanalytics.v3_1_preview_3.models.DocumentLanguage]
    :param errors: Required. Errors keyed by document id.
    :type errors:
     list[~azure.ai.textanalytics.v3_1_preview_3.models.DocumentError]
    :param statistics: Request-level statistics when requested.
    :type statistics:
     ~azure.ai.textanalytics.v3_1_preview_3.models.RequestStatistics
    :param model_version: Required. The model version used for scoring.
    :type model_version: str
    """

    _validation = {
        'documents': {'required': True},
        'errors': {'required': True},
        'model_version': {'required': True},
    }

    _attribute_map = {
        'documents': {'key': 'documents', 'type': '[DocumentLanguage]'},
        'errors': {'key': 'errors', 'type': '[DocumentError]'},
        'statistics': {'key': 'statistics', 'type': 'RequestStatistics'},
        'model_version': {'key': 'modelVersion', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(LanguageResult, self).__init__(**kwargs)
        # All attributes default to None when not supplied.
        for _attr in ('documents', 'errors', 'statistics', 'model_version'):
            setattr(self, _attr, kwargs.get(_attr))
+
+
class LinkedEntity(Model):
    """A well-known entity linked to an external data source.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. Entity-linking formal name.
    :type name: str
    :param matches: Required. Occurrences of this entity in the text.
    :type matches: list[~azure.ai.textanalytics.v3_1_preview_3.models.Match]
    :param language: Required. Language used in the data source.
    :type language: str
    :param id: Unique identifier of the recognized entity within the data
     source.
    :type id: str
    :param url: Required. URL of the entity's page in the data source.
    :type url: str
    :param data_source: Required. Data source used for linking, such as
     Wiki/Bing etc.
    :type data_source: str
    :param bing_id: Bing Entity Search API unique identifier of the
     recognized entity.
    :type bing_id: str
    """

    _validation = {
        'name': {'required': True},
        'matches': {'required': True},
        'language': {'required': True},
        'url': {'required': True},
        'data_source': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'matches': {'key': 'matches', 'type': '[Match]'},
        'language': {'key': 'language', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'url': {'key': 'url', 'type': 'str'},
        'data_source': {'key': 'dataSource', 'type': 'str'},
        'bing_id': {'key': 'bingId', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(LinkedEntity, self).__init__(**kwargs)
        # Absent keyword arguments become None attributes.
        for _attr in ('name', 'matches', 'language', 'id', 'url',
                      'data_source', 'bing_id'):
            setattr(self, _attr, kwargs.get(_attr))
+
+
class Match(Model):
    """A single occurrence of a linked entity in the input text.

    All required parameters must be populated in order to send to Azure.

    :param confidence_score: Required. Confidence level in [0, 1] that the
     well-known item was recognized.
    :type confidence_score: float
    :param text: Required. Entity text exactly as it appears in the request.
    :type text: str
    :param offset: Required. Start position of the entity match text.
    :type offset: int
    :param length: Required. Length of the entity match text.
    :type length: int
    """

    _validation = {
        'confidence_score': {'required': True},
        'text': {'required': True},
        'offset': {'required': True},
        'length': {'required': True},
    }

    _attribute_map = {
        'confidence_score': {'key': 'confidenceScore', 'type': 'float'},
        'text': {'key': 'text', 'type': 'str'},
        'offset': {'key': 'offset', 'type': 'int'},
        'length': {'key': 'length', 'type': 'int'},
    }

    def __init__(self, **kwargs):
        super(Match, self).__init__(**kwargs)
        for _attr in ('confidence_score', 'text', 'offset', 'length'):
            setattr(self, _attr, kwargs.get(_attr))
+
+
class MultiLanguageBatchInput(Model):
    """A set of input documents to be analyzed by the service.

    All required parameters must be populated in order to send to Azure.

    :param documents: Required. The documents to process in this batch.
    :type documents:
     list[~azure.ai.textanalytics.v3_1_preview_3.models.MultiLanguageInput]
    """

    _validation = {
        'documents': {'required': True},
    }

    _attribute_map = {
        'documents': {'key': 'documents', 'type': '[MultiLanguageInput]'},
    }

    def __init__(self, **kwargs):
        super(MultiLanguageBatchInput, self).__init__(**kwargs)
        self.documents = kwargs.get('documents')
+
+
class MultiLanguageInput(Model):
    """A single input document to be analyzed by the service.

    All required parameters must be populated in order to send to Azure.

    :param id: Required. A unique, non-empty document identifier.
    :type id: str
    :param text: Required. The input text to process.
    :type text: str
    :param language: (Optional) Two-letter ISO 639-1 language code, e.g.
     "en" for English or "es" for Spanish. When unset the service defaults
     to "en".
    :type language: str
    """

    _validation = {
        'id': {'required': True},
        'text': {'required': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'text': {'key': 'text', 'type': 'str'},
        'language': {'key': 'language', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(MultiLanguageInput, self).__init__(**kwargs)
        for _attr in ('id', 'text', 'language'):
            setattr(self, _attr, kwargs.get(_attr))
+
+
class Pagination(Model):
    """Continuation information for paged results.

    :param next_link: Link to the next page, if any.
    :type next_link: str
    """

    _attribute_map = {
        'next_link': {'key': '@nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(Pagination, self).__init__(**kwargs)
        self.next_link = kwargs.get('next_link')
+
+
class PiiDocumentEntities(Model):
    """PII entities recognized in a single document, plus its redacted text.

    All required parameters must be populated in order to send to Azure.

    :param id: Required. Unique, non-empty document identifier.
    :type id: str
    :param redacted_text: Required. The document text with PII redacted.
    :type redacted_text: str
    :param entities: Required. The entities recognized in the document.
    :type entities: list[~azure.ai.textanalytics.v3_1_preview_3.models.Entity]
    :param warnings: Required. Warnings raised while processing the document.
    :type warnings:
     list[~azure.ai.textanalytics.v3_1_preview_3.models.TextAnalyticsWarning]
    :param statistics: Document-payload statistics; only populated when
     showStats=true was specified in the request.
    :type statistics:
     ~azure.ai.textanalytics.v3_1_preview_3.models.DocumentStatistics
    """

    _validation = {
        'id': {'required': True},
        'redacted_text': {'required': True},
        'entities': {'required': True},
        'warnings': {'required': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'redacted_text': {'key': 'redactedText', 'type': 'str'},
        'entities': {'key': 'entities', 'type': '[Entity]'},
        'warnings': {'key': 'warnings', 'type': '[TextAnalyticsWarning]'},
        'statistics': {'key': 'statistics', 'type': 'DocumentStatistics'},
    }

    def __init__(self, **kwargs):
        super(PiiDocumentEntities, self).__init__(**kwargs)
        # Absent keyword arguments become None attributes.
        for _attr in ('id', 'redacted_text', 'entities', 'warnings',
                      'statistics'):
            setattr(self, _attr, kwargs.get(_attr))
+
+
class PiiResult(Model):
    """Batch result of a PII entity-recognition request.

    All required parameters must be populated in order to send to Azure.

    :param documents: Required. One response entry per input document.
    :type documents:
     list[~azure.ai.textanalytics.v3_1_preview_3.models.PiiDocumentEntities]
    :param errors: Required. Errors keyed by document id.
    :type errors:
     list[~azure.ai.textanalytics.v3_1_preview_3.models.DocumentError]
    :param statistics: Request-level statistics when requested.
    :type statistics:
     ~azure.ai.textanalytics.v3_1_preview_3.models.RequestStatistics
    :param model_version: Required. The model version used for scoring.
    :type model_version: str
    """

    _validation = {
        'documents': {'required': True},
        'errors': {'required': True},
        'model_version': {'required': True},
    }

    _attribute_map = {
        'documents': {'key': 'documents', 'type': '[PiiDocumentEntities]'},
        'errors': {'key': 'errors', 'type': '[DocumentError]'},
        'statistics': {'key': 'statistics', 'type': 'RequestStatistics'},
        'model_version': {'key': 'modelVersion', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(PiiResult, self).__init__(**kwargs)
        # All attributes default to None when not supplied.
        for _attr in ('documents', 'errors', 'statistics', 'model_version'):
            setattr(self, _attr, kwargs.get(_attr))
+
+
class PiiTask(Model):
    """Describes a PII recognition task to run over the input documents.

    :param parameters: Optional tuning parameters for the task.
    :type parameters:
     ~azure.ai.textanalytics.v3_1_preview_3.models.PiiTaskParameters
    """

    _attribute_map = {
        'parameters': {'key': 'parameters', 'type': 'PiiTaskParameters'},
    }

    def __init__(self, **kwargs):
        super(PiiTask, self).__init__(**kwargs)
        # dict.get defaults to None when the kwarg was not supplied.
        self.parameters = kwargs.get('parameters')
+
+
class PiiTaskParameters(Model):
    """Tuning parameters for a PII recognition task.

    :param domain: Possible values include: 'phi', 'none'. Default value:
     "none" .
    :type domain: str
    :param model_version: Default value: "latest" .
    :type model_version: str
    :param string_index_type: Possible values include: 'TextElements_v8',
     'UnicodeCodePoint', 'Utf16CodeUnit'. Default value: "TextElements_v8" .
    :type string_index_type: str or
     ~azure.ai.textanalytics.v3_1_preview_3.models.StringIndexTypeResponse
    """

    _attribute_map = {
        'domain': {'key': 'domain', 'type': 'str'},
        'model_version': {'key': 'model-version', 'type': 'str'},
        'string_index_type': {'key': 'stringIndexType', 'type': 'StringIndexTypeResponse'},
    }

    # Service-defined defaults applied when the caller omits a kwarg.
    def __init__(self, **kwargs):
        super(PiiTaskParameters, self).__init__(**kwargs)
        defaults = {
            'domain': "none",
            'model_version': "latest",
            'string_index_type': "TextElements_v8",
        }
        for field, default in defaults.items():
            setattr(self, field, kwargs.get(field, default))
+
+
class RequestStatistics(Model):
    """Statistics about the request payload; only populated when
    showStats=true was specified in the request.

    All required parameters must be populated in order to send to Azure.

    :param documents_count: Required. Number of documents submitted in the
     request.
    :type documents_count: int
    :param valid_documents_count: Required. Number of valid documents. This
     excludes empty, over-size limit or non-supported languages documents.
    :type valid_documents_count: int
    :param erroneous_documents_count: Required. Number of invalid documents.
     This includes empty, over-size limit or non-supported languages documents.
    :type erroneous_documents_count: int
    :param transactions_count: Required. Number of transactions for the
     request.
    :type transactions_count: long
    """

    _validation = {
        'documents_count': {'required': True},
        'valid_documents_count': {'required': True},
        'erroneous_documents_count': {'required': True},
        'transactions_count': {'required': True},
    }

    _attribute_map = {
        'documents_count': {'key': 'documentsCount', 'type': 'int'},
        'valid_documents_count': {'key': 'validDocumentsCount', 'type': 'int'},
        'erroneous_documents_count': {'key': 'erroneousDocumentsCount', 'type': 'int'},
        'transactions_count': {'key': 'transactionsCount', 'type': 'long'},
    }

    def __init__(self, **kwargs):
        super(RequestStatistics, self).__init__(**kwargs)
        # Mirror each known kwarg onto the instance; missing ones become None.
        for field in ('documents_count', 'valid_documents_count',
                      'erroneous_documents_count', 'transactions_count'):
            setattr(self, field, kwargs.get(field))
+
+
class SentenceAspect(Model):
    """An aspect (target) detected within a sentence, with its sentiment and
    its relations to opinions/aspects.

    All required parameters must be populated in order to send to Azure.

    :param sentiment: Required. Aspect level sentiment for the aspect in the
     sentence. Possible values include: 'positive', 'mixed', 'negative'
    :type sentiment: str or
     ~azure.ai.textanalytics.v3_1_preview_3.models.TokenSentimentValue
    :param confidence_scores: Required. Aspect level sentiment confidence
     scores for the aspect in the sentence.
    :type confidence_scores:
     ~azure.ai.textanalytics.v3_1_preview_3.models.AspectConfidenceScoreLabel
    :param offset: Required. The aspect offset from the start of the sentence.
    :type offset: int
    :param length: Required. The length of the aspect.
    :type length: int
    :param text: Required. The aspect text detected.
    :type text: str
    :param relations: Required. The array of either opinion or aspect object
     which is related to the aspect.
    :type relations:
     list[~azure.ai.textanalytics.v3_1_preview_3.models.AspectRelation]
    """

    _validation = {
        'sentiment': {'required': True},
        'confidence_scores': {'required': True},
        'offset': {'required': True},
        'length': {'required': True},
        'text': {'required': True},
        'relations': {'required': True},
    }

    _attribute_map = {
        'sentiment': {'key': 'sentiment', 'type': 'TokenSentimentValue'},
        'confidence_scores': {'key': 'confidenceScores', 'type': 'AspectConfidenceScoreLabel'},
        'offset': {'key': 'offset', 'type': 'int'},
        'length': {'key': 'length', 'type': 'int'},
        'text': {'key': 'text', 'type': 'str'},
        'relations': {'key': 'relations', 'type': '[AspectRelation]'},
    }

    def __init__(self, **kwargs):
        super(SentenceAspect, self).__init__(**kwargs)
        # Mirror each known kwarg onto the instance; missing ones become None.
        for field in ('sentiment', 'confidence_scores', 'offset',
                      'length', 'text', 'relations'):
            setattr(self, field, kwargs.get(field))
+
+
class SentenceOpinion(Model):
    """An opinion expressed about an aspect within a sentence.

    All required parameters must be populated in order to send to Azure.

    :param sentiment: Required. Opinion level sentiment for the aspect in the
     sentence. Possible values include: 'positive', 'mixed', 'negative'
    :type sentiment: str or
     ~azure.ai.textanalytics.v3_1_preview_3.models.TokenSentimentValue
    :param confidence_scores: Required. Opinion level sentiment confidence
     scores for the aspect in the sentence.
    :type confidence_scores:
     ~azure.ai.textanalytics.v3_1_preview_3.models.AspectConfidenceScoreLabel
    :param offset: Required. The opinion offset from the start of the
     sentence.
    :type offset: int
    :param length: Required. The length of the opinion.
    :type length: int
    :param text: Required. The opinion text detected.
    :type text: str
    :param is_negated: Required. The indicator representing if the opinion is
     negated.
    :type is_negated: bool
    """

    _validation = {
        'sentiment': {'required': True},
        'confidence_scores': {'required': True},
        'offset': {'required': True},
        'length': {'required': True},
        'text': {'required': True},
        'is_negated': {'required': True},
    }

    _attribute_map = {
        'sentiment': {'key': 'sentiment', 'type': 'TokenSentimentValue'},
        'confidence_scores': {'key': 'confidenceScores', 'type': 'AspectConfidenceScoreLabel'},
        'offset': {'key': 'offset', 'type': 'int'},
        'length': {'key': 'length', 'type': 'int'},
        'text': {'key': 'text', 'type': 'str'},
        'is_negated': {'key': 'isNegated', 'type': 'bool'},
    }

    def __init__(self, **kwargs):
        super(SentenceOpinion, self).__init__(**kwargs)
        # Mirror each known kwarg onto the instance; missing ones become None.
        for field in ('sentiment', 'confidence_scores', 'offset',
                      'length', 'text', 'is_negated'):
            setattr(self, field, kwargs.get(field))
+
+
class SentenceSentiment(Model):
    """Sentiment prediction for a single sentence, optionally including
    aspect/opinion mining results.

    All required parameters must be populated in order to send to Azure.

    :param text: Required. The sentence text.
    :type text: str
    :param sentiment: Required. The predicted Sentiment for the sentence.
     Possible values include: 'positive', 'neutral', 'negative'
    :type sentiment: str or
     ~azure.ai.textanalytics.v3_1_preview_3.models.SentenceSentimentValue
    :param confidence_scores: Required. The sentiment confidence score between
     0 and 1 for the sentence for all classes.
    :type confidence_scores:
     ~azure.ai.textanalytics.v3_1_preview_3.models.SentimentConfidenceScorePerLabel
    :param offset: Required. The sentence offset from the start of the
     document.
    :type offset: int
    :param length: Required. The length of the sentence.
    :type length: int
    :param aspects: The array of aspect object for the sentence.
    :type aspects:
     list[~azure.ai.textanalytics.v3_1_preview_3.models.SentenceAspect]
    :param opinions: The array of opinion object for the sentence.
    :type opinions:
     list[~azure.ai.textanalytics.v3_1_preview_3.models.SentenceOpinion]
    """

    _validation = {
        'text': {'required': True},
        'sentiment': {'required': True},
        'confidence_scores': {'required': True},
        'offset': {'required': True},
        'length': {'required': True},
    }

    _attribute_map = {
        'text': {'key': 'text', 'type': 'str'},
        'sentiment': {'key': 'sentiment', 'type': 'SentenceSentimentValue'},
        'confidence_scores': {'key': 'confidenceScores', 'type': 'SentimentConfidenceScorePerLabel'},
        'offset': {'key': 'offset', 'type': 'int'},
        'length': {'key': 'length', 'type': 'int'},
        'aspects': {'key': 'aspects', 'type': '[SentenceAspect]'},
        'opinions': {'key': 'opinions', 'type': '[SentenceOpinion]'},
    }

    def __init__(self, **kwargs):
        super(SentenceSentiment, self).__init__(**kwargs)
        # Mirror each known kwarg onto the instance; missing ones become None.
        for field in ('text', 'sentiment', 'confidence_scores',
                      'offset', 'length', 'aspects', 'opinions'):
            setattr(self, field, kwargs.get(field))
+
+
class SentimentConfidenceScorePerLabel(Model):
    """Represents the confidence scores between 0 and 1 across all sentiment
    classes: positive, neutral, negative.

    All required parameters must be populated in order to send to Azure.

    :param positive: Required.
    :type positive: float
    :param neutral: Required.
    :type neutral: float
    :param negative: Required.
    :type negative: float
    """

    _validation = {
        'positive': {'required': True},
        'neutral': {'required': True},
        'negative': {'required': True},
    }

    _attribute_map = {
        'positive': {'key': 'positive', 'type': 'float'},
        'neutral': {'key': 'neutral', 'type': 'float'},
        'negative': {'key': 'negative', 'type': 'float'},
    }

    def __init__(self, **kwargs):
        super(SentimentConfidenceScorePerLabel, self).__init__(**kwargs)
        # Mirror each known kwarg onto the instance; missing ones become None.
        for field in ('positive', 'neutral', 'negative'):
            setattr(self, field, kwargs.get(field))
+
+
class SentimentResponse(Model):
    """Batch-level sentiment analysis response.

    All required parameters must be populated in order to send to Azure.

    :param documents: Required. Sentiment analysis per document.
    :type documents:
     list[~azure.ai.textanalytics.v3_1_preview_3.models.DocumentSentiment]
    :param errors: Required. Errors by document id.
    :type errors:
     list[~azure.ai.textanalytics.v3_1_preview_3.models.DocumentError]
    :param statistics: Request-level statistics, present when showStats=true
     was specified in the request.
    :type statistics:
     ~azure.ai.textanalytics.v3_1_preview_3.models.RequestStatistics
    :param model_version: Required. This field indicates which model is used
     for scoring.
    :type model_version: str
    """

    _validation = {
        'documents': {'required': True},
        'errors': {'required': True},
        'model_version': {'required': True},
    }

    _attribute_map = {
        'documents': {'key': 'documents', 'type': '[DocumentSentiment]'},
        'errors': {'key': 'errors', 'type': '[DocumentError]'},
        'statistics': {'key': 'statistics', 'type': 'RequestStatistics'},
        'model_version': {'key': 'modelVersion', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(SentimentResponse, self).__init__(**kwargs)
        # Mirror each known kwarg onto the instance; missing ones become None.
        for field in ('documents', 'errors', 'statistics', 'model_version'):
            setattr(self, field, kwargs.get(field))
+
+
class TasksState(Model):
    """State of the tasks belonging to an analysis job.

    All required parameters must be populated in order to send to Azure.

    :param tasks: Required.
    :type tasks:
     ~azure.ai.textanalytics.v3_1_preview_3.models.AnalyzeJobStateTasks
    """

    _validation = {
        'tasks': {'required': True},
    }

    _attribute_map = {
        'tasks': {'key': 'tasks', 'type': 'AnalyzeJobStateTasks'},
    }

    def __init__(self, **kwargs):
        super(TasksState, self).__init__(**kwargs)
        # dict.get defaults to None when the kwarg was not supplied.
        self.tasks = kwargs.get('tasks')
+
+
class TextAnalyticsError(Model):
    """Error information returned by the Text Analytics service.

    All required parameters must be populated in order to send to Azure.

    :param code: Required. Error code. Possible values include:
     'InvalidRequest', 'InvalidArgument', 'InternalServerError',
     'ServiceUnavailable', 'NotFound'
    :type code: str or
     ~azure.ai.textanalytics.v3_1_preview_3.models.ErrorCodeValue
    :param message: Required. Error message.
    :type message: str
    :param target: Error target.
    :type target: str
    :param innererror: Inner error contains more specific information.
    :type innererror: ~azure.ai.textanalytics.v3_1_preview_3.models.InnerError
    :param details: Details about specific errors that led to this reported
     error.
    :type details:
     list[~azure.ai.textanalytics.v3_1_preview_3.models.TextAnalyticsError]
    """

    _validation = {
        'code': {'required': True},
        'message': {'required': True},
    }

    _attribute_map = {
        'code': {'key': 'code', 'type': 'ErrorCodeValue'},
        'message': {'key': 'message', 'type': 'str'},
        'target': {'key': 'target', 'type': 'str'},
        'innererror': {'key': 'innererror', 'type': 'InnerError'},
        'details': {'key': 'details', 'type': '[TextAnalyticsError]'},
    }

    def __init__(self, **kwargs):
        super(TextAnalyticsError, self).__init__(**kwargs)
        # Mirror each known kwarg onto the instance; missing ones become None.
        for field in ('code', 'message', 'target', 'innererror', 'details'):
            setattr(self, field, kwargs.get(field))
+
+
class TextAnalyticsWarning(Model):
    """Warning information attached to a processed document.

    All required parameters must be populated in order to send to Azure.

    :param code: Required. Error code. Possible values include:
     'LongWordsInDocument', 'DocumentTruncated'
    :type code: str or
     ~azure.ai.textanalytics.v3_1_preview_3.models.WarningCodeValue
    :param message: Required. Warning message.
    :type message: str
    :param target_ref: A JSON pointer reference indicating the target object.
    :type target_ref: str
    """

    _validation = {
        'code': {'required': True},
        'message': {'required': True},
    }

    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'target_ref': {'key': 'targetRef', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(TextAnalyticsWarning, self).__init__(**kwargs)
        # Mirror each known kwarg onto the instance; missing ones become None.
        for field in ('code', 'message', 'target_ref'):
            setattr(self, field, kwargs.get(field))
diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_3/models/_models_py3.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_3/models/_models_py3.py
new file mode 100644
index 000000000000..90aaaa01dfb1
--- /dev/null
+++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_3/models/_models_py3.py
@@ -0,0 +1,2155 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from msrest.serialization import Model
+
+
class AnalyzeBatchInput(Model):
    """Input payload submitting a batch of documents and the analysis tasks to
    run over them.

    All required parameters must be populated in order to send to Azure.

    :param display_name: Optional display name for the analysis job.
    :type display_name: str
    :param analysis_input: Required.
    :type analysis_input:
     ~azure.ai.textanalytics.v3_1_preview_3.models.MultiLanguageBatchInput
    :param tasks: Required. The set of tasks to execute on the input
     documents. Cannot specify the same task more than once.
    :type tasks:
     ~azure.ai.textanalytics.v3_1_preview_3.models.JobManifestTasks
    """

    _validation = {
        'analysis_input': {'required': True},
        'tasks': {'required': True},
    }

    _attribute_map = {
        'display_name': {'key': 'displayName', 'type': 'str'},
        'analysis_input': {'key': 'analysisInput', 'type': 'MultiLanguageBatchInput'},
        'tasks': {'key': 'tasks', 'type': 'JobManifestTasks'},
    }

    def __init__(self, *, analysis_input, tasks, display_name: str=None, **kwargs) -> None:
        super(AnalyzeBatchInput, self).__init__(**kwargs)
        # Required fields first, then the optional display name.
        self.analysis_input = analysis_input
        self.tasks = tasks
        self.display_name = display_name
+
+
class JobMetadata(Model):
    """Metadata common to all long-running analysis jobs.

    All required parameters must be populated in order to send to Azure.

    :param created_date_time: Required.
    :type created_date_time: datetime
    :param expiration_date_time:
    :type expiration_date_time: datetime
    :param job_id: Required.
    :type job_id: str
    :param last_update_date_time: Required.
    :type last_update_date_time: datetime
    :param status: Required. Possible values include: 'notStarted', 'running',
     'succeeded', 'failed', 'cancelled', 'cancelling', 'partiallyCompleted'
    :type status: str or ~azure.ai.textanalytics.v3_1_preview_3.models.State
    """

    _validation = {
        'created_date_time': {'required': True},
        'job_id': {'required': True},
        'last_update_date_time': {'required': True},
        'status': {'required': True},
    }

    _attribute_map = {
        'created_date_time': {'key': 'createdDateTime', 'type': 'iso-8601'},
        'expiration_date_time': {'key': 'expirationDateTime', 'type': 'iso-8601'},
        'job_id': {'key': 'jobId', 'type': 'str'},
        'last_update_date_time': {'key': 'lastUpdateDateTime', 'type': 'iso-8601'},
        'status': {'key': 'status', 'type': 'State'},
    }

    def __init__(self, *, created_date_time, job_id: str, last_update_date_time, status, expiration_date_time=None, **kwargs) -> None:
        super(JobMetadata, self).__init__(**kwargs)
        # Identity and state first; timestamps after.
        self.job_id = job_id
        self.status = status
        self.created_date_time = created_date_time
        self.last_update_date_time = last_update_date_time
        self.expiration_date_time = expiration_date_time
+
+
class AnalyzeJobMetadata(JobMetadata):
    """Metadata for an /analyze job; extends JobMetadata with an optional
    display name.

    All required parameters must be populated in order to send to Azure.

    :param created_date_time: Required.
    :type created_date_time: datetime
    :param expiration_date_time:
    :type expiration_date_time: datetime
    :param job_id: Required.
    :type job_id: str
    :param last_update_date_time: Required.
    :type last_update_date_time: datetime
    :param status: Required. Possible values include: 'notStarted', 'running',
     'succeeded', 'failed', 'cancelled', 'cancelling', 'partiallyCompleted'
    :type status: str or ~azure.ai.textanalytics.v3_1_preview_3.models.State
    :param display_name:
    :type display_name: str
    """

    _validation = {
        'created_date_time': {'required': True},
        'job_id': {'required': True},
        'last_update_date_time': {'required': True},
        'status': {'required': True},
    }

    _attribute_map = {
        'created_date_time': {'key': 'createdDateTime', 'type': 'iso-8601'},
        'expiration_date_time': {'key': 'expirationDateTime', 'type': 'iso-8601'},
        'job_id': {'key': 'jobId', 'type': 'str'},
        'last_update_date_time': {'key': 'lastUpdateDateTime', 'type': 'iso-8601'},
        'status': {'key': 'status', 'type': 'State'},
        'display_name': {'key': 'displayName', 'type': 'str'},
    }

    def __init__(self, *, created_date_time, job_id: str, last_update_date_time, status, expiration_date_time=None, display_name: str=None, **kwargs) -> None:
        # Base class handles everything except the display name.
        super(AnalyzeJobMetadata, self).__init__(
            created_date_time=created_date_time,
            expiration_date_time=expiration_date_time,
            job_id=job_id,
            last_update_date_time=last_update_date_time,
            status=status,
            **kwargs)
        self.display_name = display_name
+
+
class AnalyzeJobState(Model):
    """Overall state of an /analyze job, including task progress, errors and
    paging information.

    All required parameters must be populated in order to send to Azure.

    :param display_name:
    :type display_name: str
    :param tasks: Required.
    :type tasks:
     ~azure.ai.textanalytics.v3_1_preview_3.models.AnalyzeJobStateTasks
    :param errors:
    :type errors:
     list[~azure.ai.textanalytics.v3_1_preview_3.models.TextAnalyticsError]
    :param statistics:
    :type statistics:
     ~azure.ai.textanalytics.v3_1_preview_3.models.RequestStatistics
    :param next_link:
    :type next_link: str
    """

    _validation = {
        'tasks': {'required': True},
    }

    _attribute_map = {
        'display_name': {'key': 'displayName', 'type': 'str'},
        'tasks': {'key': 'tasks', 'type': 'AnalyzeJobStateTasks'},
        'errors': {'key': 'errors', 'type': '[TextAnalyticsError]'},
        'statistics': {'key': 'statistics', 'type': 'RequestStatistics'},
        'next_link': {'key': '@nextLink', 'type': 'str'},
    }

    def __init__(self, *, tasks, display_name: str=None, errors=None, statistics=None, next_link: str=None, **kwargs) -> None:
        super(AnalyzeJobState, self).__init__(**kwargs)
        # Required field first, then the optional ones.
        self.tasks = tasks
        self.display_name = display_name
        self.errors = errors
        self.statistics = statistics
        self.next_link = next_link
+
+
class AnalyzeJobStateTasks(Model):
    """Progress counters and per-kind task results for an /analyze job.

    All required parameters must be populated in order to send to Azure.

    :param details:
    :type details:
     ~azure.ai.textanalytics.v3_1_preview_3.models.AnalyzeJobStateTasksDetails
    :param completed: Required.
    :type completed: int
    :param failed: Required.
    :type failed: int
    :param in_progress: Required.
    :type in_progress: int
    :param total: Required.
    :type total: int
    :param entity_recognition_tasks:
    :type entity_recognition_tasks:
     list[~azure.ai.textanalytics.v3_1_preview_3.models.AnalyzeJobStateTasksEntityRecognitionTasksItem]
    :param entity_recognition_pii_tasks:
    :type entity_recognition_pii_tasks:
     list[~azure.ai.textanalytics.v3_1_preview_3.models.AnalyzeJobStateTasksEntityRecognitionPiiTasksItem]
    :param key_phrase_extraction_tasks:
    :type key_phrase_extraction_tasks:
     list[~azure.ai.textanalytics.v3_1_preview_3.models.AnalyzeJobStateTasksKeyPhraseExtractionTasksItem]
    """

    _validation = {
        'completed': {'required': True},
        'failed': {'required': True},
        'in_progress': {'required': True},
        'total': {'required': True},
    }

    _attribute_map = {
        'details': {'key': 'details', 'type': 'AnalyzeJobStateTasksDetails'},
        'completed': {'key': 'completed', 'type': 'int'},
        'failed': {'key': 'failed', 'type': 'int'},
        'in_progress': {'key': 'inProgress', 'type': 'int'},
        'total': {'key': 'total', 'type': 'int'},
        'entity_recognition_tasks': {'key': 'entityRecognitionTasks', 'type': '[AnalyzeJobStateTasksEntityRecognitionTasksItem]'},
        'entity_recognition_pii_tasks': {'key': 'entityRecognitionPiiTasks', 'type': '[AnalyzeJobStateTasksEntityRecognitionPiiTasksItem]'},
        'key_phrase_extraction_tasks': {'key': 'keyPhraseExtractionTasks', 'type': '[AnalyzeJobStateTasksKeyPhraseExtractionTasksItem]'},
    }

    def __init__(self, *, completed: int, failed: int, in_progress: int, total: int, details=None, entity_recognition_tasks=None, entity_recognition_pii_tasks=None, key_phrase_extraction_tasks=None, **kwargs) -> None:
        super(AnalyzeJobStateTasks, self).__init__(**kwargs)
        # Required counters first.
        self.completed = completed
        self.failed = failed
        self.in_progress = in_progress
        self.total = total
        # Optional detail object and per-kind task result lists.
        self.details = details
        self.entity_recognition_tasks = entity_recognition_tasks
        self.entity_recognition_pii_tasks = entity_recognition_pii_tasks
        self.key_phrase_extraction_tasks = key_phrase_extraction_tasks
+
+
class TaskState(Model):
    """Execution state shared by all task result items.

    All required parameters must be populated in order to send to Azure.

    :param last_update_date_time: Required.
    :type last_update_date_time: datetime
    :param name:
    :type name: str
    :param status: Required.
    :type status: object
    """

    _validation = {
        'last_update_date_time': {'required': True},
        'status': {'required': True},
    }

    _attribute_map = {
        'last_update_date_time': {'key': 'lastUpdateDateTime', 'type': 'iso-8601'},
        'name': {'key': 'name', 'type': 'str'},
        'status': {'key': 'status', 'type': 'object'},
    }

    def __init__(self, *, last_update_date_time, status, name: str=None, **kwargs) -> None:
        super(TaskState, self).__init__(**kwargs)
        # Required fields first, optional name last.
        self.last_update_date_time = last_update_date_time
        self.status = status
        self.name = name
+
+
class AnalyzeJobStateTasksDetails(TaskState):
    """Detail entry for a job's task list; structurally identical to
    TaskState.

    All required parameters must be populated in order to send to Azure.

    :param last_update_date_time: Required.
    :type last_update_date_time: datetime
    :param name:
    :type name: str
    :param status: Required.
    :type status: object
    """

    _validation = {
        'last_update_date_time': {'required': True},
        'status': {'required': True},
    }

    _attribute_map = {
        'last_update_date_time': {'key': 'lastUpdateDateTime', 'type': 'iso-8601'},
        'name': {'key': 'name', 'type': 'str'},
        'status': {'key': 'status', 'type': 'object'},
    }

    def __init__(self, *, last_update_date_time, status, name: str=None, **kwargs) -> None:
        # No fields of its own; everything is handled by TaskState.
        super(AnalyzeJobStateTasksDetails, self).__init__(
            last_update_date_time=last_update_date_time,
            name=name,
            status=status,
            **kwargs)
+
+
class AnalyzeJobStateTasksEntityRecognitionPiiTasksItem(TaskState):
    """Task state plus results for a PII entity recognition task.

    All required parameters must be populated in order to send to Azure.

    :param last_update_date_time: Required.
    :type last_update_date_time: datetime
    :param name:
    :type name: str
    :param status: Required.
    :type status: object
    :param results: Required.
    :type results: ~azure.ai.textanalytics.v3_1_preview_3.models.PiiResult
    """

    _validation = {
        'last_update_date_time': {'required': True},
        'status': {'required': True},
        'results': {'required': True},
    }

    _attribute_map = {
        'last_update_date_time': {'key': 'lastUpdateDateTime', 'type': 'iso-8601'},
        'name': {'key': 'name', 'type': 'str'},
        'status': {'key': 'status', 'type': 'object'},
        'results': {'key': 'results', 'type': 'PiiResult'},
    }

    def __init__(self, *, last_update_date_time, status, results, name: str=None, **kwargs) -> None:
        # TaskState handles the shared state fields; only results is local.
        super(AnalyzeJobStateTasksEntityRecognitionPiiTasksItem, self).__init__(
            last_update_date_time=last_update_date_time,
            name=name,
            status=status,
            **kwargs)
        self.results = results
+
+
class AnalyzeJobStateTasksEntityRecognitionTasksItem(TaskState):
    """Task state plus results for an entity recognition task.

    All required parameters must be populated in order to send to Azure.

    :param last_update_date_time: Required.
    :type last_update_date_time: datetime
    :param name:
    :type name: str
    :param status: Required.
    :type status: object
    :param results: Required.
    :type results:
     ~azure.ai.textanalytics.v3_1_preview_3.models.EntitiesResult
    """

    _validation = {
        'last_update_date_time': {'required': True},
        'status': {'required': True},
        'results': {'required': True},
    }

    _attribute_map = {
        'last_update_date_time': {'key': 'lastUpdateDateTime', 'type': 'iso-8601'},
        'name': {'key': 'name', 'type': 'str'},
        'status': {'key': 'status', 'type': 'object'},
        'results': {'key': 'results', 'type': 'EntitiesResult'},
    }

    def __init__(self, *, last_update_date_time, status, results, name: str=None, **kwargs) -> None:
        # TaskState handles the shared state fields; only results is local.
        super(AnalyzeJobStateTasksEntityRecognitionTasksItem, self).__init__(
            last_update_date_time=last_update_date_time,
            name=name,
            status=status,
            **kwargs)
        self.results = results
+
+
class AnalyzeJobStateTasksKeyPhraseExtractionTasksItem(TaskState):
    """Task state plus results for a key phrase extraction task.

    All required parameters must be populated in order to send to Azure.

    :param last_update_date_time: Required.
    :type last_update_date_time: datetime
    :param name:
    :type name: str
    :param status: Required.
    :type status: object
    :param results: Required.
    :type results:
     ~azure.ai.textanalytics.v3_1_preview_3.models.KeyPhraseResult
    """

    _validation = {
        'last_update_date_time': {'required': True},
        'status': {'required': True},
        'results': {'required': True},
    }

    _attribute_map = {
        'last_update_date_time': {'key': 'lastUpdateDateTime', 'type': 'iso-8601'},
        'name': {'key': 'name', 'type': 'str'},
        'status': {'key': 'status', 'type': 'object'},
        'results': {'key': 'results', 'type': 'KeyPhraseResult'},
    }

    def __init__(self, *, last_update_date_time, status, results, name: str=None, **kwargs) -> None:
        # TaskState handles the shared state fields; only results is local.
        super(AnalyzeJobStateTasksKeyPhraseExtractionTasksItem, self).__init__(
            last_update_date_time=last_update_date_time,
            name=name,
            status=status,
            **kwargs)
        self.results = results
+
+
class AspectConfidenceScoreLabel(Model):
    """Represents the confidence scores across all sentiment classes:
    positive, neutral, negative.

    All required parameters must be populated in order to send to Azure.

    :param positive: Required.
    :type positive: float
    :param negative: Required.
    :type negative: float
    """

    _validation = {
        'positive': {'required': True},
        'negative': {'required': True},
    }

    _attribute_map = {
        'positive': {'key': 'positive', 'type': 'float'},
        'negative': {'key': 'negative', 'type': 'float'},
    }

    def __init__(self, *, positive: float, negative: float, **kwargs) -> None:
        super(AspectConfidenceScoreLabel, self).__init__(**kwargs)
        self.negative = negative
        self.positive = positive
+
+
class AspectRelation(Model):
    """A link from an aspect to a related opinion or aspect object.

    All required parameters must be populated in order to send to Azure.

    :param relation_type: Required. The type related to the aspect. Possible
     values include: 'opinion', 'aspect'
    :type relation_type: str or
     ~azure.ai.textanalytics.v3_1_preview_3.models.AspectRelationType
    :param ref: Required. The JSON pointer indicating the linked object.
    :type ref: str
    """

    _validation = {
        'relation_type': {'required': True},
        'ref': {'required': True},
    }

    _attribute_map = {
        'relation_type': {'key': 'relationType', 'type': 'AspectRelationType'},
        'ref': {'key': 'ref', 'type': 'str'},
    }

    def __init__(self, *, relation_type, ref: str, **kwargs) -> None:
        super(AspectRelation, self).__init__(**kwargs)
        self.ref = ref
        self.relation_type = relation_type
+
+
class DetectedLanguage(Model):
    """A language detected for a document, with its confidence.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. Long name of a detected language (e.g. English,
     French).
    :type name: str
    :param iso6391_name: Required. A two letter representation of the detected
     language according to the ISO 639-1 standard (e.g. en, fr).
    :type iso6391_name: str
    :param confidence_score: Required. A confidence score between 0 and 1.
     Scores close to 1 indicate 100% certainty that the identified language is
     true.
    :type confidence_score: float
    """

    _validation = {
        'name': {'required': True},
        'iso6391_name': {'required': True},
        'confidence_score': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'iso6391_name': {'key': 'iso6391Name', 'type': 'str'},
        'confidence_score': {'key': 'confidenceScore', 'type': 'float'},
    }

    def __init__(self, *, name: str, iso6391_name: str, confidence_score: float, **kwargs) -> None:
        super(DetectedLanguage, self).__init__(**kwargs)
        self.confidence_score = confidence_score
        self.iso6391_name = iso6391_name
        self.name = name
+
+
class DocumentEntities(Model):
    """Per-document result of an entity recognition call.

    All required parameters must be populated in order to send to Azure.

    :param id: Required. Unique, non-empty document identifier.
    :type id: str
    :param entities: Required. Recognized entities in the document.
    :type entities: list[~azure.ai.textanalytics.v3_1_preview_3.models.Entity]
    :param warnings: Required. Warnings encountered while processing document.
    :type warnings:
     list[~azure.ai.textanalytics.v3_1_preview_3.models.TextAnalyticsWarning]
    :param statistics: Only present when showStats=true was specified in the
     request; information about the document payload.
    :type statistics:
     ~azure.ai.textanalytics.v3_1_preview_3.models.DocumentStatistics
    """

    _validation = {
        "id": {"required": True},
        "entities": {"required": True},
        "warnings": {"required": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "entities": {"key": "entities", "type": "[Entity]"},
        "warnings": {"key": "warnings", "type": "[TextAnalyticsWarning]"},
        "statistics": {"key": "statistics", "type": "DocumentStatistics"},
    }

    def __init__(self, *, id: str, entities, warnings, statistics=None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.id = id
        self.entities = entities
        self.warnings = warnings
        self.statistics = statistics
+
+
class DocumentError(Model):
    """Associates a document id with the error it produced.

    All required parameters must be populated in order to send to Azure.

    :param id: Required. Document Id.
    :type id: str
    :param error: Required. Document Error.
    :type error:
     ~azure.ai.textanalytics.v3_1_preview_3.models.TextAnalyticsError
    """

    _validation = {
        "id": {"required": True},
        "error": {"required": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "error": {"key": "error", "type": "TextAnalyticsError"},
    }

    def __init__(self, *, id: str, error, **kwargs) -> None:
        super().__init__(**kwargs)
        self.id = id
        self.error = error
+
+
class DocumentHealthcareEntities(Model):
    """Per-document result of a healthcare entities analysis.

    All required parameters must be populated in order to send to Azure.

    :param id: Required. Unique, non-empty document identifier.
    :type id: str
    :param entities: Required. Healthcare entities.
    :type entities:
     list[~azure.ai.textanalytics.v3_1_preview_3.models.HealthcareEntity]
    :param relations: Required. Healthcare entity relations.
    :type relations:
     list[~azure.ai.textanalytics.v3_1_preview_3.models.HealthcareRelation]
    :param warnings: Required. Warnings encountered while processing document.
    :type warnings:
     list[~azure.ai.textanalytics.v3_1_preview_3.models.TextAnalyticsWarning]
    :param statistics: Only present when showStats=true was specified in the
     request; information about the document payload.
    :type statistics:
     ~azure.ai.textanalytics.v3_1_preview_3.models.DocumentStatistics
    """

    _validation = {
        "id": {"required": True},
        "entities": {"required": True},
        "relations": {"required": True},
        "warnings": {"required": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "entities": {"key": "entities", "type": "[HealthcareEntity]"},
        "relations": {"key": "relations", "type": "[HealthcareRelation]"},
        "warnings": {"key": "warnings", "type": "[TextAnalyticsWarning]"},
        "statistics": {"key": "statistics", "type": "DocumentStatistics"},
    }

    def __init__(self, *, id: str, entities, relations, warnings, statistics=None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.id = id
        self.entities = entities
        self.relations = relations
        self.warnings = warnings
        self.statistics = statistics
+
+
class DocumentKeyPhrases(Model):
    """Per-document result of a key-phrase extraction call.

    All required parameters must be populated in order to send to Azure.

    :param id: Required. Unique, non-empty document identifier.
    :type id: str
    :param key_phrases: Required. A list of representative words or phrases.
     The number of key phrases returned is proportional to the number of words
     in the input document.
    :type key_phrases: list[str]
    :param warnings: Required. Warnings encountered while processing document.
    :type warnings:
     list[~azure.ai.textanalytics.v3_1_preview_3.models.TextAnalyticsWarning]
    :param statistics: Only present when showStats=true was specified in the
     request; information about the document payload.
    :type statistics:
     ~azure.ai.textanalytics.v3_1_preview_3.models.DocumentStatistics
    """

    _validation = {
        "id": {"required": True},
        "key_phrases": {"required": True},
        "warnings": {"required": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "key_phrases": {"key": "keyPhrases", "type": "[str]"},
        "warnings": {"key": "warnings", "type": "[TextAnalyticsWarning]"},
        "statistics": {"key": "statistics", "type": "DocumentStatistics"},
    }

    def __init__(self, *, id: str, key_phrases, warnings, statistics=None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.id = id
        self.key_phrases = key_phrases
        self.warnings = warnings
        self.statistics = statistics
+
+
class DocumentLanguage(Model):
    """Per-document result of a language detection call.

    All required parameters must be populated in order to send to Azure.

    :param id: Required. Unique, non-empty document identifier.
    :type id: str
    :param detected_language: Required. Detected Language.
    :type detected_language:
     ~azure.ai.textanalytics.v3_1_preview_3.models.DetectedLanguage
    :param warnings: Required. Warnings encountered while processing document.
    :type warnings:
     list[~azure.ai.textanalytics.v3_1_preview_3.models.TextAnalyticsWarning]
    :param statistics: Only present when showStats=true was specified in the
     request; information about the document payload.
    :type statistics:
     ~azure.ai.textanalytics.v3_1_preview_3.models.DocumentStatistics
    """

    _validation = {
        "id": {"required": True},
        "detected_language": {"required": True},
        "warnings": {"required": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "detected_language": {"key": "detectedLanguage", "type": "DetectedLanguage"},
        "warnings": {"key": "warnings", "type": "[TextAnalyticsWarning]"},
        "statistics": {"key": "statistics", "type": "DocumentStatistics"},
    }

    def __init__(self, *, id: str, detected_language, warnings, statistics=None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.id = id
        self.detected_language = detected_language
        self.warnings = warnings
        self.statistics = statistics
+
+
class DocumentLinkedEntities(Model):
    """Per-document result of an entity linking call.

    All required parameters must be populated in order to send to Azure.

    :param id: Required. Unique, non-empty document identifier.
    :type id: str
    :param entities: Required. Recognized well-known entities in the document.
    :type entities:
     list[~azure.ai.textanalytics.v3_1_preview_3.models.LinkedEntity]
    :param warnings: Required. Warnings encountered while processing document.
    :type warnings:
     list[~azure.ai.textanalytics.v3_1_preview_3.models.TextAnalyticsWarning]
    :param statistics: Only present when showStats=true was specified in the
     request; information about the document payload.
    :type statistics:
     ~azure.ai.textanalytics.v3_1_preview_3.models.DocumentStatistics
    """

    _validation = {
        "id": {"required": True},
        "entities": {"required": True},
        "warnings": {"required": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "entities": {"key": "entities", "type": "[LinkedEntity]"},
        "warnings": {"key": "warnings", "type": "[TextAnalyticsWarning]"},
        "statistics": {"key": "statistics", "type": "DocumentStatistics"},
    }

    def __init__(self, *, id: str, entities, warnings, statistics=None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.id = id
        self.entities = entities
        self.warnings = warnings
        self.statistics = statistics
+
+
class DocumentSentiment(Model):
    """Per-document result of a sentiment analysis call.

    All required parameters must be populated in order to send to Azure.

    :param id: Required. Unique, non-empty document identifier.
    :type id: str
    :param sentiment: Required. Predicted sentiment for document (Negative,
     Neutral, Positive, or Mixed). Possible values include: 'positive',
     'neutral', 'negative', 'mixed'
    :type sentiment: str or
     ~azure.ai.textanalytics.v3_1_preview_3.models.DocumentSentimentValue
    :param statistics: Document payload statistics, when requested.
    :type statistics:
     ~azure.ai.textanalytics.v3_1_preview_3.models.DocumentStatistics
    :param confidence_scores: Required. Document level sentiment confidence
     scores between 0 and 1 for each sentiment class.
    :type confidence_scores:
     ~azure.ai.textanalytics.v3_1_preview_3.models.SentimentConfidenceScorePerLabel
    :param sentences: Required. Sentence level sentiment analysis.
    :type sentences:
     list[~azure.ai.textanalytics.v3_1_preview_3.models.SentenceSentiment]
    :param warnings: Required. Warnings encountered while processing document.
    :type warnings:
     list[~azure.ai.textanalytics.v3_1_preview_3.models.TextAnalyticsWarning]
    """

    _validation = {
        "id": {"required": True},
        "sentiment": {"required": True},
        "confidence_scores": {"required": True},
        "sentences": {"required": True},
        "warnings": {"required": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "sentiment": {"key": "sentiment", "type": "DocumentSentimentValue"},
        "statistics": {"key": "statistics", "type": "DocumentStatistics"},
        "confidence_scores": {"key": "confidenceScores", "type": "SentimentConfidenceScorePerLabel"},
        "sentences": {"key": "sentences", "type": "[SentenceSentiment]"},
        "warnings": {"key": "warnings", "type": "[TextAnalyticsWarning]"},
    }

    def __init__(self, *, id: str, sentiment, confidence_scores, sentences, warnings, statistics=None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.id = id
        self.sentiment = sentiment
        # Attribute order mirrors the wire schema, not the keyword order.
        self.statistics = statistics
        self.confidence_scores = confidence_scores
        self.sentences = sentences
        self.warnings = warnings
+
+
class DocumentStatistics(Model):
    """if showStats=true was specified in the request this field will contain
    information about the document payload.

    All required parameters must be populated in order to send to Azure.

    :param characters_count: Required. Number of text elements recognized in
     the document.
    :type characters_count: int
    :param transactions_count: Required. Number of transactions for the
     document.
    :type transactions_count: int
    """

    _validation = {
        "characters_count": {"required": True},
        "transactions_count": {"required": True},
    }

    _attribute_map = {
        "characters_count": {"key": "charactersCount", "type": "int"},
        "transactions_count": {"key": "transactionsCount", "type": "int"},
    }

    def __init__(self, *, characters_count: int, transactions_count: int, **kwargs) -> None:
        super().__init__(**kwargs)
        self.characters_count = characters_count
        self.transactions_count = transactions_count
+
+
class EntitiesResult(Model):
    """Batch-level response for entity recognition.

    All required parameters must be populated in order to send to Azure.

    :param documents: Required. Response by document
    :type documents:
     list[~azure.ai.textanalytics.v3_1_preview_3.models.DocumentEntities]
    :param errors: Required. Errors by document id.
    :type errors:
     list[~azure.ai.textanalytics.v3_1_preview_3.models.DocumentError]
    :param statistics: Request-level statistics, when requested.
    :type statistics:
     ~azure.ai.textanalytics.v3_1_preview_3.models.RequestStatistics
    :param model_version: Required. This field indicates which model is used
     for scoring.
    :type model_version: str
    """

    _validation = {
        "documents": {"required": True},
        "errors": {"required": True},
        "model_version": {"required": True},
    }

    _attribute_map = {
        "documents": {"key": "documents", "type": "[DocumentEntities]"},
        "errors": {"key": "errors", "type": "[DocumentError]"},
        "statistics": {"key": "statistics", "type": "RequestStatistics"},
        "model_version": {"key": "modelVersion", "type": "str"},
    }

    def __init__(self, *, documents, errors, model_version: str, statistics=None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.documents = documents
        self.errors = errors
        self.statistics = statistics
        self.model_version = model_version
+
+
class EntitiesTask(Model):
    """Describes one entity-recognition task for an analysis job.

    :param parameters: Optional task configuration.
    :type parameters:
     ~azure.ai.textanalytics.v3_1_preview_3.models.EntitiesTaskParameters
    """

    _attribute_map = {
        "parameters": {"key": "parameters", "type": "EntitiesTaskParameters"},
    }

    def __init__(self, *, parameters=None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.parameters = parameters
+
+
class EntitiesTaskParameters(Model):
    """Configuration knobs for an entity-recognition task.

    :param model_version: Default value: "latest" .
    :type model_version: str
    :param string_index_type: Possible values include: 'TextElements_v8',
     'UnicodeCodePoint', 'Utf16CodeUnit'. Default value: "TextElements_v8" .
    :type string_index_type: str or
     ~azure.ai.textanalytics.v3_1_preview_3.models.StringIndexTypeResponse
    """

    # Note the hyphenated wire key "model-version".
    _attribute_map = {
        "model_version": {"key": "model-version", "type": "str"},
        "string_index_type": {"key": "stringIndexType", "type": "StringIndexTypeResponse"},
    }

    def __init__(self, *, model_version: str="latest", string_index_type="TextElements_v8", **kwargs) -> None:
        super().__init__(**kwargs)
        self.model_version = model_version
        self.string_index_type = string_index_type
+
+
class Entity(Model):
    """A single recognized entity with its location and confidence.

    All required parameters must be populated in order to send to Azure.

    :param text: Required. Entity text as appears in the request.
    :type text: str
    :param category: Required. Entity type.
    :type category: str
    :param subcategory: (Optional) Entity sub type.
    :type subcategory: str
    :param offset: Required. Start position for the entity text. Use of
     different 'stringIndexType' values can affect the offset returned.
    :type offset: int
    :param length: Required. Length for the entity text. Use of different
     'stringIndexType' values can affect the length returned.
    :type length: int
    :param confidence_score: Required. Confidence score between 0 and 1 of the
     extracted entity.
    :type confidence_score: float
    """

    _validation = {
        "text": {"required": True},
        "category": {"required": True},
        "offset": {"required": True},
        "length": {"required": True},
        "confidence_score": {"required": True},
    }

    _attribute_map = {
        "text": {"key": "text", "type": "str"},
        "category": {"key": "category", "type": "str"},
        "subcategory": {"key": "subcategory", "type": "str"},
        "offset": {"key": "offset", "type": "int"},
        "length": {"key": "length", "type": "int"},
        "confidence_score": {"key": "confidenceScore", "type": "float"},
    }

    def __init__(self, *, text: str, category: str, offset: int, length: int, confidence_score: float, subcategory: str=None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.text = text
        self.category = category
        self.subcategory = subcategory
        self.offset = offset
        self.length = length
        self.confidence_score = confidence_score
+
+
class EntityLinkingResult(Model):
    """Batch-level response for entity linking.

    All required parameters must be populated in order to send to Azure.

    :param documents: Required. Response by document
    :type documents:
     list[~azure.ai.textanalytics.v3_1_preview_3.models.DocumentLinkedEntities]
    :param errors: Required. Errors by document id.
    :type errors:
     list[~azure.ai.textanalytics.v3_1_preview_3.models.DocumentError]
    :param statistics: Request-level statistics, when requested.
    :type statistics:
     ~azure.ai.textanalytics.v3_1_preview_3.models.RequestStatistics
    :param model_version: Required. This field indicates which model is used
     for scoring.
    :type model_version: str
    """

    _validation = {
        "documents": {"required": True},
        "errors": {"required": True},
        "model_version": {"required": True},
    }

    _attribute_map = {
        "documents": {"key": "documents", "type": "[DocumentLinkedEntities]"},
        "errors": {"key": "errors", "type": "[DocumentError]"},
        "statistics": {"key": "statistics", "type": "RequestStatistics"},
        "model_version": {"key": "modelVersion", "type": "str"},
    }

    def __init__(self, *, documents, errors, model_version: str, statistics=None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.documents = documents
        self.errors = errors
        self.statistics = statistics
        self.model_version = model_version
+
+
class ErrorResponse(Model):
    """Top-level error envelope returned by the service.

    All required parameters must be populated in order to send to Azure.

    :param error: Required. Document Error.
    :type error:
     ~azure.ai.textanalytics.v3_1_preview_3.models.TextAnalyticsError
    """

    _validation = {
        "error": {"required": True},
    }

    _attribute_map = {
        "error": {"key": "error", "type": "TextAnalyticsError"},
    }

    def __init__(self, *, error, **kwargs) -> None:
        super().__init__(**kwargs)
        self.error = error
+
+
class HealthcareEntity(Entity):
    """An Entity extended with healthcare-specific attributes.

    All required parameters must be populated in order to send to Azure.

    :param text: Required. Entity text as appears in the request.
    :type text: str
    :param category: Required. Entity type.
    :type category: str
    :param subcategory: (Optional) Entity sub type.
    :type subcategory: str
    :param offset: Required. Start position for the entity text. Use of
     different 'stringIndexType' values can affect the offset returned.
    :type offset: int
    :param length: Required. Length for the entity text. Use of different
     'stringIndexType' values can affect the length returned.
    :type length: int
    :param confidence_score: Required. Confidence score between 0 and 1 of the
     extracted entity.
    :type confidence_score: float
    :param is_negated: Required.
    :type is_negated: bool
    :param links: Entity references in known data sources.
    :type links:
     list[~azure.ai.textanalytics.v3_1_preview_3.models.HealthcareEntityLink]
    """

    _validation = {
        "text": {"required": True},
        "category": {"required": True},
        "offset": {"required": True},
        "length": {"required": True},
        "confidence_score": {"required": True},
        "is_negated": {"required": True},
    }

    _attribute_map = {
        "text": {"key": "text", "type": "str"},
        "category": {"key": "category", "type": "str"},
        "subcategory": {"key": "subcategory", "type": "str"},
        "offset": {"key": "offset", "type": "int"},
        "length": {"key": "length", "type": "int"},
        "confidence_score": {"key": "confidenceScore", "type": "float"},
        "is_negated": {"key": "isNegated", "type": "bool"},
        "links": {"key": "links", "type": "[HealthcareEntityLink]"},
    }

    def __init__(self, *, text: str, category: str, offset: int, length: int, confidence_score: float, is_negated: bool, subcategory: str=None, links=None, **kwargs) -> None:
        # Base Entity handles the common fields; only the healthcare extras are set here.
        super().__init__(text=text, category=category, subcategory=subcategory, offset=offset, length=length, confidence_score=confidence_score, **kwargs)
        self.is_negated = is_negated
        self.links = links
+
+
class HealthcareEntityLink(Model):
    """A reference to an entity in a known data source.

    All required parameters must be populated in order to send to Azure.

    :param data_source: Required. Entity Catalog. Examples include: UMLS, CHV,
     MSH, etc.
    :type data_source: str
    :param id: Required. Entity id in the given source catalog.
    :type id: str
    """

    _validation = {
        "data_source": {"required": True},
        "id": {"required": True},
    }

    _attribute_map = {
        "data_source": {"key": "dataSource", "type": "str"},
        "id": {"key": "id", "type": "str"},
    }

    def __init__(self, *, data_source: str, id: str, **kwargs) -> None:
        super().__init__(**kwargs)
        self.data_source = data_source
        self.id = id
+
+
class HealthcareJobState(Model):
    """Status and (eventual) results of a long-running healthcare job.

    All required parameters must be populated in order to send to Azure.

    :param created_date_time: Required.
    :type created_date_time: datetime
    :param expiration_date_time:
    :type expiration_date_time: datetime
    :param job_id: Required.
    :type job_id: str
    :param last_update_date_time: Required.
    :type last_update_date_time: datetime
    :param status: Required. Possible values include: 'notStarted', 'running',
     'succeeded', 'failed', 'cancelled', 'cancelling', 'partiallyCompleted'
    :type status: str or ~azure.ai.textanalytics.v3_1_preview_3.models.State
    :param results:
    :type results:
     ~azure.ai.textanalytics.v3_1_preview_3.models.HealthcareResult
    :param errors:
    :type errors:
     list[~azure.ai.textanalytics.v3_1_preview_3.models.TextAnalyticsError]
    :param next_link:
    :type next_link: str
    """

    _validation = {
        "created_date_time": {"required": True},
        "job_id": {"required": True},
        "last_update_date_time": {"required": True},
        "status": {"required": True},
    }

    # Timestamps travel as ISO-8601 strings; next_link maps to the "@nextLink" wire key.
    _attribute_map = {
        "created_date_time": {"key": "createdDateTime", "type": "iso-8601"},
        "expiration_date_time": {"key": "expirationDateTime", "type": "iso-8601"},
        "job_id": {"key": "jobId", "type": "str"},
        "last_update_date_time": {"key": "lastUpdateDateTime", "type": "iso-8601"},
        "status": {"key": "status", "type": "State"},
        "results": {"key": "results", "type": "HealthcareResult"},
        "errors": {"key": "errors", "type": "[TextAnalyticsError]"},
        "next_link": {"key": "@nextLink", "type": "str"},
    }

    def __init__(self, *, created_date_time, job_id: str, last_update_date_time, status, expiration_date_time=None, results=None, errors=None, next_link: str=None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.created_date_time = created_date_time
        self.expiration_date_time = expiration_date_time
        self.job_id = job_id
        self.last_update_date_time = last_update_date_time
        self.status = status
        self.results = results
        self.errors = errors
        self.next_link = next_link
+
+
class HealthcareRelation(Model):
    """A typed, possibly directed link between two healthcare entities.

    All required parameters must be populated in order to send to Azure.

    :param relation_type: Required. Type of relation. Examples include:
     `DosageOfMedication` or 'FrequencyOfMedication', etc.
    :type relation_type: str
    :param bidirectional: Required. If true the relation between the entities
     is bidirectional, otherwise directionality is source to target.
    :type bidirectional: bool
    :param source: Required. Reference link to the source entity.
    :type source: str
    :param target: Required. Reference link to the target entity.
    :type target: str
    """

    _validation = {
        "relation_type": {"required": True},
        "bidirectional": {"required": True},
        "source": {"required": True},
        "target": {"required": True},
    }

    _attribute_map = {
        "relation_type": {"key": "relationType", "type": "str"},
        "bidirectional": {"key": "bidirectional", "type": "bool"},
        "source": {"key": "source", "type": "str"},
        "target": {"key": "target", "type": "str"},
    }

    def __init__(self, *, relation_type: str, bidirectional: bool, source: str, target: str, **kwargs) -> None:
        super().__init__(**kwargs)
        self.relation_type = relation_type
        self.bidirectional = bidirectional
        self.source = source
        self.target = target
+
+
class HealthcareResult(Model):
    """Batch-level response for healthcare analysis.

    All required parameters must be populated in order to send to Azure.

    :param documents: Required. Response by document
    :type documents:
     list[~azure.ai.textanalytics.v3_1_preview_3.models.DocumentHealthcareEntities]
    :param errors: Required. Errors by document id.
    :type errors:
     list[~azure.ai.textanalytics.v3_1_preview_3.models.DocumentError]
    :param statistics: Request-level statistics, when requested.
    :type statistics:
     ~azure.ai.textanalytics.v3_1_preview_3.models.RequestStatistics
    :param model_version: Required. This field indicates which model is used
     for scoring.
    :type model_version: str
    """

    _validation = {
        "documents": {"required": True},
        "errors": {"required": True},
        "model_version": {"required": True},
    }

    _attribute_map = {
        "documents": {"key": "documents", "type": "[DocumentHealthcareEntities]"},
        "errors": {"key": "errors", "type": "[DocumentError]"},
        "statistics": {"key": "statistics", "type": "RequestStatistics"},
        "model_version": {"key": "modelVersion", "type": "str"},
    }

    def __init__(self, *, documents, errors, model_version: str, statistics=None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.documents = documents
        self.errors = errors
        self.statistics = statistics
        self.model_version = model_version
+
+
class InnerError(Model):
    """A nested, more specific error; may recursively contain another.

    All required parameters must be populated in order to send to Azure.

    :param code: Required. Error code. Possible values include:
     'InvalidParameterValue', 'InvalidRequestBodyFormat', 'EmptyRequest',
     'MissingInputRecords', 'InvalidDocument', 'ModelVersionIncorrect',
     'InvalidDocumentBatch', 'UnsupportedLanguageCode', 'InvalidCountryHint'
    :type code: str or
     ~azure.ai.textanalytics.v3_1_preview_3.models.InnerErrorCodeValue
    :param message: Required. Error message.
    :type message: str
    :param details: Error details.
    :type details: dict[str, str]
    :param target: Error target.
    :type target: str
    :param innererror: Inner error contains more specific information.
    :type innererror: ~azure.ai.textanalytics.v3_1_preview_3.models.InnerError
    """

    _validation = {
        "code": {"required": True},
        "message": {"required": True},
    }

    _attribute_map = {
        "code": {"key": "code", "type": "str"},
        "message": {"key": "message", "type": "str"},
        "details": {"key": "details", "type": "{str}"},
        "target": {"key": "target", "type": "str"},
        "innererror": {"key": "innererror", "type": "InnerError"},
    }

    def __init__(self, *, code, message: str, details=None, target: str=None, innererror=None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.code = code
        self.message = message
        self.details = details
        self.target = target
        self.innererror = innererror
+
+
class JobDescriptor(Model):
    """Carries an optional human-readable name for an analysis job.

    :param display_name: Optional display name for the analysis job.
    :type display_name: str
    """

    _attribute_map = {
        "display_name": {"key": "displayName", "type": "str"},
    }

    def __init__(self, *, display_name: str=None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.display_name = display_name
+
+
class JobManifest(Model):
    """Container for the tasks an analysis job will run.

    All required parameters must be populated in order to send to Azure.

    :param tasks: Required. The set of tasks to execute on the input
     documents. Cannot specify the same task more than once.
    :type tasks:
     ~azure.ai.textanalytics.v3_1_preview_3.models.JobManifestTasks
    """

    _validation = {
        "tasks": {"required": True},
    }

    _attribute_map = {
        "tasks": {"key": "tasks", "type": "JobManifestTasks"},
    }

    def __init__(self, *, tasks, **kwargs) -> None:
        super().__init__(**kwargs)
        self.tasks = tasks
+
+
class JobManifestTasks(Model):
    """The set of tasks to execute on the input documents. Cannot specify the same
    task more than once.

    :param entity_recognition_tasks:
    :type entity_recognition_tasks:
     list[~azure.ai.textanalytics.v3_1_preview_3.models.EntitiesTask]
    :param entity_recognition_pii_tasks:
    :type entity_recognition_pii_tasks:
     list[~azure.ai.textanalytics.v3_1_preview_3.models.PiiTask]
    :param key_phrase_extraction_tasks:
    :type key_phrase_extraction_tasks:
     list[~azure.ai.textanalytics.v3_1_preview_3.models.KeyPhrasesTask]
    """

    _attribute_map = {
        "entity_recognition_tasks": {"key": "entityRecognitionTasks", "type": "[EntitiesTask]"},
        "entity_recognition_pii_tasks": {"key": "entityRecognitionPiiTasks", "type": "[PiiTask]"},
        "key_phrase_extraction_tasks": {"key": "keyPhraseExtractionTasks", "type": "[KeyPhrasesTask]"},
    }

    def __init__(self, *, entity_recognition_tasks=None, entity_recognition_pii_tasks=None, key_phrase_extraction_tasks=None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.entity_recognition_tasks = entity_recognition_tasks
        self.entity_recognition_pii_tasks = entity_recognition_pii_tasks
        self.key_phrase_extraction_tasks = key_phrase_extraction_tasks
+
+
class KeyPhraseResult(Model):
    """Batch-level response for key-phrase extraction.

    All required parameters must be populated in order to send to Azure.

    :param documents: Required. Response by document
    :type documents:
     list[~azure.ai.textanalytics.v3_1_preview_3.models.DocumentKeyPhrases]
    :param errors: Required. Errors by document id.
    :type errors:
     list[~azure.ai.textanalytics.v3_1_preview_3.models.DocumentError]
    :param statistics: Request-level statistics, when requested.
    :type statistics:
     ~azure.ai.textanalytics.v3_1_preview_3.models.RequestStatistics
    :param model_version: Required. This field indicates which model is used
     for scoring.
    :type model_version: str
    """

    _validation = {
        "documents": {"required": True},
        "errors": {"required": True},
        "model_version": {"required": True},
    }

    _attribute_map = {
        "documents": {"key": "documents", "type": "[DocumentKeyPhrases]"},
        "errors": {"key": "errors", "type": "[DocumentError]"},
        "statistics": {"key": "statistics", "type": "RequestStatistics"},
        "model_version": {"key": "modelVersion", "type": "str"},
    }

    def __init__(self, *, documents, errors, model_version: str, statistics=None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.documents = documents
        self.errors = errors
        self.statistics = statistics
        self.model_version = model_version
+
+
class KeyPhrasesTask(Model):
    """Describes one key-phrase extraction task for an analysis job.

    :param parameters: Optional task configuration.
    :type parameters:
     ~azure.ai.textanalytics.v3_1_preview_3.models.KeyPhrasesTaskParameters
    """

    _attribute_map = {
        "parameters": {"key": "parameters", "type": "KeyPhrasesTaskParameters"},
    }

    def __init__(self, *, parameters=None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.parameters = parameters
+
+
class KeyPhrasesTaskParameters(Model):
    """Configuration knobs for a key-phrase extraction task.

    :param model_version: Default value: "latest" .
    :type model_version: str
    """

    # Note the hyphenated wire key "model-version".
    _attribute_map = {
        "model_version": {"key": "model-version", "type": "str"},
    }

    def __init__(self, *, model_version: str="latest", **kwargs) -> None:
        super().__init__(**kwargs)
        self.model_version = model_version
+
+
class LanguageBatchInput(Model):
    """Input payload for a batch language-detection request.

    All required parameters must be populated in order to send to Azure.

    :param documents: Required.
    :type documents:
     list[~azure.ai.textanalytics.v3_1_preview_3.models.LanguageInput]
    """

    _validation = {
        "documents": {"required": True},
    }

    _attribute_map = {
        "documents": {"key": "documents", "type": "[LanguageInput]"},
    }

    def __init__(self, *, documents, **kwargs) -> None:
        super().__init__(**kwargs)
        self.documents = documents
+
+
class LanguageInput(Model):
    """One document submitted for language detection.

    All required parameters must be populated in order to send to Azure.

    :param id: Required. Unique, non-empty document identifier.
    :type id: str
    :param text: Required.
    :type text: str
    :param country_hint:
    :type country_hint: str
    """

    _validation = {
        "id": {"required": True},
        "text": {"required": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "text": {"key": "text", "type": "str"},
        "country_hint": {"key": "countryHint", "type": "str"},
    }

    def __init__(self, *, id: str, text: str, country_hint: str=None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.id = id
        self.text = text
        self.country_hint = country_hint
+
+
+class LanguageResult(Model):
+    """LanguageResult.
+
+    Per-batch language detection response: per-document results, per-document
+    errors, optional request statistics, and the scoring model version.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param documents: Required. Response by document
+    :type documents:
+     list[~azure.ai.textanalytics.v3_1_preview_3.models.DocumentLanguage]
+    :param errors: Required. Errors by document id.
+    :type errors:
+     list[~azure.ai.textanalytics.v3_1_preview_3.models.DocumentError]
+    :param statistics:
+    :type statistics:
+     ~azure.ai.textanalytics.v3_1_preview_3.models.RequestStatistics
+    :param model_version: Required. This field indicates which model is used
+     for scoring.
+    :type model_version: str
+    """
+
+    # Fields the service requires to be present ('statistics' is optional).
+    _validation = {
+        'documents': {'required': True},
+        'errors': {'required': True},
+        'model_version': {'required': True},
+    }
+
+    # Python attribute -> wire key / msrest serialization type.
+    _attribute_map = {
+        'documents': {'key': 'documents', 'type': '[DocumentLanguage]'},
+        'errors': {'key': 'errors', 'type': '[DocumentError]'},
+        'statistics': {'key': 'statistics', 'type': 'RequestStatistics'},
+        'model_version': {'key': 'modelVersion', 'type': 'str'},
+    }
+
+    def __init__(self, *, documents, errors, model_version: str, statistics=None, **kwargs) -> None:
+        super(LanguageResult, self).__init__(**kwargs)
+        self.documents = documents
+        self.errors = errors
+        self.statistics = statistics
+        self.model_version = model_version
+
+
+class LinkedEntity(Model):
+    """LinkedEntity.
+
+    An entity recognized in text and linked to a well-known data source
+    (such as Wikipedia or Bing), with all of its in-text occurrences.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param name: Required. Entity Linking formal name.
+    :type name: str
+    :param matches: Required. List of instances this entity appears in the
+     text.
+    :type matches: list[~azure.ai.textanalytics.v3_1_preview_3.models.Match]
+    :param language: Required. Language used in the data source.
+    :type language: str
+    :param id: Unique identifier of the recognized entity from the data
+     source.
+    :type id: str
+    :param url: Required. URL for the entity's page from the data source.
+    :type url: str
+    :param data_source: Required. Data source used to extract entity linking,
+     such as Wiki/Bing etc.
+    :type data_source: str
+    :param bing_id: Bing Entity Search API unique identifier of the recognized
+     entity.
+    :type bing_id: str
+    """
+
+    # Fields the service requires to be present ('id' and 'bing_id' are optional).
+    _validation = {
+        'name': {'required': True},
+        'matches': {'required': True},
+        'language': {'required': True},
+        'url': {'required': True},
+        'data_source': {'required': True},
+    }
+
+    # Python attribute -> wire key / msrest serialization type.
+    _attribute_map = {
+        'name': {'key': 'name', 'type': 'str'},
+        'matches': {'key': 'matches', 'type': '[Match]'},
+        'language': {'key': 'language', 'type': 'str'},
+        'id': {'key': 'id', 'type': 'str'},
+        'url': {'key': 'url', 'type': 'str'},
+        'data_source': {'key': 'dataSource', 'type': 'str'},
+        'bing_id': {'key': 'bingId', 'type': 'str'},
+    }
+
+    def __init__(self, *, name: str, matches, language: str, url: str, data_source: str, id: str=None, bing_id: str=None, **kwargs) -> None:
+        super(LinkedEntity, self).__init__(**kwargs)
+        self.name = name
+        self.matches = matches
+        self.language = language
+        self.id = id
+        self.url = url
+        self.data_source = data_source
+        self.bing_id = bing_id
+
+
+class Match(Model):
+    """Match.
+
+    One occurrence of a linked entity in the request text, with its
+    location (offset/length) and recognition confidence.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param confidence_score: Required. If a well-known item is recognized, a
+     decimal number denoting the confidence level between 0 and 1 will be
+     returned.
+    :type confidence_score: float
+    :param text: Required. Entity text as appears in the request.
+    :type text: str
+    :param offset: Required. Start position for the entity match text.
+    :type offset: int
+    :param length: Required. Length for the entity match text.
+    :type length: int
+    """
+
+    # Fields the service requires to be present.
+    _validation = {
+        'confidence_score': {'required': True},
+        'text': {'required': True},
+        'offset': {'required': True},
+        'length': {'required': True},
+    }
+
+    # Python attribute -> wire key / msrest serialization type.
+    _attribute_map = {
+        'confidence_score': {'key': 'confidenceScore', 'type': 'float'},
+        'text': {'key': 'text', 'type': 'str'},
+        'offset': {'key': 'offset', 'type': 'int'},
+        'length': {'key': 'length', 'type': 'int'},
+    }
+
+    def __init__(self, *, confidence_score: float, text: str, offset: int, length: int, **kwargs) -> None:
+        super(Match, self).__init__(**kwargs)
+        self.confidence_score = confidence_score
+        self.text = text
+        self.offset = offset
+        self.length = length
+
+
+class MultiLanguageBatchInput(Model):
+    """Contains a set of input documents to be analyzed by the service.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param documents: Required. The set of documents to process as part of
+     this batch.
+    :type documents:
+     list[~azure.ai.textanalytics.v3_1_preview_3.models.MultiLanguageInput]
+    """
+
+    # Fields the service requires to be present.
+    _validation = {
+        'documents': {'required': True},
+    }
+
+    # Python attribute -> wire key / msrest serialization type.
+    _attribute_map = {
+        'documents': {'key': 'documents', 'type': '[MultiLanguageInput]'},
+    }
+
+    def __init__(self, *, documents, **kwargs) -> None:
+        super(MultiLanguageBatchInput, self).__init__(**kwargs)
+        self.documents = documents
+
+
+class MultiLanguageInput(Model):
+    """Contains an input document to be analyzed by the service.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param id: Required. A unique, non-empty document identifier.
+    :type id: str
+    :param text: Required. The input text to process.
+    :type text: str
+    :param language: (Optional) This is the 2 letter ISO 639-1 representation
+     of a language. For example, use "en" for English; "es" for Spanish etc. If
+     not set, use "en" for English as default.
+    :type language: str
+    """
+
+    # Fields the service requires to be present ('language' is optional).
+    _validation = {
+        'id': {'required': True},
+        'text': {'required': True},
+    }
+
+    # Python attribute -> wire key / msrest serialization type.
+    _attribute_map = {
+        'id': {'key': 'id', 'type': 'str'},
+        'text': {'key': 'text', 'type': 'str'},
+        'language': {'key': 'language', 'type': 'str'},
+    }
+
+    # NOTE: 'id' intentionally shadows the builtin to match the wire contract
+    # of the generated model; do not rename.
+    def __init__(self, *, id: str, text: str, language: str=None, **kwargs) -> None:
+        super(MultiLanguageInput, self).__init__(**kwargs)
+        self.id = id
+        self.text = text
+        self.language = language
+
+
+class Pagination(Model):
+    """Pagination.
+
+    Holds the continuation link for paged results, when more are available.
+
+    :param next_link:
+    :type next_link: str
+    """
+
+    # Python attribute -> wire key / msrest serialization type.
+    # The wire key is the OData-style '@nextLink'.
+    _attribute_map = {
+        'next_link': {'key': '@nextLink', 'type': 'str'},
+    }
+
+    def __init__(self, *, next_link: str=None, **kwargs) -> None:
+        super(Pagination, self).__init__(**kwargs)
+        self.next_link = next_link
+
+
+class PiiDocumentEntities(Model):
+    """PiiDocumentEntities.
+
+    Per-document PII recognition result: the redacted text plus the
+    recognized entities, warnings, and optional statistics.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param id: Required. Unique, non-empty document identifier.
+    :type id: str
+    :param redacted_text: Required. Returns redacted text.
+    :type redacted_text: str
+    :param entities: Required. Recognized entities in the document.
+    :type entities: list[~azure.ai.textanalytics.v3_1_preview_3.models.Entity]
+    :param warnings: Required. Warnings encountered while processing document.
+    :type warnings:
+     list[~azure.ai.textanalytics.v3_1_preview_3.models.TextAnalyticsWarning]
+    :param statistics: if showStats=true was specified in the request this
+     field will contain information about the document payload.
+    :type statistics:
+     ~azure.ai.textanalytics.v3_1_preview_3.models.DocumentStatistics
+    """
+
+    # Fields the service requires to be present ('statistics' is optional).
+    _validation = {
+        'id': {'required': True},
+        'redacted_text': {'required': True},
+        'entities': {'required': True},
+        'warnings': {'required': True},
+    }
+
+    # Python attribute -> wire key / msrest serialization type.
+    _attribute_map = {
+        'id': {'key': 'id', 'type': 'str'},
+        'redacted_text': {'key': 'redactedText', 'type': 'str'},
+        'entities': {'key': 'entities', 'type': '[Entity]'},
+        'warnings': {'key': 'warnings', 'type': '[TextAnalyticsWarning]'},
+        'statistics': {'key': 'statistics', 'type': 'DocumentStatistics'},
+    }
+
+    # NOTE: 'id' intentionally shadows the builtin to match the wire contract
+    # of the generated model; do not rename.
+    def __init__(self, *, id: str, redacted_text: str, entities, warnings, statistics=None, **kwargs) -> None:
+        super(PiiDocumentEntities, self).__init__(**kwargs)
+        self.id = id
+        self.redacted_text = redacted_text
+        self.entities = entities
+        self.warnings = warnings
+        self.statistics = statistics
+
+
+class PiiResult(Model):
+    """PiiResult.
+
+    Per-batch PII recognition response: per-document results, per-document
+    errors, optional request statistics, and the scoring model version.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param documents: Required. Response by document
+    :type documents:
+     list[~azure.ai.textanalytics.v3_1_preview_3.models.PiiDocumentEntities]
+    :param errors: Required. Errors by document id.
+    :type errors:
+     list[~azure.ai.textanalytics.v3_1_preview_3.models.DocumentError]
+    :param statistics:
+    :type statistics:
+     ~azure.ai.textanalytics.v3_1_preview_3.models.RequestStatistics
+    :param model_version: Required. This field indicates which model is used
+     for scoring.
+    :type model_version: str
+    """
+
+    # Fields the service requires to be present ('statistics' is optional).
+    _validation = {
+        'documents': {'required': True},
+        'errors': {'required': True},
+        'model_version': {'required': True},
+    }
+
+    # Python attribute -> wire key / msrest serialization type.
+    _attribute_map = {
+        'documents': {'key': 'documents', 'type': '[PiiDocumentEntities]'},
+        'errors': {'key': 'errors', 'type': '[DocumentError]'},
+        'statistics': {'key': 'statistics', 'type': 'RequestStatistics'},
+        'model_version': {'key': 'modelVersion', 'type': 'str'},
+    }
+
+    def __init__(self, *, documents, errors, model_version: str, statistics=None, **kwargs) -> None:
+        super(PiiResult, self).__init__(**kwargs)
+        self.documents = documents
+        self.errors = errors
+        self.statistics = statistics
+        self.model_version = model_version
+
+
+class PiiTask(Model):
+    """PiiTask.
+
+    Task entry selecting PII entity recognition, with optional parameters.
+
+    :param parameters:
+    :type parameters:
+     ~azure.ai.textanalytics.v3_1_preview_3.models.PiiTaskParameters
+    """
+
+    # Python attribute -> wire key / msrest serialization type.
+    _attribute_map = {
+        'parameters': {'key': 'parameters', 'type': 'PiiTaskParameters'},
+    }
+
+    def __init__(self, *, parameters=None, **kwargs) -> None:
+        super(PiiTask, self).__init__(**kwargs)
+        self.parameters = parameters
+
+
+class PiiTaskParameters(Model):
+    """PiiTaskParameters.
+
+    :param domain: Possible values include: 'phi', 'none'. Default value:
+     "none" .
+    :type domain: str or ~azure.ai.textanalytics.v3_1_preview_3.models.enum
+    :param model_version: Default value: "latest" .
+    :type model_version: str
+    :param string_index_type: Possible values include: 'TextElements_v8',
+     'UnicodeCodePoint', 'Utf16CodeUnit'. Default value: "TextElements_v8" .
+    :type string_index_type: str or
+     ~azure.ai.textanalytics.v3_1_preview_3.models.StringIndexTypeResponse
+    """
+
+    # Python attribute -> wire key / msrest serialization type.
+    # 'model-version' (hyphenated) is the wire key for model_version.
+    _attribute_map = {
+        'domain': {'key': 'domain', 'type': 'str'},
+        'model_version': {'key': 'model-version', 'type': 'str'},
+        'string_index_type': {'key': 'stringIndexType', 'type': 'StringIndexTypeResponse'},
+    }
+
+    # 'domain' and 'string_index_type' accept either a plain string or the
+    # corresponding enum value (see docstring types above).
+    def __init__(self, *, domain="none", model_version: str="latest", string_index_type="TextElements_v8", **kwargs) -> None:
+        super(PiiTaskParameters, self).__init__(**kwargs)
+        self.domain = domain
+        self.model_version = model_version
+        self.string_index_type = string_index_type
+
+
+class RequestStatistics(Model):
+    """if showStats=true was specified in the request this field will contain
+    information about the request payload.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param documents_count: Required. Number of documents submitted in the
+     request.
+    :type documents_count: int
+    :param valid_documents_count: Required. Number of valid documents. This
+     excludes empty, over-size limit or non-supported languages documents.
+    :type valid_documents_count: int
+    :param erroneous_documents_count: Required. Number of invalid documents.
+     This includes empty, over-size limit or non-supported languages documents.
+    :type erroneous_documents_count: int
+    :param transactions_count: Required. Number of transactions for the
+     request.
+    :type transactions_count: long
+    """
+
+    # Fields the service requires to be present.
+    _validation = {
+        'documents_count': {'required': True},
+        'valid_documents_count': {'required': True},
+        'erroneous_documents_count': {'required': True},
+        'transactions_count': {'required': True},
+    }
+
+    # Python attribute -> wire key / msrest serialization type.
+    # 'long' is the msrest serialization type for a 64-bit wire integer.
+    _attribute_map = {
+        'documents_count': {'key': 'documentsCount', 'type': 'int'},
+        'valid_documents_count': {'key': 'validDocumentsCount', 'type': 'int'},
+        'erroneous_documents_count': {'key': 'erroneousDocumentsCount', 'type': 'int'},
+        'transactions_count': {'key': 'transactionsCount', 'type': 'long'},
+    }
+
+    def __init__(self, *, documents_count: int, valid_documents_count: int, erroneous_documents_count: int, transactions_count: int, **kwargs) -> None:
+        super(RequestStatistics, self).__init__(**kwargs)
+        self.documents_count = documents_count
+        self.valid_documents_count = valid_documents_count
+        self.erroneous_documents_count = erroneous_documents_count
+        self.transactions_count = transactions_count
+
+
+class SentenceAspect(Model):
+    """SentenceAspect.
+
+    An aspect (opinion-mining target) found within a sentence, with its
+    sentiment, confidence scores, location, and related opinions/aspects.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param sentiment: Required. Aspect level sentiment for the aspect in the
+     sentence. Possible values include: 'positive', 'mixed', 'negative'
+    :type sentiment: str or
+     ~azure.ai.textanalytics.v3_1_preview_3.models.TokenSentimentValue
+    :param confidence_scores: Required. Aspect level sentiment confidence
+     scores for the aspect in the sentence.
+    :type confidence_scores:
+     ~azure.ai.textanalytics.v3_1_preview_3.models.AspectConfidenceScoreLabel
+    :param offset: Required. The aspect offset from the start of the sentence.
+    :type offset: int
+    :param length: Required. The length of the aspect.
+    :type length: int
+    :param text: Required. The aspect text detected.
+    :type text: str
+    :param relations: Required. The array of either opinion or aspect object
+     which is related to the aspect.
+    :type relations:
+     list[~azure.ai.textanalytics.v3_1_preview_3.models.AspectRelation]
+    """
+
+    # Fields the service requires to be present.
+    _validation = {
+        'sentiment': {'required': True},
+        'confidence_scores': {'required': True},
+        'offset': {'required': True},
+        'length': {'required': True},
+        'text': {'required': True},
+        'relations': {'required': True},
+    }
+
+    # Python attribute -> wire key / msrest serialization type.
+    _attribute_map = {
+        'sentiment': {'key': 'sentiment', 'type': 'TokenSentimentValue'},
+        'confidence_scores': {'key': 'confidenceScores', 'type': 'AspectConfidenceScoreLabel'},
+        'offset': {'key': 'offset', 'type': 'int'},
+        'length': {'key': 'length', 'type': 'int'},
+        'text': {'key': 'text', 'type': 'str'},
+        'relations': {'key': 'relations', 'type': '[AspectRelation]'},
+    }
+
+    def __init__(self, *, sentiment, confidence_scores, offset: int, length: int, text: str, relations, **kwargs) -> None:
+        super(SentenceAspect, self).__init__(**kwargs)
+        self.sentiment = sentiment
+        self.confidence_scores = confidence_scores
+        self.offset = offset
+        self.length = length
+        self.text = text
+        self.relations = relations
+
+
+class SentenceOpinion(Model):
+    """SentenceOpinion.
+
+    An opinion expression found within a sentence, with its sentiment,
+    confidence scores, location, and negation flag.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param sentiment: Required. Opinion level sentiment for the aspect in the
+     sentence. Possible values include: 'positive', 'mixed', 'negative'
+    :type sentiment: str or
+     ~azure.ai.textanalytics.v3_1_preview_3.models.TokenSentimentValue
+    :param confidence_scores: Required. Opinion level sentiment confidence
+     scores for the aspect in the sentence.
+    :type confidence_scores:
+     ~azure.ai.textanalytics.v3_1_preview_3.models.AspectConfidenceScoreLabel
+    :param offset: Required. The opinion offset from the start of the
+     sentence.
+    :type offset: int
+    :param length: Required. The length of the opinion.
+    :type length: int
+    :param text: Required. The aspect text detected.
+    :type text: str
+    :param is_negated: Required. The indicator representing if the opinion is
+     negated.
+    :type is_negated: bool
+    """
+
+    # Fields the service requires to be present.
+    _validation = {
+        'sentiment': {'required': True},
+        'confidence_scores': {'required': True},
+        'offset': {'required': True},
+        'length': {'required': True},
+        'text': {'required': True},
+        'is_negated': {'required': True},
+    }
+
+    # Python attribute -> wire key / msrest serialization type.
+    _attribute_map = {
+        'sentiment': {'key': 'sentiment', 'type': 'TokenSentimentValue'},
+        'confidence_scores': {'key': 'confidenceScores', 'type': 'AspectConfidenceScoreLabel'},
+        'offset': {'key': 'offset', 'type': 'int'},
+        'length': {'key': 'length', 'type': 'int'},
+        'text': {'key': 'text', 'type': 'str'},
+        'is_negated': {'key': 'isNegated', 'type': 'bool'},
+    }
+
+    def __init__(self, *, sentiment, confidence_scores, offset: int, length: int, text: str, is_negated: bool, **kwargs) -> None:
+        super(SentenceOpinion, self).__init__(**kwargs)
+        self.sentiment = sentiment
+        self.confidence_scores = confidence_scores
+        self.offset = offset
+        self.length = length
+        self.text = text
+        self.is_negated = is_negated
+
+
+class SentenceSentiment(Model):
+    """SentenceSentiment.
+
+    Sentiment analysis result for a single sentence, optionally including
+    opinion-mining aspects and opinions.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param text: Required. The sentence text.
+    :type text: str
+    :param sentiment: Required. The predicted Sentiment for the sentence.
+     Possible values include: 'positive', 'neutral', 'negative'
+    :type sentiment: str or
+     ~azure.ai.textanalytics.v3_1_preview_3.models.SentenceSentimentValue
+    :param confidence_scores: Required. The sentiment confidence score between
+     0 and 1 for the sentence for all classes.
+    :type confidence_scores:
+     ~azure.ai.textanalytics.v3_1_preview_3.models.SentimentConfidenceScorePerLabel
+    :param offset: Required. The sentence offset from the start of the
+     document.
+    :type offset: int
+    :param length: Required. The length of the sentence.
+    :type length: int
+    :param aspects: The array of aspect object for the sentence.
+    :type aspects:
+     list[~azure.ai.textanalytics.v3_1_preview_3.models.SentenceAspect]
+    :param opinions: The array of opinion object for the sentence.
+    :type opinions:
+     list[~azure.ai.textanalytics.v3_1_preview_3.models.SentenceOpinion]
+    """
+
+    # Fields the service requires to be present ('aspects'/'opinions' optional).
+    _validation = {
+        'text': {'required': True},
+        'sentiment': {'required': True},
+        'confidence_scores': {'required': True},
+        'offset': {'required': True},
+        'length': {'required': True},
+    }
+
+    # Python attribute -> wire key / msrest serialization type.
+    _attribute_map = {
+        'text': {'key': 'text', 'type': 'str'},
+        'sentiment': {'key': 'sentiment', 'type': 'SentenceSentimentValue'},
+        'confidence_scores': {'key': 'confidenceScores', 'type': 'SentimentConfidenceScorePerLabel'},
+        'offset': {'key': 'offset', 'type': 'int'},
+        'length': {'key': 'length', 'type': 'int'},
+        'aspects': {'key': 'aspects', 'type': '[SentenceAspect]'},
+        'opinions': {'key': 'opinions', 'type': '[SentenceOpinion]'},
+    }
+
+    def __init__(self, *, text: str, sentiment, confidence_scores, offset: int, length: int, aspects=None, opinions=None, **kwargs) -> None:
+        super(SentenceSentiment, self).__init__(**kwargs)
+        self.text = text
+        self.sentiment = sentiment
+        self.confidence_scores = confidence_scores
+        self.offset = offset
+        self.length = length
+        self.aspects = aspects
+        self.opinions = opinions
+
+
+class SentimentConfidenceScorePerLabel(Model):
+    """Represents the confidence scores between 0 and 1 across all sentiment
+    classes: positive, neutral, negative.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param positive: Required.
+    :type positive: float
+    :param neutral: Required.
+    :type neutral: float
+    :param negative: Required.
+    :type negative: float
+    """
+
+    # Fields the service requires to be present.
+    _validation = {
+        'positive': {'required': True},
+        'neutral': {'required': True},
+        'negative': {'required': True},
+    }
+
+    # Python attribute -> wire key / msrest serialization type.
+    _attribute_map = {
+        'positive': {'key': 'positive', 'type': 'float'},
+        'neutral': {'key': 'neutral', 'type': 'float'},
+        'negative': {'key': 'negative', 'type': 'float'},
+    }
+
+    def __init__(self, *, positive: float, neutral: float, negative: float, **kwargs) -> None:
+        super(SentimentConfidenceScorePerLabel, self).__init__(**kwargs)
+        self.positive = positive
+        self.neutral = neutral
+        self.negative = negative
+
+
+class SentimentResponse(Model):
+    """SentimentResponse.
+
+    Per-batch sentiment analysis response: per-document results, per-document
+    errors, optional request statistics, and the scoring model version.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param documents: Required. Sentiment analysis per document.
+    :type documents:
+     list[~azure.ai.textanalytics.v3_1_preview_3.models.DocumentSentiment]
+    :param errors: Required. Errors by document id.
+    :type errors:
+     list[~azure.ai.textanalytics.v3_1_preview_3.models.DocumentError]
+    :param statistics:
+    :type statistics:
+     ~azure.ai.textanalytics.v3_1_preview_3.models.RequestStatistics
+    :param model_version: Required. This field indicates which model is used
+     for scoring.
+    :type model_version: str
+    """
+
+    # Fields the service requires to be present ('statistics' is optional).
+    _validation = {
+        'documents': {'required': True},
+        'errors': {'required': True},
+        'model_version': {'required': True},
+    }
+
+    # Python attribute -> wire key / msrest serialization type.
+    _attribute_map = {
+        'documents': {'key': 'documents', 'type': '[DocumentSentiment]'},
+        'errors': {'key': 'errors', 'type': '[DocumentError]'},
+        'statistics': {'key': 'statistics', 'type': 'RequestStatistics'},
+        'model_version': {'key': 'modelVersion', 'type': 'str'},
+    }
+
+    def __init__(self, *, documents, errors, model_version: str, statistics=None, **kwargs) -> None:
+        super(SentimentResponse, self).__init__(**kwargs)
+        self.documents = documents
+        self.errors = errors
+        self.statistics = statistics
+        self.model_version = model_version
+
+
+class TasksState(Model):
+    """TasksState.
+
+    Wrapper holding the aggregate state of the tasks in an analyze job.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param tasks: Required.
+    :type tasks:
+     ~azure.ai.textanalytics.v3_1_preview_3.models.AnalyzeJobStateTasks
+    """
+
+    # Fields the service requires to be present.
+    _validation = {
+        'tasks': {'required': True},
+    }
+
+    # Python attribute -> wire key / msrest serialization type.
+    _attribute_map = {
+        'tasks': {'key': 'tasks', 'type': 'AnalyzeJobStateTasks'},
+    }
+
+    def __init__(self, *, tasks, **kwargs) -> None:
+        super(TasksState, self).__init__(**kwargs)
+        self.tasks = tasks
+
+
+class TextAnalyticsError(Model):
+    """TextAnalyticsError.
+
+    Top-level service error, optionally nesting an inner error and a list of
+    detail errors of the same shape.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param code: Required. Error code. Possible values include:
+     'InvalidRequest', 'InvalidArgument', 'InternalServerError',
+     'ServiceUnavailable', 'NotFound'
+    :type code: str or
+     ~azure.ai.textanalytics.v3_1_preview_3.models.ErrorCodeValue
+    :param message: Required. Error message.
+    :type message: str
+    :param target: Error target.
+    :type target: str
+    :param innererror: Inner error contains more specific information.
+    :type innererror: ~azure.ai.textanalytics.v3_1_preview_3.models.InnerError
+    :param details: Details about specific errors that led to this reported
+     error.
+    :type details:
+     list[~azure.ai.textanalytics.v3_1_preview_3.models.TextAnalyticsError]
+    """
+
+    # Fields the service requires to be present; the rest are optional.
+    _validation = {
+        'code': {'required': True},
+        'message': {'required': True},
+    }
+
+    # Python attribute -> wire key / msrest serialization type.
+    # 'details' is recursive: a list of TextAnalyticsError.
+    _attribute_map = {
+        'code': {'key': 'code', 'type': 'ErrorCodeValue'},
+        'message': {'key': 'message', 'type': 'str'},
+        'target': {'key': 'target', 'type': 'str'},
+        'innererror': {'key': 'innererror', 'type': 'InnerError'},
+        'details': {'key': 'details', 'type': '[TextAnalyticsError]'},
+    }
+
+    def __init__(self, *, code, message: str, target: str=None, innererror=None, details=None, **kwargs) -> None:
+        super(TextAnalyticsError, self).__init__(**kwargs)
+        self.code = code
+        self.message = message
+        self.target = target
+        self.innererror = innererror
+        self.details = details
+
+
+class TextAnalyticsWarning(Model):
+    """TextAnalyticsWarning.
+
+    A non-fatal warning attached to a processed document.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param code: Required. Error code. Possible values include:
+     'LongWordsInDocument', 'DocumentTruncated'
+    :type code: str or
+     ~azure.ai.textanalytics.v3_1_preview_3.models.WarningCodeValue
+    :param message: Required. Warning message.
+    :type message: str
+    :param target_ref: A JSON pointer reference indicating the target object.
+    :type target_ref: str
+    """
+
+    # Fields the service requires to be present ('target_ref' is optional).
+    _validation = {
+        'code': {'required': True},
+        'message': {'required': True},
+    }
+
+    # Python attribute -> wire key / msrest serialization type.
+    # NOTE(review): 'code' is serialized as plain 'str' even though the
+    # docstring references WarningCodeValue (unlike TextAnalyticsError, which
+    # serializes its code as 'ErrorCodeValue') — presumably intentional in the
+    # generated swagger; confirm before changing.
+    _attribute_map = {
+        'code': {'key': 'code', 'type': 'str'},
+        'message': {'key': 'message', 'type': 'str'},
+        'target_ref': {'key': 'targetRef', 'type': 'str'},
+    }
+
+    def __init__(self, *, code, message: str, target_ref: str=None, **kwargs) -> None:
+        super(TextAnalyticsWarning, self).__init__(**kwargs)
+        self.code = code
+        self.message = message
+        self.target_ref = target_ref
diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_3/models/_text_analytics_client_enums.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_3/models/_text_analytics_client_enums.py
new file mode 100644
index 000000000000..926a171f49e5
--- /dev/null
+++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_3/models/_text_analytics_client_enums.py
@@ -0,0 +1,93 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from enum import Enum
+
+
+class StringIndexTypeResponse(str, Enum):
+    """Unit used to interpret offset/length values in responses."""
+
+    text_elements_v8 = "TextElements_v8"  #: Returned offset and length values will correspond to TextElements (Graphemes and Grapheme clusters) confirming to the Unicode 8.0.0 standard. Use this option if your application is written in .Net Framework or .Net Core and you will be using StringInfo.
+    unicode_code_point = "UnicodeCodePoint"  #: Returned offset and length values will correspond to Unicode code points. Use this option if your application is written in a language that support Unicode, for example Python.
+    utf16_code_unit = "Utf16CodeUnit"  #: Returned offset and length values will correspond to UTF-16 code units. Use this option if your application is written in a language that support Unicode, for example Java, JavaScript.
+
+
+class ErrorCodeValue(str, Enum):
+    """Top-level error codes used by TextAnalyticsError.code."""
+
+    invalid_request = "InvalidRequest"
+    invalid_argument = "InvalidArgument"
+    internal_server_error = "InternalServerError"
+    service_unavailable = "ServiceUnavailable"
+    not_found = "NotFound"
+
+
+class InnerErrorCodeValue(str, Enum):
+    """More specific error codes carried by an inner error."""
+
+    invalid_parameter_value = "InvalidParameterValue"
+    invalid_request_body_format = "InvalidRequestBodyFormat"
+    empty_request = "EmptyRequest"
+    missing_input_records = "MissingInputRecords"
+    invalid_document = "InvalidDocument"
+    model_version_incorrect = "ModelVersionIncorrect"
+    invalid_document_batch = "InvalidDocumentBatch"
+    unsupported_language_code = "UnsupportedLanguageCode"
+    invalid_country_hint = "InvalidCountryHint"
+
+
+class WarningCodeValue(str, Enum):
+    """Warning codes used by TextAnalyticsWarning.code."""
+
+    long_words_in_document = "LongWordsInDocument"
+    document_truncated = "DocumentTruncated"
+
+
+class DocumentSentimentValue(str, Enum):
+    """Document-level predicted sentiment (includes 'mixed')."""
+
+    positive = "positive"
+    neutral = "neutral"
+    negative = "negative"
+    mixed = "mixed"
+
+
+class SentenceSentimentValue(str, Enum):
+    """Sentence-level predicted sentiment (no 'mixed' at this level)."""
+
+    positive = "positive"
+    neutral = "neutral"
+    negative = "negative"
+
+
+class TokenSentimentValue(str, Enum):
+    """Aspect/opinion-level sentiment ('mixed' instead of 'neutral')."""
+
+    positive = "positive"
+    mixed = "mixed"
+    negative = "negative"
+
+
+class AspectRelationType(str, Enum):
+    """Kind of object an AspectRelation points to."""
+
+    opinion = "opinion"
+    aspect = "aspect"
+
+
+class State(str, Enum):
+    """Lifecycle states of an analyze job and its tasks."""
+
+    not_started = "notStarted"
+    running = "running"
+    succeeded = "succeeded"
+    failed = "failed"
+    cancelled = "cancelled"
+    cancelling = "cancelling"
+    partially_completed = "partiallyCompleted"
+
+
+class StringIndexType(str, Enum):
+    """Unit used to interpret offset/length values in requests.
+
+    NOTE(review): member set is identical to StringIndexTypeResponse above —
+    presumably generated separately for request vs. response schemas; confirm
+    against the swagger before consolidating.
+    """
+
+    text_elements_v8 = "TextElements_v8"  #: Returned offset and length values will correspond to TextElements (Graphemes and Grapheme clusters) confirming to the Unicode 8.0.0 standard. Use this option if your application is written in .Net Framework or .Net Core and you will be using StringInfo.
+    unicode_code_point = "UnicodeCodePoint"  #: Returned offset and length values will correspond to Unicode code points. Use this option if your application is written in a language that support Unicode, for example Python.
+    utf16_code_unit = "Utf16CodeUnit"  #: Returned offset and length values will correspond to UTF-16 code units. Use this option if your application is written in a language that support Unicode, for example Java, JavaScript.
diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_3/operations/__init__.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_3/operations/__init__.py
new file mode 100644
index 000000000000..e87e22b49362
--- /dev/null
+++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_3/operations/__init__.py
@@ -0,0 +1,16 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from ._text_analytics_client_operations import TextAnalyticsClientOperationsMixin
+
+__all__ = [
+ 'TextAnalyticsClientOperationsMixin',
+]
diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_3/operations/_text_analytics_client_operations.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_3/operations/_text_analytics_client_operations.py
new file mode 100644
index 000000000000..60da25eb2a65
--- /dev/null
+++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_3/operations/_text_analytics_client_operations.py
@@ -0,0 +1,905 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from msrest.pipeline import ClientRawResponse
+from msrest.exceptions import HttpOperationError
+from .. import models
+
+
+class TextAnalyticsClientOperationsMixin(object):
+    """AutoRest-generated operations mixin for the Text Analytics client.
+
+    Relies on the mixing-in client class to provide ``self._client`` (the
+    msrest pipeline client), ``self._serialize`` / ``self._deserialize``
+    (msrest Serializer / Deserializer) and ``self.config`` (carrying
+    ``endpoint``). NOTE(review): hand edits will be lost when the code is
+    regenerated.
+    """
+
+    def analyze(
+            self, body=None, custom_headers=None, raw=False, **operation_config):
+        """Submit analysis job.
+
+        Submit a collection of text documents for analysis. Specify one or more
+        unique tasks to be executed.
+
+        :param body: Collection of documents to analyze and tasks to execute.
+        :type body:
+         ~azure.ai.textanalytics.v3_1_preview_3.models.AnalyzeBatchInput
+        :param dict custom_headers: headers that will be added to the request
+        :param bool raw: returns the direct response alongside the
+         deserialized response
+        :param operation_config: :ref:`Operation configuration
+         overrides<msrest:optionsforoperations>`.
+        :return: ErrorResponse or ClientRawResponse if raw=true
+        :rtype: ~azure.ai.textanalytics.v3_1_preview_3.models.ErrorResponse or
+         ~msrest.pipeline.ClientRawResponse
+        :raises:
+         :class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
+        """
+        # Construct URL
+        url = self.analyze.metadata['url']
+        path_format_arguments = {
+            'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True)
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}
+
+        # Construct headers
+        header_parameters = {}
+        header_parameters['Accept'] = 'application/json'
+        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
+        if custom_headers:
+            header_parameters.update(custom_headers)
+
+        # Construct body
+        if body is not None:
+            body_content = self._serialize.body(body, 'AnalyzeBatchInput')
+        else:
+            body_content = None
+
+        # Construct and send request
+        request = self._client.post(url, query_parameters, header_parameters, body_content)
+        response = self._client.send(request, stream=False, **operation_config)
+
+        if response.status_code not in [202, 400, 500]:
+            raise HttpOperationError(self._deserialize, response)
+
+        # NOTE(review): the generator attaches the Operation-Location header
+        # map only to the 400/500 branches; on 202 (accepted) there is no body
+        # and the header is reachable only through the raw response object.
+        header_dict = {}
+        deserialized = None
+        if response.status_code == 400:
+            deserialized = self._deserialize('ErrorResponse', response)
+            header_dict = {
+                'Operation-Location': 'str',
+            }
+        if response.status_code == 500:
+            deserialized = self._deserialize('ErrorResponse', response)
+            header_dict = {
+                'Operation-Location': 'str',
+            }
+
+        if raw:
+            client_raw_response = ClientRawResponse(deserialized, response)
+            client_raw_response.add_headers(header_dict)
+            return client_raw_response
+
+        return deserialized
+    analyze.metadata = {'url': '/analyze'}
+
+    def analyze_status(
+            self, job_id, show_stats=None, top=20, skip=0, custom_headers=None, raw=False, **operation_config):
+        """Get analysis status and results.
+
+        Get the status of an analysis job. A job may consist of one or more
+        tasks. Once all tasks are completed, the job will transition to the
+        completed state and results will be available for each task.
+
+        :param job_id: Job ID for Analyze
+        :type job_id: str
+        :param show_stats: (Optional) if set to true, response will contain
+         request and document level statistics.
+        :type show_stats: bool
+        :param top: (Optional) Set the maximum number of results per task.
+         When both $top and $skip are specified, $skip is applied first.
+        :type top: int
+        :param skip: (Optional) Set the number of elements to offset in the
+         response. When both $top and $skip are specified, $skip is applied
+         first.
+        :type skip: int
+        :param dict custom_headers: headers that will be added to the request
+        :param bool raw: returns the direct response alongside the
+         deserialized response
+        :param operation_config: :ref:`Operation configuration
+         overrides<msrest:optionsforoperations>`.
+        :return: object or ClientRawResponse if raw=true
+        :rtype: object or ~msrest.pipeline.ClientRawResponse
+        :raises:
+         :class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
+        """
+        # Construct URL
+        url = self.analyze_status.metadata['url']
+        path_format_arguments = {
+            'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True),
+            'jobId': self._serialize.url("job_id", job_id, 'str')
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        # $top is validated into [1, 50] and $skip into [0, inf) client-side.
+        query_parameters = {}
+        if show_stats is not None:
+            query_parameters['showStats'] = self._serialize.query("show_stats", show_stats, 'bool')
+        if top is not None:
+            query_parameters['$top'] = self._serialize.query("top", top, 'int', maximum=50, minimum=1)
+        if skip is not None:
+            query_parameters['$skip'] = self._serialize.query("skip", skip, 'int', minimum=0)
+
+        # Construct headers
+        header_parameters = {}
+        header_parameters['Accept'] = 'application/json'
+        if custom_headers:
+            header_parameters.update(custom_headers)
+
+        # Construct and send request
+        request = self._client.get(url, query_parameters, header_parameters)
+        response = self._client.send(request, stream=False, **operation_config)
+
+        if response.status_code not in [200, 404, 500]:
+            raise HttpOperationError(self._deserialize, response)
+
+        deserialized = None
+        if response.status_code == 200:
+            deserialized = self._deserialize('AnalyzeJobState', response)
+        if response.status_code == 404:
+            deserialized = self._deserialize('ErrorResponse', response)
+        if response.status_code == 500:
+            deserialized = self._deserialize('ErrorResponse', response)
+
+        if raw:
+            client_raw_response = ClientRawResponse(deserialized, response)
+            return client_raw_response
+
+        return deserialized
+    analyze_status.metadata = {'url': '/analyze/jobs/{jobId}'}
+
+    def health_status(
+            self, job_id, top=20, skip=0, show_stats=None, custom_headers=None, raw=False, **operation_config):
+        """Get healthcare analysis job status and results.
+
+        Get details of the healthcare prediction job specified by the jobId.
+
+        :param job_id: Job ID
+        :type job_id: str
+        :param top: (Optional) Set the maximum number of results per task.
+         When both $top and $skip are specified, $skip is applied first.
+        :type top: int
+        :param skip: (Optional) Set the number of elements to offset in the
+         response. When both $top and $skip are specified, $skip is applied
+         first.
+        :type skip: int
+        :param show_stats: (Optional) if set to true, response will contain
+         request and document level statistics.
+        :type show_stats: bool
+        :param dict custom_headers: headers that will be added to the request
+        :param bool raw: returns the direct response alongside the
+         deserialized response
+        :param operation_config: :ref:`Operation configuration
+         overrides<msrest:optionsforoperations>`.
+        :return: object or ClientRawResponse if raw=true
+        :rtype: object or ~msrest.pipeline.ClientRawResponse
+        :raises:
+         :class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
+        """
+        # Construct URL
+        url = self.health_status.metadata['url']
+        path_format_arguments = {
+            'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True),
+            'jobId': self._serialize.url("job_id", job_id, 'str')
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}
+        if top is not None:
+            query_parameters['$top'] = self._serialize.query("top", top, 'int', maximum=50, minimum=1)
+        if skip is not None:
+            query_parameters['$skip'] = self._serialize.query("skip", skip, 'int', minimum=0)
+        if show_stats is not None:
+            query_parameters['showStats'] = self._serialize.query("show_stats", show_stats, 'bool')
+
+        # Construct headers
+        header_parameters = {}
+        header_parameters['Accept'] = 'application/json'
+        if custom_headers:
+            header_parameters.update(custom_headers)
+
+        # Construct and send request
+        request = self._client.get(url, query_parameters, header_parameters)
+        response = self._client.send(request, stream=False, **operation_config)
+
+        if response.status_code not in [200, 404, 500]:
+            raise HttpOperationError(self._deserialize, response)
+
+        deserialized = None
+        if response.status_code == 200:
+            deserialized = self._deserialize('HealthcareJobState', response)
+        if response.status_code == 404:
+            deserialized = self._deserialize('ErrorResponse', response)
+        if response.status_code == 500:
+            deserialized = self._deserialize('ErrorResponse', response)
+
+        if raw:
+            client_raw_response = ClientRawResponse(deserialized, response)
+            return client_raw_response
+
+        return deserialized
+    health_status.metadata = {'url': '/entities/health/jobs/{jobId}'}
+
+    def cancel_health_job(
+            self, job_id, custom_headers=None, raw=False, **operation_config):
+        """Cancel healthcare prediction job.
+
+        Cancel healthcare prediction job.
+
+        :param job_id: Job ID
+        :type job_id: str
+        :param dict custom_headers: headers that will be added to the request
+        :param bool raw: returns the direct response alongside the
+         deserialized response
+        :param operation_config: :ref:`Operation configuration
+         overrides<msrest:optionsforoperations>`.
+        :return: ErrorResponse or ClientRawResponse if raw=true
+        :rtype: ~azure.ai.textanalytics.v3_1_preview_3.models.ErrorResponse or
+         ~msrest.pipeline.ClientRawResponse
+        :raises:
+         :class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
+        """
+        # Construct URL
+        url = self.cancel_health_job.metadata['url']
+        path_format_arguments = {
+            'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True),
+            'jobId': self._serialize.url("job_id", job_id, 'str')
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}
+
+        # Construct headers
+        header_parameters = {}
+        header_parameters['Accept'] = 'application/json'
+        if custom_headers:
+            header_parameters.update(custom_headers)
+
+        # Construct and send request
+        request = self._client.delete(url, query_parameters, header_parameters)
+        response = self._client.send(request, stream=False, **operation_config)
+
+        if response.status_code not in [202, 404, 500]:
+            raise HttpOperationError(self._deserialize, response)
+
+        header_dict = {}
+        deserialized = None
+        if response.status_code == 404:
+            deserialized = self._deserialize('ErrorResponse', response)
+            header_dict = {
+                'Operation-Location': 'str',
+            }
+        if response.status_code == 500:
+            deserialized = self._deserialize('ErrorResponse', response)
+            header_dict = {
+                'Operation-Location': 'str',
+            }
+
+        if raw:
+            client_raw_response = ClientRawResponse(deserialized, response)
+            client_raw_response.add_headers(header_dict)
+            return client_raw_response
+
+        return deserialized
+    cancel_health_job.metadata = {'url': '/entities/health/jobs/{jobId}'}
+
+    def health(
+            self, documents, model_version=None, string_index_type="TextElements_v8", custom_headers=None, raw=False, **operation_config):
+        """Submit healthcare analysis job.
+
+        Start a healthcare analysis job to recognize healthcare related
+        entities (drugs, conditions, symptoms, etc) and their relations.
+
+        :param documents: The set of documents to process as part of this
+         batch.
+        :type documents:
+         list[~azure.ai.textanalytics.v3_1_preview_3.models.MultiLanguageInput]
+        :param model_version: (Optional) This value indicates which model will
+         be used for scoring. If a model-version is not specified, the API
+         should default to the latest, non-preview version.
+        :type model_version: str
+        :param string_index_type: (Optional) Specifies the method used to
+         interpret string offsets. Defaults to Text Elements (Graphemes)
+         according to Unicode v8.0.0. For additional information see
+         https://aka.ms/text-analytics-offsets. Possible values include:
+         'TextElements_v8', 'UnicodeCodePoint', 'Utf16CodeUnit'
+        :type string_index_type: str or
+         ~azure.ai.textanalytics.v3_1_preview_3.models.StringIndexType
+        :param dict custom_headers: headers that will be added to the request
+        :param bool raw: returns the direct response alongside the
+         deserialized response
+        :param operation_config: :ref:`Operation configuration
+         overrides<msrest:optionsforoperations>`.
+        :return: ErrorResponse or ClientRawResponse if raw=true
+        :rtype: ~azure.ai.textanalytics.v3_1_preview_3.models.ErrorResponse or
+         ~msrest.pipeline.ClientRawResponse
+        :raises:
+         :class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
+        """
+        # NOTE(review): the local name 'input' shadows the builtin; this naming
+        # is emitted by the generator throughout this mixin and kept as-is.
+        input = models.MultiLanguageBatchInput(documents=documents)
+
+        # Construct URL
+        url = self.health.metadata['url']
+        path_format_arguments = {
+            'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True)
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}
+        if model_version is not None:
+            query_parameters['model-version'] = self._serialize.query("model_version", model_version, 'str')
+        if string_index_type is not None:
+            query_parameters['stringIndexType'] = self._serialize.query("string_index_type", string_index_type, 'str')
+
+        # Construct headers
+        header_parameters = {}
+        header_parameters['Accept'] = 'application/json'
+        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
+        if custom_headers:
+            header_parameters.update(custom_headers)
+
+        # Construct body
+        body_content = self._serialize.body(input, 'MultiLanguageBatchInput')
+
+        # Construct and send request
+        request = self._client.post(url, query_parameters, header_parameters, body_content)
+        response = self._client.send(request, stream=False, **operation_config)
+
+        if response.status_code not in [202, 400, 500]:
+            raise HttpOperationError(self._deserialize, response)
+
+        header_dict = {}
+        deserialized = None
+        if response.status_code == 400:
+            deserialized = self._deserialize('ErrorResponse', response)
+            header_dict = {
+                'Operation-Location': 'str',
+            }
+        if response.status_code == 500:
+            deserialized = self._deserialize('ErrorResponse', response)
+            header_dict = {
+                'Operation-Location': 'str',
+            }
+
+        if raw:
+            client_raw_response = ClientRawResponse(deserialized, response)
+            client_raw_response.add_headers(header_dict)
+            return client_raw_response
+
+        return deserialized
+    health.metadata = {'url': '/entities/health/jobs'}
+
+    def entities_recognition_general(
+            self, documents, model_version=None, show_stats=None, string_index_type="TextElements_v8", custom_headers=None, raw=False, **operation_config):
+        """Named Entity Recognition.
+
+        The API returns a list of general named entities in a given document.
+        For the list of supported entity types, check Supported Entity Types in Text Analytics
+        API. See the Supported languages
+        in Text Analytics API for the list of enabled languages.
+
+        :param documents: The set of documents to process as part of this
+         batch.
+        :type documents:
+         list[~azure.ai.textanalytics.v3_1_preview_3.models.MultiLanguageInput]
+        :param model_version: (Optional) This value indicates which model will
+         be used for scoring. If a model-version is not specified, the API
+         should default to the latest, non-preview version.
+        :type model_version: str
+        :param show_stats: (Optional) if set to true, response will contain
+         request and document level statistics.
+        :type show_stats: bool
+        :param string_index_type: (Optional) Specifies the method used to
+         interpret string offsets. Defaults to Text Elements (Graphemes)
+         according to Unicode v8.0.0. For additional information see
+         https://aka.ms/text-analytics-offsets. Possible values include:
+         'TextElements_v8', 'UnicodeCodePoint', 'Utf16CodeUnit'
+        :type string_index_type: str or
+         ~azure.ai.textanalytics.v3_1_preview_3.models.StringIndexType
+        :param dict custom_headers: headers that will be added to the request
+        :param bool raw: returns the direct response alongside the
+         deserialized response
+        :param operation_config: :ref:`Operation configuration
+         overrides<msrest:optionsforoperations>`.
+        :return: object or ClientRawResponse if raw=true
+        :rtype: object or ~msrest.pipeline.ClientRawResponse
+        :raises:
+         :class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
+        """
+        input = models.MultiLanguageBatchInput(documents=documents)
+
+        # Construct URL
+        url = self.entities_recognition_general.metadata['url']
+        path_format_arguments = {
+            'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True)
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}
+        if model_version is not None:
+            query_parameters['model-version'] = self._serialize.query("model_version", model_version, 'str')
+        if show_stats is not None:
+            query_parameters['showStats'] = self._serialize.query("show_stats", show_stats, 'bool')
+        if string_index_type is not None:
+            query_parameters['stringIndexType'] = self._serialize.query("string_index_type", string_index_type, 'str')
+
+        # Construct headers
+        header_parameters = {}
+        header_parameters['Accept'] = 'application/json'
+        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
+        if custom_headers:
+            header_parameters.update(custom_headers)
+
+        # Construct body
+        body_content = self._serialize.body(input, 'MultiLanguageBatchInput')
+
+        # Construct and send request
+        request = self._client.post(url, query_parameters, header_parameters, body_content)
+        response = self._client.send(request, stream=False, **operation_config)
+
+        if response.status_code not in [200, 400, 500]:
+            raise HttpOperationError(self._deserialize, response)
+
+        deserialized = None
+        if response.status_code == 200:
+            deserialized = self._deserialize('EntitiesResult', response)
+        if response.status_code == 400:
+            deserialized = self._deserialize('ErrorResponse', response)
+        if response.status_code == 500:
+            deserialized = self._deserialize('ErrorResponse', response)
+
+        if raw:
+            client_raw_response = ClientRawResponse(deserialized, response)
+            return client_raw_response
+
+        return deserialized
+    entities_recognition_general.metadata = {'url': '/entities/recognition/general'}
+
+    def entities_recognition_pii(
+            self, documents, model_version=None, show_stats=None, domain=None, string_index_type="TextElements_v8", custom_headers=None, raw=False, **operation_config):
+        """Entities containing personal information.
+
+        The API returns a list of entities with personal information (\"SSN\",
+        \"Bank Account\" etc) in the document. For the list of supported entity
+        types, check Supported Entity Types
+        in Text Analytics API. See the Supported languages in Text Analytics
+        API for the list of enabled languages.
+        .
+
+        :param documents: The set of documents to process as part of this
+         batch.
+        :type documents:
+         list[~azure.ai.textanalytics.v3_1_preview_3.models.MultiLanguageInput]
+        :param model_version: (Optional) This value indicates which model will
+         be used for scoring. If a model-version is not specified, the API
+         should default to the latest, non-preview version.
+        :type model_version: str
+        :param show_stats: (Optional) if set to true, response will contain
+         request and document level statistics.
+        :type show_stats: bool
+        :param domain: (Optional) if specified, will set the PII domain to
+         include only a subset of the entity categories. Possible values
+         include: 'PHI', 'none'.
+        :type domain: str
+        :param string_index_type: (Optional) Specifies the method used to
+         interpret string offsets. Defaults to Text Elements (Graphemes)
+         according to Unicode v8.0.0. For additional information see
+         https://aka.ms/text-analytics-offsets. Possible values include:
+         'TextElements_v8', 'UnicodeCodePoint', 'Utf16CodeUnit'
+        :type string_index_type: str or
+         ~azure.ai.textanalytics.v3_1_preview_3.models.StringIndexType
+        :param dict custom_headers: headers that will be added to the request
+        :param bool raw: returns the direct response alongside the
+         deserialized response
+        :param operation_config: :ref:`Operation configuration
+         overrides<msrest:optionsforoperations>`.
+        :return: object or ClientRawResponse if raw=true
+        :rtype: object or ~msrest.pipeline.ClientRawResponse
+        :raises:
+         :class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
+        """
+        input = models.MultiLanguageBatchInput(documents=documents)
+
+        # Construct URL
+        url = self.entities_recognition_pii.metadata['url']
+        path_format_arguments = {
+            'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True)
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}
+        if model_version is not None:
+            query_parameters['model-version'] = self._serialize.query("model_version", model_version, 'str')
+        if show_stats is not None:
+            query_parameters['showStats'] = self._serialize.query("show_stats", show_stats, 'bool')
+        if domain is not None:
+            query_parameters['domain'] = self._serialize.query("domain", domain, 'str')
+        if string_index_type is not None:
+            query_parameters['stringIndexType'] = self._serialize.query("string_index_type", string_index_type, 'str')
+
+        # Construct headers
+        header_parameters = {}
+        header_parameters['Accept'] = 'application/json'
+        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
+        if custom_headers:
+            header_parameters.update(custom_headers)
+
+        # Construct body
+        body_content = self._serialize.body(input, 'MultiLanguageBatchInput')
+
+        # Construct and send request
+        request = self._client.post(url, query_parameters, header_parameters, body_content)
+        response = self._client.send(request, stream=False, **operation_config)
+
+        if response.status_code not in [200, 400, 500]:
+            raise HttpOperationError(self._deserialize, response)
+
+        deserialized = None
+        if response.status_code == 200:
+            deserialized = self._deserialize('PiiResult', response)
+        if response.status_code == 400:
+            deserialized = self._deserialize('ErrorResponse', response)
+        if response.status_code == 500:
+            deserialized = self._deserialize('ErrorResponse', response)
+
+        if raw:
+            client_raw_response = ClientRawResponse(deserialized, response)
+            return client_raw_response
+
+        return deserialized
+    entities_recognition_pii.metadata = {'url': '/entities/recognition/pii'}
+
+    def entities_linking(
+            self, documents, model_version=None, show_stats=None, string_index_type="TextElements_v8", custom_headers=None, raw=False, **operation_config):
+        """Linked entities from a well-known knowledge base.
+
+        The API returns a list of recognized entities with links to a
+        well-known knowledge base. See the Supported languages in Text Analytics
+        API for the list of enabled languages.
+
+        :param documents: The set of documents to process as part of this
+         batch.
+        :type documents:
+         list[~azure.ai.textanalytics.v3_1_preview_3.models.MultiLanguageInput]
+        :param model_version: (Optional) This value indicates which model will
+         be used for scoring. If a model-version is not specified, the API
+         should default to the latest, non-preview version.
+        :type model_version: str
+        :param show_stats: (Optional) if set to true, response will contain
+         request and document level statistics.
+        :type show_stats: bool
+        :param string_index_type: (Optional) Specifies the method used to
+         interpret string offsets. Defaults to Text Elements (Graphemes)
+         according to Unicode v8.0.0. For additional information see
+         https://aka.ms/text-analytics-offsets. Possible values include:
+         'TextElements_v8', 'UnicodeCodePoint', 'Utf16CodeUnit'
+        :type string_index_type: str or
+         ~azure.ai.textanalytics.v3_1_preview_3.models.StringIndexType
+        :param dict custom_headers: headers that will be added to the request
+        :param bool raw: returns the direct response alongside the
+         deserialized response
+        :param operation_config: :ref:`Operation configuration
+         overrides<msrest:optionsforoperations>`.
+        :return: object or ClientRawResponse if raw=true
+        :rtype: object or ~msrest.pipeline.ClientRawResponse
+        :raises:
+         :class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
+        """
+        input = models.MultiLanguageBatchInput(documents=documents)
+
+        # Construct URL
+        url = self.entities_linking.metadata['url']
+        path_format_arguments = {
+            'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True)
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}
+        if model_version is not None:
+            query_parameters['model-version'] = self._serialize.query("model_version", model_version, 'str')
+        if show_stats is not None:
+            query_parameters['showStats'] = self._serialize.query("show_stats", show_stats, 'bool')
+        if string_index_type is not None:
+            query_parameters['stringIndexType'] = self._serialize.query("string_index_type", string_index_type, 'str')
+
+        # Construct headers
+        header_parameters = {}
+        header_parameters['Accept'] = 'application/json'
+        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
+        if custom_headers:
+            header_parameters.update(custom_headers)
+
+        # Construct body
+        body_content = self._serialize.body(input, 'MultiLanguageBatchInput')
+
+        # Construct and send request
+        request = self._client.post(url, query_parameters, header_parameters, body_content)
+        response = self._client.send(request, stream=False, **operation_config)
+
+        if response.status_code not in [200, 400, 500]:
+            raise HttpOperationError(self._deserialize, response)
+
+        deserialized = None
+        if response.status_code == 200:
+            deserialized = self._deserialize('EntityLinkingResult', response)
+        if response.status_code == 400:
+            deserialized = self._deserialize('ErrorResponse', response)
+        if response.status_code == 500:
+            deserialized = self._deserialize('ErrorResponse', response)
+
+        if raw:
+            client_raw_response = ClientRawResponse(deserialized, response)
+            return client_raw_response
+
+        return deserialized
+    entities_linking.metadata = {'url': '/entities/linking'}
+
+    def key_phrases(
+            self, documents, model_version=None, show_stats=None, custom_headers=None, raw=False, **operation_config):
+        """Key Phrases.
+
+        The API returns a list of strings denoting the key phrases in the input
+        text. See the Supported languages in
+        Text Analytics API for the list of enabled languages.
+
+        :param documents: The set of documents to process as part of this
+         batch.
+        :type documents:
+         list[~azure.ai.textanalytics.v3_1_preview_3.models.MultiLanguageInput]
+        :param model_version: (Optional) This value indicates which model will
+         be used for scoring. If a model-version is not specified, the API
+         should default to the latest, non-preview version.
+        :type model_version: str
+        :param show_stats: (Optional) if set to true, response will contain
+         request and document level statistics.
+        :type show_stats: bool
+        :param dict custom_headers: headers that will be added to the request
+        :param bool raw: returns the direct response alongside the
+         deserialized response
+        :param operation_config: :ref:`Operation configuration
+         overrides<msrest:optionsforoperations>`.
+        :return: object or ClientRawResponse if raw=true
+        :rtype: object or ~msrest.pipeline.ClientRawResponse
+        :raises:
+         :class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
+        """
+        input = models.MultiLanguageBatchInput(documents=documents)
+
+        # Construct URL
+        url = self.key_phrases.metadata['url']
+        path_format_arguments = {
+            'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True)
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}
+        if model_version is not None:
+            query_parameters['model-version'] = self._serialize.query("model_version", model_version, 'str')
+        if show_stats is not None:
+            query_parameters['showStats'] = self._serialize.query("show_stats", show_stats, 'bool')
+
+        # Construct headers
+        header_parameters = {}
+        header_parameters['Accept'] = 'application/json'
+        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
+        if custom_headers:
+            header_parameters.update(custom_headers)
+
+        # Construct body
+        body_content = self._serialize.body(input, 'MultiLanguageBatchInput')
+
+        # Construct and send request
+        request = self._client.post(url, query_parameters, header_parameters, body_content)
+        response = self._client.send(request, stream=False, **operation_config)
+
+        if response.status_code not in [200, 400, 500]:
+            raise HttpOperationError(self._deserialize, response)
+
+        deserialized = None
+        if response.status_code == 200:
+            deserialized = self._deserialize('KeyPhraseResult', response)
+        if response.status_code == 400:
+            deserialized = self._deserialize('ErrorResponse', response)
+        if response.status_code == 500:
+            deserialized = self._deserialize('ErrorResponse', response)
+
+        if raw:
+            client_raw_response = ClientRawResponse(deserialized, response)
+            return client_raw_response
+
+        return deserialized
+    key_phrases.metadata = {'url': '/keyPhrases'}
+
+    def languages(
+            self, documents, model_version=None, show_stats=None, custom_headers=None, raw=False, **operation_config):
+        """Detect Language.
+
+        The API returns the detected language and a numeric score between 0 and
+        1. Scores close to 1 indicate 100% certainty that the identified
+        language is true. See the Supported
+        languages in Text Analytics API for the list of enabled languages.
+
+        :param documents:
+        :type documents:
+         list[~azure.ai.textanalytics.v3_1_preview_3.models.LanguageInput]
+        :param model_version: (Optional) This value indicates which model will
+         be used for scoring. If a model-version is not specified, the API
+         should default to the latest, non-preview version.
+        :type model_version: str
+        :param show_stats: (Optional) if set to true, response will contain
+         request and document level statistics.
+        :type show_stats: bool
+        :param dict custom_headers: headers that will be added to the request
+        :param bool raw: returns the direct response alongside the
+         deserialized response
+        :param operation_config: :ref:`Operation configuration
+         overrides<msrest:optionsforoperations>`.
+        :return: object or ClientRawResponse if raw=true
+        :rtype: object or ~msrest.pipeline.ClientRawResponse
+        :raises:
+         :class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
+        """
+        # Unlike the other operations, language detection takes LanguageInput
+        # documents (no per-document language hint).
+        input = models.LanguageBatchInput(documents=documents)
+
+        # Construct URL
+        url = self.languages.metadata['url']
+        path_format_arguments = {
+            'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True)
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}
+        if model_version is not None:
+            query_parameters['model-version'] = self._serialize.query("model_version", model_version, 'str')
+        if show_stats is not None:
+            query_parameters['showStats'] = self._serialize.query("show_stats", show_stats, 'bool')
+
+        # Construct headers
+        header_parameters = {}
+        header_parameters['Accept'] = 'application/json'
+        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
+        if custom_headers:
+            header_parameters.update(custom_headers)
+
+        # Construct body
+        body_content = self._serialize.body(input, 'LanguageBatchInput')
+
+        # Construct and send request
+        request = self._client.post(url, query_parameters, header_parameters, body_content)
+        response = self._client.send(request, stream=False, **operation_config)
+
+        if response.status_code not in [200, 400, 500]:
+            raise HttpOperationError(self._deserialize, response)
+
+        deserialized = None
+        if response.status_code == 200:
+            deserialized = self._deserialize('LanguageResult', response)
+        if response.status_code == 400:
+            deserialized = self._deserialize('ErrorResponse', response)
+        if response.status_code == 500:
+            deserialized = self._deserialize('ErrorResponse', response)
+
+        if raw:
+            client_raw_response = ClientRawResponse(deserialized, response)
+            return client_raw_response
+
+        return deserialized
+    languages.metadata = {'url': '/languages'}
+
+    def sentiment(
+            self, documents, model_version=None, show_stats=None, opinion_mining=None, string_index_type="TextElements_v8", custom_headers=None, raw=False, **operation_config):
+        """Sentiment.
+
+        The API returns a detailed sentiment analysis for the input text. The
+        analysis is done in multiple levels of granularity, starting from the
+        document level, down to sentence and key terms (aspects) and opinions.
+
+        :param documents: The set of documents to process as part of this
+         batch.
+        :type documents:
+         list[~azure.ai.textanalytics.v3_1_preview_3.models.MultiLanguageInput]
+        :param model_version: (Optional) This value indicates which model will
+         be used for scoring. If a model-version is not specified, the API
+         should default to the latest, non-preview version.
+        :type model_version: str
+        :param show_stats: (Optional) if set to true, response will contain
+         request and document level statistics.
+        :type show_stats: bool
+        :param opinion_mining: (Optional) if set to true, response will
+         contain input and document level statistics including aspect-based
+         sentiment analysis results.
+        :type opinion_mining: bool
+        :param string_index_type: (Optional) Specifies the method used to
+         interpret string offsets. Defaults to Text Elements (Graphemes)
+         according to Unicode v8.0.0. For additional information see
+         https://aka.ms/text-analytics-offsets. Possible values include:
+         'TextElements_v8', 'UnicodeCodePoint', 'Utf16CodeUnit'
+        :type string_index_type: str or
+         ~azure.ai.textanalytics.v3_1_preview_3.models.StringIndexType
+        :param dict custom_headers: headers that will be added to the request
+        :param bool raw: returns the direct response alongside the
+         deserialized response
+        :param operation_config: :ref:`Operation configuration
+         overrides<msrest:optionsforoperations>`.
+        :return: object or ClientRawResponse if raw=true
+        :rtype: object or ~msrest.pipeline.ClientRawResponse
+        :raises:
+         :class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
+        """
+        input = models.MultiLanguageBatchInput(documents=documents)
+
+        # Construct URL
+        url = self.sentiment.metadata['url']
+        path_format_arguments = {
+            'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True)
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}
+        if model_version is not None:
+            query_parameters['model-version'] = self._serialize.query("model_version", model_version, 'str')
+        if show_stats is not None:
+            query_parameters['showStats'] = self._serialize.query("show_stats", show_stats, 'bool')
+        if opinion_mining is not None:
+            query_parameters['opinionMining'] = self._serialize.query("opinion_mining", opinion_mining, 'bool')
+        if string_index_type is not None:
+            query_parameters['stringIndexType'] = self._serialize.query("string_index_type", string_index_type, 'str')
+
+        # Construct headers
+        header_parameters = {}
+        header_parameters['Accept'] = 'application/json'
+        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
+        if custom_headers:
+            header_parameters.update(custom_headers)
+
+        # Construct body
+        body_content = self._serialize.body(input, 'MultiLanguageBatchInput')
+
+        # Construct and send request
+        request = self._client.post(url, query_parameters, header_parameters, body_content)
+        response = self._client.send(request, stream=False, **operation_config)
+
+        if response.status_code not in [200, 400, 500]:
+            raise HttpOperationError(self._deserialize, response)
+
+        deserialized = None
+        if response.status_code == 200:
+            deserialized = self._deserialize('SentimentResponse', response)
+        if response.status_code == 400:
+            deserialized = self._deserialize('ErrorResponse', response)
+        if response.status_code == 500:
+            deserialized = self._deserialize('ErrorResponse', response)
+
+        if raw:
+            client_raw_response = ClientRawResponse(deserialized, response)
+            return client_raw_response
+
+        return deserialized
+    sentiment.metadata = {'url': '/sentiment'}
diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_3/version.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_3/version.py
new file mode 100644
index 000000000000..b8ffb04f789f
--- /dev/null
+++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_3/version.py
@@ -0,0 +1,13 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+VERSION = "0.0.1"
+
diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_4/__init__.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_4/__init__.py
new file mode 100644
index 000000000000..48d59a14cb3c
--- /dev/null
+++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_4/__init__.py
@@ -0,0 +1,19 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from ._configuration import TextAnalyticsClientConfiguration
+from ._text_analytics_client import TextAnalyticsClient
+__all__ = ['TextAnalyticsClient', 'TextAnalyticsClientConfiguration']
+
+from .version import VERSION
+
+__version__ = VERSION
+
diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_4/_configuration.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_4/_configuration.py
new file mode 100644
index 000000000000..e40a3ca146ec
--- /dev/null
+++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_4/_configuration.py
@@ -0,0 +1,47 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from msrest import Configuration
+
+from .version import VERSION
+
+
class TextAnalyticsClientConfiguration(Configuration):
    """Configuration for TextAnalyticsClient.

    Every argument used to build this instance is kept on it as an
    attribute.

    :param endpoint: Supported Cognitive Services endpoints (protocol and
     hostname, for example: https://westus.api.cognitive.microsoft.com).
    :type endpoint: str
    :param credentials: Subscription credentials which uniquely identify
     client subscription.
    :type credentials: None
    """

    def __init__(self, endpoint, credentials):
        # Fail fast: both arguments are mandatory (endpoint checked first).
        for arg_name, arg_value in (('endpoint', endpoint), ('credentials', credentials)):
            if arg_value is None:
                raise ValueError("Parameter '{}' must not be None.".format(arg_name))

        # NOTE(review): '{Endpoint}' is a URL template slot; presumably the
        # generated operations substitute self.endpoint per request — confirm.
        base_url = '{Endpoint}/text/analytics/v3.1-preview.4'
        super(TextAnalyticsClientConfiguration, self).__init__(base_url)

        # Starting Autorest.Python 4.0.64, make connection pool activated by default
        self.keep_alive = True

        self.add_user_agent('azure-ai-textanalytics/{}'.format(VERSION))

        self.endpoint = endpoint
        self.credentials = credentials
diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_4/_text_analytics_client.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_4/_text_analytics_client.py
new file mode 100644
index 000000000000..c689eac3e037
--- /dev/null
+++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_4/_text_analytics_client.py
@@ -0,0 +1,45 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from msrest.service_client import SDKClient
+from msrest import Serializer, Deserializer
+
+from ._configuration import TextAnalyticsClientConfiguration
+from .operations import TextAnalyticsClientOperationsMixin
+from msrest.exceptions import HttpOperationError
+from . import models
+
+
class TextAnalyticsClient(TextAnalyticsClientOperationsMixin, SDKClient):
    """Client for the Text Analytics API, a suite of natural language processing (NLP) services built with best-in-class Microsoft machine learning algorithms. The API can be used to analyze unstructured text for tasks such as sentiment analysis, key phrase extraction and language detection. Functionality for analysis of text specific to the healthcare domain and personal information are also available in the API. Further documentation can be found in <a href="https://docs.microsoft.com/en-us/azure/cognitive-services/text-analytics/overview">https://docs.microsoft.com/en-us/azure/cognitive-services/text-analytics/overview</a>

    :ivar config: Configuration for client.
    :vartype config: TextAnalyticsClientConfiguration

    :param endpoint: Supported Cognitive Services endpoints (protocol and
     hostname, for example: https://westus.api.cognitive.microsoft.com).
    :type endpoint: str
    :param credentials: Subscription credentials which uniquely identify
     client subscription.
    :type credentials: None
    """

    def __init__(self, endpoint, credentials):

        self.config = TextAnalyticsClientConfiguration(endpoint, credentials)
        super(TextAnalyticsClient, self).__init__(self.config.credentials, self.config)

        # Hand every generated model class to the (de)serializers, keyed by
        # its class name, so wire types in _attribute_map entries resolve.
        client_models = {
            name: value
            for name, value in models.__dict__.items()
            if isinstance(value, type)
        }
        self.api_version = 'v3.1-preview.4'
        self._serialize = Serializer(client_models)
        self._deserialize = Deserializer(client_models)
+
diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_4/models/__init__.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_4/models/__init__.py
new file mode 100644
index 000000000000..2453a4589659
--- /dev/null
+++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_4/models/__init__.py
@@ -0,0 +1,243 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+try:
+ from ._models_py3 import AnalyzeBatchInput
+ from ._models_py3 import AnalyzeJobMetadata
+ from ._models_py3 import AnalyzeJobState
+ from ._models_py3 import AnalyzeJobStateTasks
+ from ._models_py3 import AnalyzeJobStateTasksDetails
+ from ._models_py3 import AnalyzeJobStateTasksEntityLinkingTasksItem
+ from ._models_py3 import AnalyzeJobStateTasksEntityRecognitionPiiTasksItem
+ from ._models_py3 import AnalyzeJobStateTasksEntityRecognitionTasksItem
+ from ._models_py3 import AnalyzeJobStateTasksKeyPhraseExtractionTasksItem
+ from ._models_py3 import DetectedLanguage
+ from ._models_py3 import DocumentEntities
+ from ._models_py3 import DocumentError
+ from ._models_py3 import DocumentHealthcareEntities
+ from ._models_py3 import DocumentKeyPhrases
+ from ._models_py3 import DocumentLanguage
+ from ._models_py3 import DocumentLinkedEntities
+ from ._models_py3 import DocumentSentiment
+ from ._models_py3 import DocumentStatistics
+ from ._models_py3 import EntitiesResult
+ from ._models_py3 import EntitiesTask
+ from ._models_py3 import EntitiesTaskParameters
+ from ._models_py3 import Entity
+ from ._models_py3 import EntityLinkingResult
+ from ._models_py3 import EntityLinkingTask
+ from ._models_py3 import EntityLinkingTaskParameters
+ from ._models_py3 import ErrorResponse
+ from ._models_py3 import HealthcareAssertion
+ from ._models_py3 import HealthcareEntity
+ from ._models_py3 import HealthcareEntityLink
+ from ._models_py3 import HealthcareJobState
+ from ._models_py3 import HealthcareRelation
+ from ._models_py3 import HealthcareRelationEntity
+ from ._models_py3 import HealthcareResult
+ from ._models_py3 import InnerError
+ from ._models_py3 import JobDescriptor
+ from ._models_py3 import JobManifest
+ from ._models_py3 import JobManifestTasks
+ from ._models_py3 import JobMetadata
+ from ._models_py3 import KeyPhraseResult
+ from ._models_py3 import KeyPhrasesTask
+ from ._models_py3 import KeyPhrasesTaskParameters
+ from ._models_py3 import LanguageBatchInput
+ from ._models_py3 import LanguageInput
+ from ._models_py3 import LanguageResult
+ from ._models_py3 import LinkedEntity
+ from ._models_py3 import Match
+ from ._models_py3 import MultiLanguageBatchInput
+ from ._models_py3 import MultiLanguageInput
+ from ._models_py3 import Pagination
+ from ._models_py3 import PiiDocumentEntities
+ from ._models_py3 import PiiResult
+ from ._models_py3 import PiiTask
+ from ._models_py3 import PiiTaskParameters
+ from ._models_py3 import RequestStatistics
+ from ._models_py3 import SentenceAssessment
+ from ._models_py3 import SentenceSentiment
+ from ._models_py3 import SentenceTarget
+ from ._models_py3 import SentimentConfidenceScorePerLabel
+ from ._models_py3 import SentimentResponse
+ from ._models_py3 import TargetConfidenceScoreLabel
+ from ._models_py3 import TargetRelation
+ from ._models_py3 import TasksState
+ from ._models_py3 import TaskState
+ from ._models_py3 import TextAnalyticsError
+ from ._models_py3 import TextAnalyticsWarning
+except (SyntaxError, ImportError):
+ from ._models import AnalyzeBatchInput
+ from ._models import AnalyzeJobMetadata
+ from ._models import AnalyzeJobState
+ from ._models import AnalyzeJobStateTasks
+ from ._models import AnalyzeJobStateTasksDetails
+ from ._models import AnalyzeJobStateTasksEntityLinkingTasksItem
+ from ._models import AnalyzeJobStateTasksEntityRecognitionPiiTasksItem
+ from ._models import AnalyzeJobStateTasksEntityRecognitionTasksItem
+ from ._models import AnalyzeJobStateTasksKeyPhraseExtractionTasksItem
+ from ._models import DetectedLanguage
+ from ._models import DocumentEntities
+ from ._models import DocumentError
+ from ._models import DocumentHealthcareEntities
+ from ._models import DocumentKeyPhrases
+ from ._models import DocumentLanguage
+ from ._models import DocumentLinkedEntities
+ from ._models import DocumentSentiment
+ from ._models import DocumentStatistics
+ from ._models import EntitiesResult
+ from ._models import EntitiesTask
+ from ._models import EntitiesTaskParameters
+ from ._models import Entity
+ from ._models import EntityLinkingResult
+ from ._models import EntityLinkingTask
+ from ._models import EntityLinkingTaskParameters
+ from ._models import ErrorResponse
+ from ._models import HealthcareAssertion
+ from ._models import HealthcareEntity
+ from ._models import HealthcareEntityLink
+ from ._models import HealthcareJobState
+ from ._models import HealthcareRelation
+ from ._models import HealthcareRelationEntity
+ from ._models import HealthcareResult
+ from ._models import InnerError
+ from ._models import JobDescriptor
+ from ._models import JobManifest
+ from ._models import JobManifestTasks
+ from ._models import JobMetadata
+ from ._models import KeyPhraseResult
+ from ._models import KeyPhrasesTask
+ from ._models import KeyPhrasesTaskParameters
+ from ._models import LanguageBatchInput
+ from ._models import LanguageInput
+ from ._models import LanguageResult
+ from ._models import LinkedEntity
+ from ._models import Match
+ from ._models import MultiLanguageBatchInput
+ from ._models import MultiLanguageInput
+ from ._models import Pagination
+ from ._models import PiiDocumentEntities
+ from ._models import PiiResult
+ from ._models import PiiTask
+ from ._models import PiiTaskParameters
+ from ._models import RequestStatistics
+ from ._models import SentenceAssessment
+ from ._models import SentenceSentiment
+ from ._models import SentenceTarget
+ from ._models import SentimentConfidenceScorePerLabel
+ from ._models import SentimentResponse
+ from ._models import TargetConfidenceScoreLabel
+ from ._models import TargetRelation
+ from ._models import TasksState
+ from ._models import TaskState
+ from ._models import TextAnalyticsError
+ from ._models import TextAnalyticsWarning
+from ._text_analytics_client_enums import (
+ Association,
+ Certainty,
+ Conditionality,
+ DocumentSentimentValue,
+ ErrorCodeValue,
+ InnerErrorCodeValue,
+ PiiCategory,
+ RelationType,
+ SentenceSentimentValue,
+ State,
+ StringIndexType,
+ StringIndexTypeResponse,
+ TargetRelationType,
+ TokenSentimentValue,
+ WarningCodeValue,
+)
+
+__all__ = [
+ 'AnalyzeBatchInput',
+ 'AnalyzeJobMetadata',
+ 'AnalyzeJobState',
+ 'AnalyzeJobStateTasks',
+ 'AnalyzeJobStateTasksDetails',
+ 'AnalyzeJobStateTasksEntityLinkingTasksItem',
+ 'AnalyzeJobStateTasksEntityRecognitionPiiTasksItem',
+ 'AnalyzeJobStateTasksEntityRecognitionTasksItem',
+ 'AnalyzeJobStateTasksKeyPhraseExtractionTasksItem',
+ 'DetectedLanguage',
+ 'DocumentEntities',
+ 'DocumentError',
+ 'DocumentHealthcareEntities',
+ 'DocumentKeyPhrases',
+ 'DocumentLanguage',
+ 'DocumentLinkedEntities',
+ 'DocumentSentiment',
+ 'DocumentStatistics',
+ 'EntitiesResult',
+ 'EntitiesTask',
+ 'EntitiesTaskParameters',
+ 'Entity',
+ 'EntityLinkingResult',
+ 'EntityLinkingTask',
+ 'EntityLinkingTaskParameters',
+ 'ErrorResponse',
+ 'HealthcareAssertion',
+ 'HealthcareEntity',
+ 'HealthcareEntityLink',
+ 'HealthcareJobState',
+ 'HealthcareRelation',
+ 'HealthcareRelationEntity',
+ 'HealthcareResult',
+ 'InnerError',
+ 'JobDescriptor',
+ 'JobManifest',
+ 'JobManifestTasks',
+ 'JobMetadata',
+ 'KeyPhraseResult',
+ 'KeyPhrasesTask',
+ 'KeyPhrasesTaskParameters',
+ 'LanguageBatchInput',
+ 'LanguageInput',
+ 'LanguageResult',
+ 'LinkedEntity',
+ 'Match',
+ 'MultiLanguageBatchInput',
+ 'MultiLanguageInput',
+ 'Pagination',
+ 'PiiDocumentEntities',
+ 'PiiResult',
+ 'PiiTask',
+ 'PiiTaskParameters',
+ 'RequestStatistics',
+ 'SentenceAssessment',
+ 'SentenceSentiment',
+ 'SentenceTarget',
+ 'SentimentConfidenceScorePerLabel',
+ 'SentimentResponse',
+ 'TargetConfidenceScoreLabel',
+ 'TargetRelation',
+ 'TasksState',
+ 'TaskState',
+ 'TextAnalyticsError',
+ 'TextAnalyticsWarning',
+ 'StringIndexTypeResponse',
+ 'PiiCategory',
+ 'ErrorCodeValue',
+ 'InnerErrorCodeValue',
+ 'WarningCodeValue',
+ 'DocumentSentimentValue',
+ 'SentenceSentimentValue',
+ 'TokenSentimentValue',
+ 'TargetRelationType',
+ 'State',
+ 'Conditionality',
+ 'Certainty',
+ 'Association',
+ 'RelationType',
+ 'StringIndexType',
+]
diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_4/models/_models.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_4/models/_models.py
new file mode 100644
index 000000000000..fb899d32272e
--- /dev/null
+++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_4/models/_models.py
@@ -0,0 +1,2306 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from msrest.serialization import Model
+
+
class AnalyzeBatchInput(Model):
    """Request payload describing a batch analysis job.

    All required parameters must be populated in order to send to Azure.

    :param display_name: Optional display name for the analysis job.
    :type display_name: str
    :param analysis_input: Required.
    :type analysis_input:
     ~azure.ai.textanalytics.v3_1_preview_4.models.MultiLanguageBatchInput
    :param tasks: Required. The set of tasks to execute on the input
     documents. Cannot specify the same task more than once.
    :type tasks:
     ~azure.ai.textanalytics.v3_1_preview_4.models.JobManifestTasks
    """

    # Fields the service requires in the payload.
    _validation = {
        'analysis_input': {'required': True},
        'tasks': {'required': True},
    }

    # Python attribute -> wire key / msrest type mapping.
    _attribute_map = {
        'display_name': {'key': 'displayName', 'type': 'str'},
        'analysis_input': {'key': 'analysisInput', 'type': 'MultiLanguageBatchInput'},
        'tasks': {'key': 'tasks', 'type': 'JobManifestTasks'},
    }

    def __init__(self, **kwargs):
        super(AnalyzeBatchInput, self).__init__(**kwargs)
        self.display_name = kwargs.get('display_name')
        self.analysis_input = kwargs.get('analysis_input')
        self.tasks = kwargs.get('tasks')
+
+
class JobMetadata(Model):
    """Status metadata common to long-running jobs.

    All required parameters must be populated in order to send to Azure.

    :param created_date_time: Required.
    :type created_date_time: datetime
    :param expiration_date_time:
    :type expiration_date_time: datetime
    :param job_id: Required.
    :type job_id: str
    :param last_update_date_time: Required.
    :type last_update_date_time: datetime
    :param status: Required. Possible values include: 'notStarted', 'running',
     'succeeded', 'failed', 'cancelled', 'cancelling', 'partiallyCompleted'
    :type status: str or ~azure.ai.textanalytics.v3_1_preview_4.models.State
    """

    # Fields the service requires in the payload.
    _validation = {
        'created_date_time': {'required': True},
        'job_id': {'required': True},
        'last_update_date_time': {'required': True},
        'status': {'required': True},
    }

    # Python attribute -> wire key / msrest type mapping.
    _attribute_map = {
        'created_date_time': {'key': 'createdDateTime', 'type': 'iso-8601'},
        'expiration_date_time': {'key': 'expirationDateTime', 'type': 'iso-8601'},
        'job_id': {'key': 'jobId', 'type': 'str'},
        'last_update_date_time': {'key': 'lastUpdateDateTime', 'type': 'iso-8601'},
        'status': {'key': 'status', 'type': 'State'},
    }

    def __init__(self, **kwargs):
        super(JobMetadata, self).__init__(**kwargs)
        self.created_date_time = kwargs.get('created_date_time')
        self.expiration_date_time = kwargs.get('expiration_date_time')
        self.job_id = kwargs.get('job_id')
        self.last_update_date_time = kwargs.get('last_update_date_time')
        self.status = kwargs.get('status')
+
+
class AnalyzeJobMetadata(JobMetadata):
    """Job metadata extended with the analysis job's display name.

    All required parameters must be populated in order to send to Azure.

    :param created_date_time: Required.
    :type created_date_time: datetime
    :param expiration_date_time:
    :type expiration_date_time: datetime
    :param job_id: Required.
    :type job_id: str
    :param last_update_date_time: Required.
    :type last_update_date_time: datetime
    :param status: Required. Possible values include: 'notStarted', 'running',
     'succeeded', 'failed', 'cancelled', 'cancelling', 'partiallyCompleted'
    :type status: str or ~azure.ai.textanalytics.v3_1_preview_4.models.State
    :param display_name:
    :type display_name: str
    """

    # Fields the service requires in the payload (inherited fields repeated).
    _validation = {
        'created_date_time': {'required': True},
        'job_id': {'required': True},
        'last_update_date_time': {'required': True},
        'status': {'required': True},
    }

    # Python attribute -> wire key / msrest type mapping.
    _attribute_map = {
        'created_date_time': {'key': 'createdDateTime', 'type': 'iso-8601'},
        'expiration_date_time': {'key': 'expirationDateTime', 'type': 'iso-8601'},
        'job_id': {'key': 'jobId', 'type': 'str'},
        'last_update_date_time': {'key': 'lastUpdateDateTime', 'type': 'iso-8601'},
        'status': {'key': 'status', 'type': 'State'},
        'display_name': {'key': 'displayName', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        # Base class populates the shared job fields from kwargs.
        super(AnalyzeJobMetadata, self).__init__(**kwargs)
        self.display_name = kwargs.get('display_name')
+
+
class AnalyzeJobState(Model):
    """State of an analyze job: task progress, errors, statistics and paging.

    All required parameters must be populated in order to send to Azure.

    :param display_name:
    :type display_name: str
    :param tasks: Required.
    :type tasks:
     ~azure.ai.textanalytics.v3_1_preview_4.models.AnalyzeJobStateTasks
    :param errors:
    :type errors:
     list[~azure.ai.textanalytics.v3_1_preview_4.models.TextAnalyticsError]
    :param statistics:
    :type statistics:
     ~azure.ai.textanalytics.v3_1_preview_4.models.RequestStatistics
    :param next_link:
    :type next_link: str
    """

    # Fields the service requires in the payload.
    _validation = {
        'tasks': {'required': True},
    }

    # Python attribute -> wire key / msrest type mapping.
    # next_link maps to the JSON-pointer-style '@nextLink' wire key.
    _attribute_map = {
        'display_name': {'key': 'displayName', 'type': 'str'},
        'tasks': {'key': 'tasks', 'type': 'AnalyzeJobStateTasks'},
        'errors': {'key': 'errors', 'type': '[TextAnalyticsError]'},
        'statistics': {'key': 'statistics', 'type': 'RequestStatistics'},
        'next_link': {'key': '@nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(AnalyzeJobState, self).__init__(**kwargs)
        self.display_name = kwargs.get('display_name')
        self.tasks = kwargs.get('tasks')
        self.errors = kwargs.get('errors')
        self.statistics = kwargs.get('statistics')
        self.next_link = kwargs.get('next_link')
+
+
class AnalyzeJobStateTasks(Model):
    """Progress counters and per-task-type results for an analyze job.

    All required parameters must be populated in order to send to Azure.

    :param details:
    :type details:
     ~azure.ai.textanalytics.v3_1_preview_4.models.AnalyzeJobStateTasksDetails
    :param completed: Required.
    :type completed: int
    :param failed: Required.
    :type failed: int
    :param in_progress: Required.
    :type in_progress: int
    :param total: Required.
    :type total: int
    :param entity_recognition_tasks:
    :type entity_recognition_tasks:
     list[~azure.ai.textanalytics.v3_1_preview_4.models.AnalyzeJobStateTasksEntityRecognitionTasksItem]
    :param entity_recognition_pii_tasks:
    :type entity_recognition_pii_tasks:
     list[~azure.ai.textanalytics.v3_1_preview_4.models.AnalyzeJobStateTasksEntityRecognitionPiiTasksItem]
    :param key_phrase_extraction_tasks:
    :type key_phrase_extraction_tasks:
     list[~azure.ai.textanalytics.v3_1_preview_4.models.AnalyzeJobStateTasksKeyPhraseExtractionTasksItem]
    :param entity_linking_tasks:
    :type entity_linking_tasks:
     list[~azure.ai.textanalytics.v3_1_preview_4.models.AnalyzeJobStateTasksEntityLinkingTasksItem]
    """

    # Fields the service requires in the payload.
    _validation = {
        'completed': {'required': True},
        'failed': {'required': True},
        'in_progress': {'required': True},
        'total': {'required': True},
    }

    # Python attribute -> wire key / msrest type mapping.
    _attribute_map = {
        'details': {'key': 'details', 'type': 'AnalyzeJobStateTasksDetails'},
        'completed': {'key': 'completed', 'type': 'int'},
        'failed': {'key': 'failed', 'type': 'int'},
        'in_progress': {'key': 'inProgress', 'type': 'int'},
        'total': {'key': 'total', 'type': 'int'},
        'entity_recognition_tasks': {'key': 'entityRecognitionTasks', 'type': '[AnalyzeJobStateTasksEntityRecognitionTasksItem]'},
        'entity_recognition_pii_tasks': {'key': 'entityRecognitionPiiTasks', 'type': '[AnalyzeJobStateTasksEntityRecognitionPiiTasksItem]'},
        'key_phrase_extraction_tasks': {'key': 'keyPhraseExtractionTasks', 'type': '[AnalyzeJobStateTasksKeyPhraseExtractionTasksItem]'},
        'entity_linking_tasks': {'key': 'entityLinkingTasks', 'type': '[AnalyzeJobStateTasksEntityLinkingTasksItem]'},
    }

    def __init__(self, **kwargs):
        super(AnalyzeJobStateTasks, self).__init__(**kwargs)
        self.details = kwargs.get('details')
        self.completed = kwargs.get('completed')
        self.failed = kwargs.get('failed')
        self.in_progress = kwargs.get('in_progress')
        self.total = kwargs.get('total')
        self.entity_recognition_tasks = kwargs.get('entity_recognition_tasks')
        self.entity_recognition_pii_tasks = kwargs.get('entity_recognition_pii_tasks')
        self.key_phrase_extraction_tasks = kwargs.get('key_phrase_extraction_tasks')
        self.entity_linking_tasks = kwargs.get('entity_linking_tasks')
+
+
class TaskState(Model):
    """Status fields shared by every task state model.

    All required parameters must be populated in order to send to Azure.

    :param last_update_date_time: Required.
    :type last_update_date_time: datetime
    :param name:
    :type name: str
    :param status: Required.
    :type status: object
    """

    # Fields the service requires in the payload.
    _validation = {
        'last_update_date_time': {'required': True},
        'status': {'required': True},
    }

    # Python attribute -> wire key / msrest type mapping.
    _attribute_map = {
        'last_update_date_time': {'key': 'lastUpdateDateTime', 'type': 'iso-8601'},
        'name': {'key': 'name', 'type': 'str'},
        'status': {'key': 'status', 'type': 'object'},
    }

    def __init__(self, **kwargs):
        super(TaskState, self).__init__(**kwargs)
        self.last_update_date_time = kwargs.get('last_update_date_time')
        self.name = kwargs.get('name')
        self.status = kwargs.get('status')
+
+
class AnalyzeJobStateTasksDetails(TaskState):
    """Task state details; adds no fields beyond TaskState.

    All required parameters must be populated in order to send to Azure.

    :param last_update_date_time: Required.
    :type last_update_date_time: datetime
    :param name:
    :type name: str
    :param status: Required.
    :type status: object
    """

    # Fields the service requires in the payload (same as TaskState).
    _validation = {
        'last_update_date_time': {'required': True},
        'status': {'required': True},
    }

    # Python attribute -> wire key / msrest type mapping.
    _attribute_map = {
        'last_update_date_time': {'key': 'lastUpdateDateTime', 'type': 'iso-8601'},
        'name': {'key': 'name', 'type': 'str'},
        'status': {'key': 'status', 'type': 'object'},
    }

    def __init__(self, **kwargs):
        # All state is populated by TaskState.
        super(AnalyzeJobStateTasksDetails, self).__init__(**kwargs)
+
+
class AnalyzeJobStateTasksEntityLinkingTasksItem(TaskState):
    """Task state together with entity linking results.

    All required parameters must be populated in order to send to Azure.

    :param last_update_date_time: Required.
    :type last_update_date_time: datetime
    :param name:
    :type name: str
    :param status: Required.
    :type status: object
    :param results:
    :type results:
     ~azure.ai.textanalytics.v3_1_preview_4.models.EntityLinkingResult
    """

    # Fields the service requires in the payload.
    _validation = {
        'last_update_date_time': {'required': True},
        'status': {'required': True},
    }

    # Python attribute -> wire key / msrest type mapping.
    _attribute_map = {
        'last_update_date_time': {'key': 'lastUpdateDateTime', 'type': 'iso-8601'},
        'name': {'key': 'name', 'type': 'str'},
        'status': {'key': 'status', 'type': 'object'},
        'results': {'key': 'results', 'type': 'EntityLinkingResult'},
    }

    def __init__(self, **kwargs):
        super(AnalyzeJobStateTasksEntityLinkingTasksItem, self).__init__(**kwargs)
        self.results = kwargs.get('results')
+
+
class AnalyzeJobStateTasksEntityRecognitionPiiTasksItem(TaskState):
    """Task state together with PII entity recognition results.

    All required parameters must be populated in order to send to Azure.

    :param last_update_date_time: Required.
    :type last_update_date_time: datetime
    :param name:
    :type name: str
    :param status: Required.
    :type status: object
    :param results:
    :type results: ~azure.ai.textanalytics.v3_1_preview_4.models.PiiResult
    """

    # Fields the service requires in the payload.
    _validation = {
        'last_update_date_time': {'required': True},
        'status': {'required': True},
    }

    # Python attribute -> wire key / msrest type mapping.
    _attribute_map = {
        'last_update_date_time': {'key': 'lastUpdateDateTime', 'type': 'iso-8601'},
        'name': {'key': 'name', 'type': 'str'},
        'status': {'key': 'status', 'type': 'object'},
        'results': {'key': 'results', 'type': 'PiiResult'},
    }

    def __init__(self, **kwargs):
        super(AnalyzeJobStateTasksEntityRecognitionPiiTasksItem, self).__init__(**kwargs)
        self.results = kwargs.get('results')
+
+
class AnalyzeJobStateTasksEntityRecognitionTasksItem(TaskState):
    """Task state together with entity recognition results.

    All required parameters must be populated in order to send to Azure.

    :param last_update_date_time: Required.
    :type last_update_date_time: datetime
    :param name:
    :type name: str
    :param status: Required.
    :type status: object
    :param results:
    :type results:
     ~azure.ai.textanalytics.v3_1_preview_4.models.EntitiesResult
    """

    # Fields the service requires in the payload.
    _validation = {
        'last_update_date_time': {'required': True},
        'status': {'required': True},
    }

    # Python attribute -> wire key / msrest type mapping.
    _attribute_map = {
        'last_update_date_time': {'key': 'lastUpdateDateTime', 'type': 'iso-8601'},
        'name': {'key': 'name', 'type': 'str'},
        'status': {'key': 'status', 'type': 'object'},
        'results': {'key': 'results', 'type': 'EntitiesResult'},
    }

    def __init__(self, **kwargs):
        super(AnalyzeJobStateTasksEntityRecognitionTasksItem, self).__init__(**kwargs)
        self.results = kwargs.get('results')
+
+
class AnalyzeJobStateTasksKeyPhraseExtractionTasksItem(TaskState):
    """Task state together with key phrase extraction results.

    All required parameters must be populated in order to send to Azure.

    :param last_update_date_time: Required.
    :type last_update_date_time: datetime
    :param name:
    :type name: str
    :param status: Required.
    :type status: object
    :param results:
    :type results:
     ~azure.ai.textanalytics.v3_1_preview_4.models.KeyPhraseResult
    """

    # Fields the service requires in the payload.
    _validation = {
        'last_update_date_time': {'required': True},
        'status': {'required': True},
    }

    # Python attribute -> wire key / msrest type mapping.
    _attribute_map = {
        'last_update_date_time': {'key': 'lastUpdateDateTime', 'type': 'iso-8601'},
        'name': {'key': 'name', 'type': 'str'},
        'status': {'key': 'status', 'type': 'object'},
        'results': {'key': 'results', 'type': 'KeyPhraseResult'},
    }

    def __init__(self, **kwargs):
        super(AnalyzeJobStateTasksKeyPhraseExtractionTasksItem, self).__init__(**kwargs)
        self.results = kwargs.get('results')
+
+
class DetectedLanguage(Model):
    """A language detected for a document, with its confidence.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. Long name of a detected language (e.g. English,
     French).
    :type name: str
    :param iso6391_name: Required. A two letter representation of the detected
     language according to the ISO 639-1 standard (e.g. en, fr).
    :type iso6391_name: str
    :param confidence_score: Required. A confidence score between 0 and 1.
     Scores close to 1 indicate 100% certainty that the identified language is
     true.
    :type confidence_score: float
    """

    # Fields the service requires in the payload.
    _validation = {
        'name': {'required': True},
        'iso6391_name': {'required': True},
        'confidence_score': {'required': True},
    }

    # Python attribute -> wire key / msrest type mapping.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'iso6391_name': {'key': 'iso6391Name', 'type': 'str'},
        'confidence_score': {'key': 'confidenceScore', 'type': 'float'},
    }

    def __init__(self, **kwargs):
        super(DetectedLanguage, self).__init__(**kwargs)
        self.name = kwargs.get('name')
        self.iso6391_name = kwargs.get('iso6391_name')
        self.confidence_score = kwargs.get('confidence_score')
+
+
class DocumentEntities(Model):
    """Entities recognized in a single document.

    All required parameters must be populated in order to send to Azure.

    :param id: Required. Unique, non-empty document identifier.
    :type id: str
    :param entities: Required. Recognized entities in the document.
    :type entities: list[~azure.ai.textanalytics.v3_1_preview_4.models.Entity]
    :param warnings: Required. Warnings encountered while processing document.
    :type warnings:
     list[~azure.ai.textanalytics.v3_1_preview_4.models.TextAnalyticsWarning]
    :param statistics: if showStats=true was specified in the request this
     field will contain information about the document payload.
    :type statistics:
     ~azure.ai.textanalytics.v3_1_preview_4.models.DocumentStatistics
    """

    # Fields the service requires in the payload.
    _validation = {
        'id': {'required': True},
        'entities': {'required': True},
        'warnings': {'required': True},
    }

    # Python attribute -> wire key / msrest type mapping.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'entities': {'key': 'entities', 'type': '[Entity]'},
        'warnings': {'key': 'warnings', 'type': '[TextAnalyticsWarning]'},
        'statistics': {'key': 'statistics', 'type': 'DocumentStatistics'},
    }

    def __init__(self, **kwargs):
        super(DocumentEntities, self).__init__(**kwargs)
        self.id = kwargs.get('id')
        self.entities = kwargs.get('entities')
        self.warnings = kwargs.get('warnings')
        self.statistics = kwargs.get('statistics')
+
+
+class DocumentError(Model):
+    """An error associated with a single input document.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param id: Required. Document Id.
+    :type id: str
+    :param error: Required. Document Error.
+    :type error:
+     ~azure.ai.textanalytics.v3_1_preview_4.models.TextAnalyticsError
+    """
+
+    # Fields the service requires to be populated before sending.
+    _validation = {
+        'id': {'required': True},
+        'error': {'required': True},
+    }
+
+    # Attribute -> JSON wire key / type map used for (de)serialization.
+    _attribute_map = {
+        'id': {'key': 'id', 'type': 'str'},
+        'error': {'key': 'error', 'type': 'TextAnalyticsError'},
+    }
+
+    def __init__(self, **kwargs):
+        super(DocumentError, self).__init__(**kwargs)
+        self.id = kwargs.get('id', None)
+        self.error = kwargs.get('error', None)
+
+
+class DocumentHealthcareEntities(Model):
+    """Healthcare entities and their relations for a single input document.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param id: Required. Unique, non-empty document identifier.
+    :type id: str
+    :param entities: Required. Healthcare entities.
+    :type entities:
+     list[~azure.ai.textanalytics.v3_1_preview_4.models.HealthcareEntity]
+    :param relations: Required. Healthcare entity relations.
+    :type relations:
+     list[~azure.ai.textanalytics.v3_1_preview_4.models.HealthcareRelation]
+    :param warnings: Required. Warnings encountered while processing document.
+    :type warnings:
+     list[~azure.ai.textanalytics.v3_1_preview_4.models.TextAnalyticsWarning]
+    :param statistics: if showStats=true was specified in the request this
+     field will contain information about the document payload.
+    :type statistics:
+     ~azure.ai.textanalytics.v3_1_preview_4.models.DocumentStatistics
+    """
+
+    # Fields the service requires to be populated before sending.
+    _validation = {
+        'id': {'required': True},
+        'entities': {'required': True},
+        'relations': {'required': True},
+        'warnings': {'required': True},
+    }
+
+    # Attribute -> JSON wire key / type map used for (de)serialization.
+    _attribute_map = {
+        'id': {'key': 'id', 'type': 'str'},
+        'entities': {'key': 'entities', 'type': '[HealthcareEntity]'},
+        'relations': {'key': 'relations', 'type': '[HealthcareRelation]'},
+        'warnings': {'key': 'warnings', 'type': '[TextAnalyticsWarning]'},
+        'statistics': {'key': 'statistics', 'type': 'DocumentStatistics'},
+    }
+
+    def __init__(self, **kwargs):
+        super(DocumentHealthcareEntities, self).__init__(**kwargs)
+        self.id = kwargs.get('id', None)
+        self.entities = kwargs.get('entities', None)
+        self.relations = kwargs.get('relations', None)
+        self.warnings = kwargs.get('warnings', None)
+        self.statistics = kwargs.get('statistics', None)
+
+
+class DocumentKeyPhrases(Model):
+    """Key phrases extracted from a single input document.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param id: Required. Unique, non-empty document identifier.
+    :type id: str
+    :param key_phrases: Required. A list of representative words or phrases.
+     The number of key phrases returned is proportional to the number of words
+     in the input document.
+    :type key_phrases: list[str]
+    :param warnings: Required. Warnings encountered while processing document.
+    :type warnings:
+     list[~azure.ai.textanalytics.v3_1_preview_4.models.TextAnalyticsWarning]
+    :param statistics: if showStats=true was specified in the request this
+     field will contain information about the document payload.
+    :type statistics:
+     ~azure.ai.textanalytics.v3_1_preview_4.models.DocumentStatistics
+    """
+
+    # Fields the service requires to be populated before sending.
+    _validation = {
+        'id': {'required': True},
+        'key_phrases': {'required': True},
+        'warnings': {'required': True},
+    }
+
+    # Attribute -> JSON wire key / type map used for (de)serialization.
+    _attribute_map = {
+        'id': {'key': 'id', 'type': 'str'},
+        'key_phrases': {'key': 'keyPhrases', 'type': '[str]'},
+        'warnings': {'key': 'warnings', 'type': '[TextAnalyticsWarning]'},
+        'statistics': {'key': 'statistics', 'type': 'DocumentStatistics'},
+    }
+
+    def __init__(self, **kwargs):
+        super(DocumentKeyPhrases, self).__init__(**kwargs)
+        self.id = kwargs.get('id', None)
+        self.key_phrases = kwargs.get('key_phrases', None)
+        self.warnings = kwargs.get('warnings', None)
+        self.statistics = kwargs.get('statistics', None)
+
+
+class DocumentLanguage(Model):
+    """Language detection result for a single input document.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param id: Required. Unique, non-empty document identifier.
+    :type id: str
+    :param detected_language: Required. Detected Language.
+    :type detected_language:
+     ~azure.ai.textanalytics.v3_1_preview_4.models.DetectedLanguage
+    :param warnings: Required. Warnings encountered while processing document.
+    :type warnings:
+     list[~azure.ai.textanalytics.v3_1_preview_4.models.TextAnalyticsWarning]
+    :param statistics: if showStats=true was specified in the request this
+     field will contain information about the document payload.
+    :type statistics:
+     ~azure.ai.textanalytics.v3_1_preview_4.models.DocumentStatistics
+    """
+
+    # Fields the service requires to be populated before sending.
+    _validation = {
+        'id': {'required': True},
+        'detected_language': {'required': True},
+        'warnings': {'required': True},
+    }
+
+    # Attribute -> JSON wire key / type map used for (de)serialization.
+    _attribute_map = {
+        'id': {'key': 'id', 'type': 'str'},
+        'detected_language': {'key': 'detectedLanguage', 'type': 'DetectedLanguage'},
+        'warnings': {'key': 'warnings', 'type': '[TextAnalyticsWarning]'},
+        'statistics': {'key': 'statistics', 'type': 'DocumentStatistics'},
+    }
+
+    def __init__(self, **kwargs):
+        super(DocumentLanguage, self).__init__(**kwargs)
+        self.id = kwargs.get('id', None)
+        self.detected_language = kwargs.get('detected_language', None)
+        self.warnings = kwargs.get('warnings', None)
+        self.statistics = kwargs.get('statistics', None)
+
+
+class DocumentLinkedEntities(Model):
+    """Linked (well-known) entities recognized in a single input document.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param id: Required. Unique, non-empty document identifier.
+    :type id: str
+    :param entities: Required. Recognized well known entities in the document.
+    :type entities:
+     list[~azure.ai.textanalytics.v3_1_preview_4.models.LinkedEntity]
+    :param warnings: Required. Warnings encountered while processing document.
+    :type warnings:
+     list[~azure.ai.textanalytics.v3_1_preview_4.models.TextAnalyticsWarning]
+    :param statistics: if showStats=true was specified in the request this
+     field will contain information about the document payload.
+    :type statistics:
+     ~azure.ai.textanalytics.v3_1_preview_4.models.DocumentStatistics
+    """
+
+    # Fields the service requires to be populated before sending.
+    _validation = {
+        'id': {'required': True},
+        'entities': {'required': True},
+        'warnings': {'required': True},
+    }
+
+    # Attribute -> JSON wire key / type map used for (de)serialization.
+    _attribute_map = {
+        'id': {'key': 'id', 'type': 'str'},
+        'entities': {'key': 'entities', 'type': '[LinkedEntity]'},
+        'warnings': {'key': 'warnings', 'type': '[TextAnalyticsWarning]'},
+        'statistics': {'key': 'statistics', 'type': 'DocumentStatistics'},
+    }
+
+    def __init__(self, **kwargs):
+        super(DocumentLinkedEntities, self).__init__(**kwargs)
+        self.id = kwargs.get('id', None)
+        self.entities = kwargs.get('entities', None)
+        self.warnings = kwargs.get('warnings', None)
+        self.statistics = kwargs.get('statistics', None)
+
+
+class DocumentSentiment(Model):
+    """Sentiment analysis result for a single input document.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param id: Required. Unique, non-empty document identifier.
+    :type id: str
+    :param sentiment: Required. Predicted sentiment for document (Negative,
+     Neutral, Positive, or Mixed). Possible values include: 'positive',
+     'neutral', 'negative', 'mixed'
+    :type sentiment: str or
+     ~azure.ai.textanalytics.v3_1_preview_4.models.DocumentSentimentValue
+    :param statistics: if showStats=true was specified in the request this
+     field will contain information about the document payload.
+    :type statistics:
+     ~azure.ai.textanalytics.v3_1_preview_4.models.DocumentStatistics
+    :param confidence_scores: Required. Document level sentiment confidence
+     scores between 0 and 1 for each sentiment class.
+    :type confidence_scores:
+     ~azure.ai.textanalytics.v3_1_preview_4.models.SentimentConfidenceScorePerLabel
+    :param sentences: Required. Sentence level sentiment analysis.
+    :type sentences:
+     list[~azure.ai.textanalytics.v3_1_preview_4.models.SentenceSentiment]
+    :param warnings: Required. Warnings encountered while processing document.
+    :type warnings:
+     list[~azure.ai.textanalytics.v3_1_preview_4.models.TextAnalyticsWarning]
+    """
+
+    # Fields the service requires to be populated before sending.
+    _validation = {
+        'id': {'required': True},
+        'sentiment': {'required': True},
+        'confidence_scores': {'required': True},
+        'sentences': {'required': True},
+        'warnings': {'required': True},
+    }
+
+    # Attribute -> JSON wire key / type map used for (de)serialization.
+    _attribute_map = {
+        'id': {'key': 'id', 'type': 'str'},
+        'sentiment': {'key': 'sentiment', 'type': 'DocumentSentimentValue'},
+        'statistics': {'key': 'statistics', 'type': 'DocumentStatistics'},
+        'confidence_scores': {'key': 'confidenceScores', 'type': 'SentimentConfidenceScorePerLabel'},
+        'sentences': {'key': 'sentences', 'type': '[SentenceSentiment]'},
+        'warnings': {'key': 'warnings', 'type': '[TextAnalyticsWarning]'},
+    }
+
+    def __init__(self, **kwargs):
+        super(DocumentSentiment, self).__init__(**kwargs)
+        self.id = kwargs.get('id', None)
+        self.sentiment = kwargs.get('sentiment', None)
+        self.statistics = kwargs.get('statistics', None)
+        self.confidence_scores = kwargs.get('confidence_scores', None)
+        self.sentences = kwargs.get('sentences', None)
+        self.warnings = kwargs.get('warnings', None)
+
+
+class DocumentStatistics(Model):
+    """Statistics about a document payload; present only when showStats=true
+    was specified in the request.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param characters_count: Required. Number of text elements recognized in
+     the document.
+    :type characters_count: int
+    :param transactions_count: Required. Number of transactions for the
+     document.
+    :type transactions_count: int
+    """
+
+    # Fields the service requires to be populated before sending.
+    _validation = {
+        'characters_count': {'required': True},
+        'transactions_count': {'required': True},
+    }
+
+    # Attribute -> JSON wire key / type map used for (de)serialization.
+    _attribute_map = {
+        'characters_count': {'key': 'charactersCount', 'type': 'int'},
+        'transactions_count': {'key': 'transactionsCount', 'type': 'int'},
+    }
+
+    def __init__(self, **kwargs):
+        super(DocumentStatistics, self).__init__(**kwargs)
+        self.characters_count = kwargs.get('characters_count', None)
+        self.transactions_count = kwargs.get('transactions_count', None)
+
+
+class EntitiesResult(Model):
+    """Entity recognition results for a batch of documents.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param documents: Required. Response by document
+    :type documents:
+     list[~azure.ai.textanalytics.v3_1_preview_4.models.DocumentEntities]
+    :param errors: Required. Errors by document id.
+    :type errors:
+     list[~azure.ai.textanalytics.v3_1_preview_4.models.DocumentError]
+    :param statistics: if showStats=true was specified in the request this
+     field will contain information about the request payload.
+    :type statistics:
+     ~azure.ai.textanalytics.v3_1_preview_4.models.RequestStatistics
+    :param model_version: Required. This field indicates which model is used
+     for scoring.
+    :type model_version: str
+    """
+
+    # Fields the service requires to be populated before sending.
+    _validation = {
+        'documents': {'required': True},
+        'errors': {'required': True},
+        'model_version': {'required': True},
+    }
+
+    # Attribute -> JSON wire key / type map used for (de)serialization.
+    _attribute_map = {
+        'documents': {'key': 'documents', 'type': '[DocumentEntities]'},
+        'errors': {'key': 'errors', 'type': '[DocumentError]'},
+        'statistics': {'key': 'statistics', 'type': 'RequestStatistics'},
+        'model_version': {'key': 'modelVersion', 'type': 'str'},
+    }
+
+    def __init__(self, **kwargs):
+        super(EntitiesResult, self).__init__(**kwargs)
+        self.documents = kwargs.get('documents', None)
+        self.errors = kwargs.get('errors', None)
+        self.statistics = kwargs.get('statistics', None)
+        self.model_version = kwargs.get('model_version', None)
+
+
+class EntitiesTask(Model):
+    """An entity recognition task to run as part of an analysis job.
+
+    :param parameters: Optional parameters controlling the task.
+    :type parameters:
+     ~azure.ai.textanalytics.v3_1_preview_4.models.EntitiesTaskParameters
+    """
+
+    # Attribute -> JSON wire key / type map used for (de)serialization.
+    _attribute_map = {
+        'parameters': {'key': 'parameters', 'type': 'EntitiesTaskParameters'},
+    }
+
+    def __init__(self, **kwargs):
+        super(EntitiesTask, self).__init__(**kwargs)
+        self.parameters = kwargs.get('parameters', None)
+
+
+class EntitiesTaskParameters(Model):
+    """Optional parameters for an entity recognition task.
+
+    :param model_version: Default value: "latest" .
+    :type model_version: str
+    :param string_index_type: Possible values include: 'TextElements_v8',
+     'UnicodeCodePoint', 'Utf16CodeUnit'. Default value: "TextElements_v8" .
+    :type string_index_type: str or
+     ~azure.ai.textanalytics.v3_1_preview_4.models.StringIndexTypeResponse
+    """
+
+    # Attribute -> JSON wire key / type map used for (de)serialization.
+    # Note the wire key 'model-version' is hyphenated, unlike most keys.
+    _attribute_map = {
+        'model_version': {'key': 'model-version', 'type': 'str'},
+        'string_index_type': {'key': 'stringIndexType', 'type': 'str'},
+    }
+
+    def __init__(self, **kwargs):
+        super(EntitiesTaskParameters, self).__init__(**kwargs)
+        # Defaults applied here, not in _attribute_map, so they serialize.
+        self.model_version = kwargs.get('model_version', "latest")
+        self.string_index_type = kwargs.get('string_index_type', "TextElements_v8")
+
+
+class Entity(Model):
+    """A single entity recognized in a document, with its position and
+    confidence.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param text: Required. Entity text as appears in the request.
+    :type text: str
+    :param category: Required. Entity type.
+    :type category: str
+    :param subcategory: (Optional) Entity sub type.
+    :type subcategory: str
+    :param offset: Required. Start position for the entity text. Use of
+     different 'stringIndexType' values can affect the offset returned.
+    :type offset: int
+    :param length: Required. Length for the entity text. Use of different
+     'stringIndexType' values can affect the length returned.
+    :type length: int
+    :param confidence_score: Required. Confidence score between 0 and 1 of the
+     extracted entity.
+    :type confidence_score: float
+    """
+
+    # Fields the service requires to be populated before sending.
+    _validation = {
+        'text': {'required': True},
+        'category': {'required': True},
+        'offset': {'required': True},
+        'length': {'required': True},
+        'confidence_score': {'required': True},
+    }
+
+    # Attribute -> JSON wire key / type map used for (de)serialization.
+    _attribute_map = {
+        'text': {'key': 'text', 'type': 'str'},
+        'category': {'key': 'category', 'type': 'str'},
+        'subcategory': {'key': 'subcategory', 'type': 'str'},
+        'offset': {'key': 'offset', 'type': 'int'},
+        'length': {'key': 'length', 'type': 'int'},
+        'confidence_score': {'key': 'confidenceScore', 'type': 'float'},
+    }
+
+    def __init__(self, **kwargs):
+        super(Entity, self).__init__(**kwargs)
+        self.text = kwargs.get('text', None)
+        self.category = kwargs.get('category', None)
+        self.subcategory = kwargs.get('subcategory', None)
+        self.offset = kwargs.get('offset', None)
+        self.length = kwargs.get('length', None)
+        self.confidence_score = kwargs.get('confidence_score', None)
+
+
+class EntityLinkingResult(Model):
+    """Entity linking results for a batch of documents.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param documents: Required. Response by document
+    :type documents:
+     list[~azure.ai.textanalytics.v3_1_preview_4.models.DocumentLinkedEntities]
+    :param errors: Required. Errors by document id.
+    :type errors:
+     list[~azure.ai.textanalytics.v3_1_preview_4.models.DocumentError]
+    :param statistics: if showStats=true was specified in the request this
+     field will contain information about the request payload.
+    :type statistics:
+     ~azure.ai.textanalytics.v3_1_preview_4.models.RequestStatistics
+    :param model_version: Required. This field indicates which model is used
+     for scoring.
+    :type model_version: str
+    """
+
+    # Fields the service requires to be populated before sending.
+    _validation = {
+        'documents': {'required': True},
+        'errors': {'required': True},
+        'model_version': {'required': True},
+    }
+
+    # Attribute -> JSON wire key / type map used for (de)serialization.
+    _attribute_map = {
+        'documents': {'key': 'documents', 'type': '[DocumentLinkedEntities]'},
+        'errors': {'key': 'errors', 'type': '[DocumentError]'},
+        'statistics': {'key': 'statistics', 'type': 'RequestStatistics'},
+        'model_version': {'key': 'modelVersion', 'type': 'str'},
+    }
+
+    def __init__(self, **kwargs):
+        super(EntityLinkingResult, self).__init__(**kwargs)
+        self.documents = kwargs.get('documents', None)
+        self.errors = kwargs.get('errors', None)
+        self.statistics = kwargs.get('statistics', None)
+        self.model_version = kwargs.get('model_version', None)
+
+
+class EntityLinkingTask(Model):
+    """An entity linking task to run as part of an analysis job.
+
+    :param parameters: Optional parameters controlling the task.
+    :type parameters:
+     ~azure.ai.textanalytics.v3_1_preview_4.models.EntityLinkingTaskParameters
+    """
+
+    # Attribute -> JSON wire key / type map used for (de)serialization.
+    _attribute_map = {
+        'parameters': {'key': 'parameters', 'type': 'EntityLinkingTaskParameters'},
+    }
+
+    def __init__(self, **kwargs):
+        super(EntityLinkingTask, self).__init__(**kwargs)
+        self.parameters = kwargs.get('parameters', None)
+
+
+class EntityLinkingTaskParameters(Model):
+    """Optional parameters for an entity linking task.
+
+    :param model_version: Default value: "latest" .
+    :type model_version: str
+    :param string_index_type: Possible values include: 'TextElements_v8',
+     'UnicodeCodePoint', 'Utf16CodeUnit'. Default value: "TextElements_v8" .
+    :type string_index_type: str or
+     ~azure.ai.textanalytics.v3_1_preview_4.models.StringIndexTypeResponse
+    """
+
+    # Attribute -> JSON wire key / type map used for (de)serialization.
+    # Note the wire key 'model-version' is hyphenated, unlike most keys.
+    _attribute_map = {
+        'model_version': {'key': 'model-version', 'type': 'str'},
+        'string_index_type': {'key': 'stringIndexType', 'type': 'str'},
+    }
+
+    def __init__(self, **kwargs):
+        super(EntityLinkingTaskParameters, self).__init__(**kwargs)
+        # Defaults applied here, not in _attribute_map, so they serialize.
+        self.model_version = kwargs.get('model_version', "latest")
+        self.string_index_type = kwargs.get('string_index_type', "TextElements_v8")
+
+
+class ErrorResponse(Model):
+    """Top-level error response returned by the service.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param error: Required. Document Error.
+    :type error:
+     ~azure.ai.textanalytics.v3_1_preview_4.models.TextAnalyticsError
+    """
+
+    # Fields the service requires to be populated before sending.
+    _validation = {
+        'error': {'required': True},
+    }
+
+    # Attribute -> JSON wire key / type map used for (de)serialization.
+    _attribute_map = {
+        'error': {'key': 'error', 'type': 'TextAnalyticsError'},
+    }
+
+    def __init__(self, **kwargs):
+        super(ErrorResponse, self).__init__(**kwargs)
+        self.error = kwargs.get('error', None)
+
+
+class HealthcareAssertion(Model):
+    """Assertion qualifiers (conditionality, certainty, association) attached
+    to a healthcare entity.
+
+    :param conditionality: Describes any conditionality on the entity.
+     Possible values include: 'Hypothetical', 'Conditional'
+    :type conditionality: str or
+     ~azure.ai.textanalytics.v3_1_preview_4.models.Conditionality
+    :param certainty: Describes the entities certainty and polarity. Possible
+     values include: 'Positive', 'Positive Possible', 'Neutral Possible',
+     'Negative Possible', 'Negative'
+    :type certainty: str or
+     ~azure.ai.textanalytics.v3_1_preview_4.models.Certainty
+    :param association: Describes if the entity is the subject of the text or
+     if it describes someone else. Possible values include: 'subject', 'other'
+    :type association: str or
+     ~azure.ai.textanalytics.v3_1_preview_4.models.Association
+    """
+
+    # Attribute -> JSON wire key / type map used for (de)serialization.
+    # These entries use enum type names rather than plain 'str'.
+    _attribute_map = {
+        'conditionality': {'key': 'conditionality', 'type': 'Conditionality'},
+        'certainty': {'key': 'certainty', 'type': 'Certainty'},
+        'association': {'key': 'association', 'type': 'Association'},
+    }
+
+    def __init__(self, **kwargs):
+        super(HealthcareAssertion, self).__init__(**kwargs)
+        self.conditionality = kwargs.get('conditionality', None)
+        self.certainty = kwargs.get('certainty', None)
+        self.association = kwargs.get('association', None)
+
+
+class HealthcareEntity(Entity):
+    """A healthcare entity, extending Entity with assertion information, a
+    preferred name, and links into known data-source catalogs.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param text: Required. Entity text as appears in the request.
+    :type text: str
+    :param category: Required. Entity type.
+    :type category: str
+    :param subcategory: (Optional) Entity sub type.
+    :type subcategory: str
+    :param offset: Required. Start position for the entity text. Use of
+     different 'stringIndexType' values can affect the offset returned.
+    :type offset: int
+    :param length: Required. Length for the entity text. Use of different
+     'stringIndexType' values can affect the length returned.
+    :type length: int
+    :param confidence_score: Required. Confidence score between 0 and 1 of the
+     extracted entity.
+    :type confidence_score: float
+    :param assertion: Assertion qualifiers for the entity.
+    :type assertion:
+     ~azure.ai.textanalytics.v3_1_preview_4.models.HealthcareAssertion
+    :param name: Preferred name for the entity. Example: 'histologically'
+     would have a 'name' of 'histologic'.
+    :type name: str
+    :param links: Entity references in known data sources.
+    :type links:
+     list[~azure.ai.textanalytics.v3_1_preview_4.models.HealthcareEntityLink]
+    """
+
+    # Fields the service requires to be populated before sending
+    # (restates the base Entity requirements).
+    _validation = {
+        'text': {'required': True},
+        'category': {'required': True},
+        'offset': {'required': True},
+        'length': {'required': True},
+        'confidence_score': {'required': True},
+    }
+
+    # Attribute -> JSON wire key / type map used for (de)serialization;
+    # includes the inherited Entity fields plus the healthcare additions.
+    _attribute_map = {
+        'text': {'key': 'text', 'type': 'str'},
+        'category': {'key': 'category', 'type': 'str'},
+        'subcategory': {'key': 'subcategory', 'type': 'str'},
+        'offset': {'key': 'offset', 'type': 'int'},
+        'length': {'key': 'length', 'type': 'int'},
+        'confidence_score': {'key': 'confidenceScore', 'type': 'float'},
+        'assertion': {'key': 'assertion', 'type': 'HealthcareAssertion'},
+        'name': {'key': 'name', 'type': 'str'},
+        'links': {'key': 'links', 'type': '[HealthcareEntityLink]'},
+    }
+
+    def __init__(self, **kwargs):
+        # Base Entity.__init__ populates the shared fields from kwargs.
+        super(HealthcareEntity, self).__init__(**kwargs)
+        self.assertion = kwargs.get('assertion', None)
+        self.name = kwargs.get('name', None)
+        self.links = kwargs.get('links', None)
+
+
+class HealthcareEntityLink(Model):
+    """A reference to a healthcare entity in a known data-source catalog.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param data_source: Required. Entity Catalog. Examples include: UMLS, CHV,
+     MSH, etc.
+    :type data_source: str
+    :param id: Required. Entity id in the given source catalog.
+    :type id: str
+    """
+
+    # Fields the service requires to be populated before sending.
+    _validation = {
+        'data_source': {'required': True},
+        'id': {'required': True},
+    }
+
+    # Attribute -> JSON wire key / type map used for (de)serialization.
+    _attribute_map = {
+        'data_source': {'key': 'dataSource', 'type': 'str'},
+        'id': {'key': 'id', 'type': 'str'},
+    }
+
+    def __init__(self, **kwargs):
+        super(HealthcareEntityLink, self).__init__(**kwargs)
+        self.data_source = kwargs.get('data_source', None)
+        self.id = kwargs.get('id', None)
+
+
+class HealthcareJobState(Model):
+    """Status and results of a long-running healthcare analysis job.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param created_date_time: Required. Time at which the job was created.
+    :type created_date_time: datetime
+    :param expiration_date_time: Time at which the job expires, if set.
+    :type expiration_date_time: datetime
+    :param job_id: Required. Identifier of the job.
+    :type job_id: str
+    :param last_update_date_time: Required. Time of the job's last update.
+    :type last_update_date_time: datetime
+    :param status: Required. Possible values include: 'notStarted', 'running',
+     'succeeded', 'failed', 'cancelled', 'cancelling', 'partiallyCompleted'
+    :type status: str or ~azure.ai.textanalytics.v3_1_preview_4.models.State
+    :param results: Healthcare results, once the job has produced output.
+    :type results:
+     ~azure.ai.textanalytics.v3_1_preview_4.models.HealthcareResult
+    :param errors: Errors encountered while processing the job.
+    :type errors:
+     list[~azure.ai.textanalytics.v3_1_preview_4.models.TextAnalyticsError]
+    :param next_link: Link to the next page of results, if any.
+    :type next_link: str
+    """
+
+    # Fields the service requires to be populated before sending.
+    _validation = {
+        'created_date_time': {'required': True},
+        'job_id': {'required': True},
+        'last_update_date_time': {'required': True},
+        'status': {'required': True},
+    }
+
+    # Attribute -> JSON wire key / type map used for (de)serialization.
+    # Datetimes travel as ISO-8601 strings; next_link maps to '@nextLink'.
+    _attribute_map = {
+        'created_date_time': {'key': 'createdDateTime', 'type': 'iso-8601'},
+        'expiration_date_time': {'key': 'expirationDateTime', 'type': 'iso-8601'},
+        'job_id': {'key': 'jobId', 'type': 'str'},
+        'last_update_date_time': {'key': 'lastUpdateDateTime', 'type': 'iso-8601'},
+        'status': {'key': 'status', 'type': 'State'},
+        'results': {'key': 'results', 'type': 'HealthcareResult'},
+        'errors': {'key': 'errors', 'type': '[TextAnalyticsError]'},
+        'next_link': {'key': '@nextLink', 'type': 'str'},
+    }
+
+    def __init__(self, **kwargs):
+        super(HealthcareJobState, self).__init__(**kwargs)
+        self.created_date_time = kwargs.get('created_date_time', None)
+        self.expiration_date_time = kwargs.get('expiration_date_time', None)
+        self.job_id = kwargs.get('job_id', None)
+        self.last_update_date_time = kwargs.get('last_update_date_time', None)
+        self.status = kwargs.get('status', None)
+        self.results = kwargs.get('results', None)
+        self.errors = kwargs.get('errors', None)
+        self.next_link = kwargs.get('next_link', None)
+
+
+class HealthcareRelation(Model):
+    """Every relation is an entity graph of a certain relationType, where all
+    entities are connected and have specific roles within the relation context.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param relation_type: Required. Type of relation. Examples include:
+     `DosageOfMedication` or 'FrequencyOfMedication', etc. Possible values
+     include: 'Abbreviation', 'DirectionOfBodyStructure',
+     'DirectionOfCondition', 'DirectionOfExamination', 'DirectionOfTreatment',
+     'DosageOfMedication', 'FormOfMedication', 'FrequencyOfMedication',
+     'FrequencyOfTreatment', 'QualifierOfCondition', 'RelationOfExamination',
+     'RouteOfMedication', 'TimeOfCondition', 'TimeOfEvent',
+     'TimeOfExamination', 'TimeOfMedication', 'TimeOfTreatment',
+     'UnitOfCondition', 'UnitOfExamination', 'ValueOfCondition',
+     'ValueOfExamination'
+    :type relation_type: str or
+     ~azure.ai.textanalytics.v3_1_preview_4.models.RelationType
+    :param entities: Required. The entities in the relation.
+    :type entities:
+     list[~azure.ai.textanalytics.v3_1_preview_4.models.HealthcareRelationEntity]
+    """
+
+    # Fields the service requires to be populated before sending.
+    _validation = {
+        'relation_type': {'required': True},
+        'entities': {'required': True},
+    }
+
+    # Attribute -> JSON wire key / type map used for (de)serialization.
+    _attribute_map = {
+        'relation_type': {'key': 'relationType', 'type': 'str'},
+        'entities': {'key': 'entities', 'type': '[HealthcareRelationEntity]'},
+    }
+
+    def __init__(self, **kwargs):
+        super(HealthcareRelation, self).__init__(**kwargs)
+        self.relation_type = kwargs.get('relation_type', None)
+        self.entities = kwargs.get('entities', None)
+
+
+class HealthcareRelationEntity(Model):
+    """An entity participating in a healthcare relation, with its role.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param ref: Required. Reference link object, using a JSON pointer RFC 6901
+     (URI Fragment Identifier Representation), pointing to the entity .
+    :type ref: str
+    :param role: Required. Role of entity in the relationship. For example:
+     'CD20-positive diffuse large B-cell lymphoma' has the following entities
+     with their roles in parenthesis: CD20 (GeneOrProtein), Positive
+     (Expression), diffuse large B-cell lymphoma (Diagnosis).
+    :type role: str
+    """
+
+    # Fields the service requires to be populated before sending.
+    _validation = {
+        'ref': {'required': True},
+        'role': {'required': True},
+    }
+
+    # Attribute -> JSON wire key / type map used for (de)serialization.
+    _attribute_map = {
+        'ref': {'key': 'ref', 'type': 'str'},
+        'role': {'key': 'role', 'type': 'str'},
+    }
+
+    def __init__(self, **kwargs):
+        super(HealthcareRelationEntity, self).__init__(**kwargs)
+        self.ref = kwargs.get('ref', None)
+        self.role = kwargs.get('role', None)
+
+
+class HealthcareResult(Model):
+    """Healthcare analysis results for a batch of documents.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param documents: Required. Response by document
+    :type documents:
+     list[~azure.ai.textanalytics.v3_1_preview_4.models.DocumentHealthcareEntities]
+    :param errors: Required. Errors by document id.
+    :type errors:
+     list[~azure.ai.textanalytics.v3_1_preview_4.models.DocumentError]
+    :param statistics: if showStats=true was specified in the request this
+     field will contain information about the request payload.
+    :type statistics:
+     ~azure.ai.textanalytics.v3_1_preview_4.models.RequestStatistics
+    :param model_version: Required. This field indicates which model is used
+     for scoring.
+    :type model_version: str
+    """
+
+    # Fields the service requires to be populated before sending.
+    _validation = {
+        'documents': {'required': True},
+        'errors': {'required': True},
+        'model_version': {'required': True},
+    }
+
+    # Attribute -> JSON wire key / type map used for (de)serialization.
+    _attribute_map = {
+        'documents': {'key': 'documents', 'type': '[DocumentHealthcareEntities]'},
+        'errors': {'key': 'errors', 'type': '[DocumentError]'},
+        'statistics': {'key': 'statistics', 'type': 'RequestStatistics'},
+        'model_version': {'key': 'modelVersion', 'type': 'str'},
+    }
+
+    def __init__(self, **kwargs):
+        super(HealthcareResult, self).__init__(**kwargs)
+        self.documents = kwargs.get('documents', None)
+        self.errors = kwargs.get('errors', None)
+        self.statistics = kwargs.get('statistics', None)
+        self.model_version = kwargs.get('model_version', None)
+
+
+class InnerError(Model):
+    """A nested, more specific error; may recursively contain another
+    InnerError.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param code: Required. Error code. Possible values include:
+     'InvalidParameterValue', 'InvalidRequestBodyFormat', 'EmptyRequest',
+     'MissingInputRecords', 'InvalidDocument', 'ModelVersionIncorrect',
+     'InvalidDocumentBatch', 'UnsupportedLanguageCode', 'InvalidCountryHint'
+    :type code: str or
+     ~azure.ai.textanalytics.v3_1_preview_4.models.InnerErrorCodeValue
+    :param message: Required. Error message.
+    :type message: str
+    :param details: Error details.
+    :type details: dict[str, str]
+    :param target: Error target.
+    :type target: str
+    :param innererror: Inner error contains more specific information.
+    :type innererror: ~azure.ai.textanalytics.v3_1_preview_4.models.InnerError
+    """
+
+    # Fields the service requires to be populated before sending.
+    _validation = {
+        'code': {'required': True},
+        'message': {'required': True},
+    }
+
+    # Attribute -> JSON wire key / type map used for (de)serialization;
+    # 'innererror' is a self-referential nesting of this same model.
+    _attribute_map = {
+        'code': {'key': 'code', 'type': 'str'},
+        'message': {'key': 'message', 'type': 'str'},
+        'details': {'key': 'details', 'type': '{str}'},
+        'target': {'key': 'target', 'type': 'str'},
+        'innererror': {'key': 'innererror', 'type': 'InnerError'},
+    }
+
+    def __init__(self, **kwargs):
+        super(InnerError, self).__init__(**kwargs)
+        self.code = kwargs.get('code', None)
+        self.message = kwargs.get('message', None)
+        self.details = kwargs.get('details', None)
+        self.target = kwargs.get('target', None)
+        self.innererror = kwargs.get('innererror', None)
+
+
+class JobDescriptor(Model):
+    """Optional descriptive metadata for an analysis job.
+
+    :param display_name: Optional display name for the analysis job.
+    :type display_name: str
+    """
+
+    # Attribute -> JSON wire key / type map used for (de)serialization.
+    _attribute_map = {
+        'display_name': {'key': 'displayName', 'type': 'str'},
+    }
+
+    def __init__(self, **kwargs):
+        super(JobDescriptor, self).__init__(**kwargs)
+        self.display_name = kwargs.get('display_name', None)
+
+
+class JobManifest(Model):
+    """The manifest of tasks to execute for an analysis job.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param tasks: Required. The set of tasks to execute on the input
+     documents. Cannot specify the same task more than once.
+    :type tasks:
+     ~azure.ai.textanalytics.v3_1_preview_4.models.JobManifestTasks
+    """
+
+    # Fields the service requires to be populated before sending.
+    _validation = {
+        'tasks': {'required': True},
+    }
+
+    # Attribute -> JSON wire key / type map used for (de)serialization.
+    _attribute_map = {
+        'tasks': {'key': 'tasks', 'type': 'JobManifestTasks'},
+    }
+
+    def __init__(self, **kwargs):
+        super(JobManifest, self).__init__(**kwargs)
+        self.tasks = kwargs.get('tasks', None)
+
+
+class JobManifestTasks(Model):
+    """The set of tasks to execute on the input documents. Cannot specify the
+    same task more than once.
+
+    :param entity_recognition_tasks: Entity recognition tasks to run.
+    :type entity_recognition_tasks:
+     list[~azure.ai.textanalytics.v3_1_preview_4.models.EntitiesTask]
+    :param entity_recognition_pii_tasks: PII entity recognition tasks to run.
+    :type entity_recognition_pii_tasks:
+     list[~azure.ai.textanalytics.v3_1_preview_4.models.PiiTask]
+    :param key_phrase_extraction_tasks: Key phrase extraction tasks to run.
+    :type key_phrase_extraction_tasks:
+     list[~azure.ai.textanalytics.v3_1_preview_4.models.KeyPhrasesTask]
+    :param entity_linking_tasks: Entity linking tasks to run.
+    :type entity_linking_tasks:
+     list[~azure.ai.textanalytics.v3_1_preview_4.models.EntityLinkingTask]
+    """
+
+    # Attribute -> JSON wire key / type map used for (de)serialization.
+    _attribute_map = {
+        'entity_recognition_tasks': {'key': 'entityRecognitionTasks', 'type': '[EntitiesTask]'},
+        'entity_recognition_pii_tasks': {'key': 'entityRecognitionPiiTasks', 'type': '[PiiTask]'},
+        'key_phrase_extraction_tasks': {'key': 'keyPhraseExtractionTasks', 'type': '[KeyPhrasesTask]'},
+        'entity_linking_tasks': {'key': 'entityLinkingTasks', 'type': '[EntityLinkingTask]'},
+    }
+
+    def __init__(self, **kwargs):
+        super(JobManifestTasks, self).__init__(**kwargs)
+        self.entity_recognition_tasks = kwargs.get('entity_recognition_tasks', None)
+        self.entity_recognition_pii_tasks = kwargs.get('entity_recognition_pii_tasks', None)
+        self.key_phrase_extraction_tasks = kwargs.get('key_phrase_extraction_tasks', None)
+        self.entity_linking_tasks = kwargs.get('entity_linking_tasks', None)
+
+
+class KeyPhraseResult(Model):
+    """Key phrase extraction results for a batch of documents.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param documents: Required. Response by document
+    :type documents:
+     list[~azure.ai.textanalytics.v3_1_preview_4.models.DocumentKeyPhrases]
+    :param errors: Required. Errors by document id.
+    :type errors:
+     list[~azure.ai.textanalytics.v3_1_preview_4.models.DocumentError]
+    :param statistics: if showStats=true was specified in the request this
+     field will contain information about the request payload.
+    :type statistics:
+     ~azure.ai.textanalytics.v3_1_preview_4.models.RequestStatistics
+    :param model_version: Required. This field indicates which model is used
+     for scoring.
+    :type model_version: str
+    """
+
+    # Fields the service requires to be populated before sending.
+    _validation = {
+        'documents': {'required': True},
+        'errors': {'required': True},
+        'model_version': {'required': True},
+    }
+
+    # Attribute -> JSON wire key / type map used for (de)serialization.
+    _attribute_map = {
+        'documents': {'key': 'documents', 'type': '[DocumentKeyPhrases]'},
+        'errors': {'key': 'errors', 'type': '[DocumentError]'},
+        'statistics': {'key': 'statistics', 'type': 'RequestStatistics'},
+        'model_version': {'key': 'modelVersion', 'type': 'str'},
+    }
+
+    def __init__(self, **kwargs):
+        super(KeyPhraseResult, self).__init__(**kwargs)
+        self.documents = kwargs.get('documents', None)
+        self.errors = kwargs.get('errors', None)
+        self.statistics = kwargs.get('statistics', None)
+        self.model_version = kwargs.get('model_version', None)
+
+
class KeyPhrasesTask(Model):
    """A key phrase extraction task to run as part of an analysis job.

    :param parameters:
    :type parameters:
     ~azure.ai.textanalytics.v3_1_preview_4.models.KeyPhrasesTaskParameters
    """

    # Python attribute -> (JSON wire key, msrest type) map used by the serializer.
    _attribute_map = {
        'parameters': {'key': 'parameters', 'type': 'KeyPhrasesTaskParameters'},
    }

    def __init__(self, **kwargs):
        super(KeyPhrasesTask, self).__init__(**kwargs)
        self.parameters = kwargs.get('parameters', None)
+
+
class KeyPhrasesTaskParameters(Model):
    """Parameters for a key phrase extraction task.

    :param model_version: Default value: "latest" .
    :type model_version: str
    """

    # Python attribute -> (JSON wire key, msrest type) map used by the serializer.
    # Note the hyphenated wire key 'model-version' (not camelCase).
    _attribute_map = {
        'model_version': {'key': 'model-version', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(KeyPhrasesTaskParameters, self).__init__(**kwargs)
        self.model_version = kwargs.get('model_version', "latest")  # generator-supplied default
+
+
class LanguageBatchInput(Model):
    """A batch of documents submitted for language detection.

    All required parameters must be populated in order to send to Azure.

    :param documents: Required.
    :type documents:
     list[~azure.ai.textanalytics.v3_1_preview_4.models.LanguageInput]
    """

    # Required wire fields enforced by the msrest serializer.
    _validation = {
        'documents': {'required': True},
    }

    # Python attribute -> (JSON wire key, msrest type) map used by the serializer.
    _attribute_map = {
        'documents': {'key': 'documents', 'type': '[LanguageInput]'},
    }

    def __init__(self, **kwargs):
        super(LanguageBatchInput, self).__init__(**kwargs)
        self.documents = kwargs.get('documents', None)
+
+
class LanguageInput(Model):
    """A single document submitted for language detection.

    All required parameters must be populated in order to send to Azure.

    :param id: Required. Unique, non-empty document identifier.
    :type id: str
    :param text: Required.
    :type text: str
    :param country_hint:
    :type country_hint: str
    """

    # Required wire fields enforced by the msrest serializer.
    _validation = {
        'id': {'required': True},
        'text': {'required': True},
    }

    # Python attribute -> (JSON wire key, msrest type) map used by the serializer.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'text': {'key': 'text', 'type': 'str'},
        'country_hint': {'key': 'countryHint', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(LanguageInput, self).__init__(**kwargs)
        self.id = kwargs.get('id', None)
        self.text = kwargs.get('text', None)
        self.country_hint = kwargs.get('country_hint', None)
+
+
class LanguageResult(Model):
    """Language detection results for a batch of documents.

    All required parameters must be populated in order to send to Azure.

    :param documents: Required. Response by document
    :type documents:
     list[~azure.ai.textanalytics.v3_1_preview_4.models.DocumentLanguage]
    :param errors: Required. Errors by document id.
    :type errors:
     list[~azure.ai.textanalytics.v3_1_preview_4.models.DocumentError]
    :param statistics:
    :type statistics:
     ~azure.ai.textanalytics.v3_1_preview_4.models.RequestStatistics
    :param model_version: Required. This field indicates which model is used
     for scoring.
    :type model_version: str
    """

    # Required wire fields enforced by the msrest serializer.
    _validation = {
        'documents': {'required': True},
        'errors': {'required': True},
        'model_version': {'required': True},
    }

    # Python attribute -> (JSON wire key, msrest type) map used by the serializer.
    _attribute_map = {
        'documents': {'key': 'documents', 'type': '[DocumentLanguage]'},
        'errors': {'key': 'errors', 'type': '[DocumentError]'},
        'statistics': {'key': 'statistics', 'type': 'RequestStatistics'},
        'model_version': {'key': 'modelVersion', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(LanguageResult, self).__init__(**kwargs)
        self.documents = kwargs.get('documents', None)
        self.errors = kwargs.get('errors', None)
        self.statistics = kwargs.get('statistics', None)
        self.model_version = kwargs.get('model_version', None)
+
+
class LinkedEntity(Model):
    """An entity recognized in the text and linked to a well-known data source.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. Entity Linking formal name.
    :type name: str
    :param matches: Required. List of instances this entity appears in the
     text.
    :type matches: list[~azure.ai.textanalytics.v3_1_preview_4.models.Match]
    :param language: Required. Language used in the data source.
    :type language: str
    :param id: Unique identifier of the recognized entity from the data
     source.
    :type id: str
    :param url: Required. URL for the entity's page from the data source.
    :type url: str
    :param data_source: Required. Data source used to extract entity linking,
     such as Wiki/Bing etc.
    :type data_source: str
    :param bing_id: Bing Entity Search API unique identifier of the recognized
     entity.
    :type bing_id: str
    """

    # Required wire fields enforced by the msrest serializer ('id' and
    # 'bing_id' are optional).
    _validation = {
        'name': {'required': True},
        'matches': {'required': True},
        'language': {'required': True},
        'url': {'required': True},
        'data_source': {'required': True},
    }

    # Python attribute -> (JSON wire key, msrest type) map used by the serializer.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'matches': {'key': 'matches', 'type': '[Match]'},
        'language': {'key': 'language', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'url': {'key': 'url', 'type': 'str'},
        'data_source': {'key': 'dataSource', 'type': 'str'},
        'bing_id': {'key': 'bingId', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(LinkedEntity, self).__init__(**kwargs)
        self.name = kwargs.get('name', None)
        self.matches = kwargs.get('matches', None)
        self.language = kwargs.get('language', None)
        self.id = kwargs.get('id', None)
        self.url = kwargs.get('url', None)
        self.data_source = kwargs.get('data_source', None)
        self.bing_id = kwargs.get('bing_id', None)
+
+
class Match(Model):
    """A single occurrence of a linked entity within the request text.

    All required parameters must be populated in order to send to Azure.

    :param confidence_score: Required. If a well known item is recognized, a
     decimal number denoting the confidence level between 0 and 1 will be
     returned.
    :type confidence_score: float
    :param text: Required. Entity text as appears in the request.
    :type text: str
    :param offset: Required. Start position for the entity match text.
    :type offset: int
    :param length: Required. Length for the entity match text.
    :type length: int
    """

    # Required wire fields enforced by the msrest serializer.
    _validation = {
        'confidence_score': {'required': True},
        'text': {'required': True},
        'offset': {'required': True},
        'length': {'required': True},
    }

    # Python attribute -> (JSON wire key, msrest type) map used by the serializer.
    _attribute_map = {
        'confidence_score': {'key': 'confidenceScore', 'type': 'float'},
        'text': {'key': 'text', 'type': 'str'},
        'offset': {'key': 'offset', 'type': 'int'},
        'length': {'key': 'length', 'type': 'int'},
    }

    def __init__(self, **kwargs):
        super(Match, self).__init__(**kwargs)
        self.confidence_score = kwargs.get('confidence_score', None)
        self.text = kwargs.get('text', None)
        self.offset = kwargs.get('offset', None)
        self.length = kwargs.get('length', None)
+
+
class MultiLanguageBatchInput(Model):
    """Contains a set of input documents to be analyzed by the service.

    All required parameters must be populated in order to send to Azure.

    :param documents: Required. The set of documents to process as part of
     this batch.
    :type documents:
     list[~azure.ai.textanalytics.v3_1_preview_4.models.MultiLanguageInput]
    """

    # Required wire fields enforced by the msrest serializer.
    _validation = {
        'documents': {'required': True},
    }

    # Python attribute -> (JSON wire key, msrest type) map used by the serializer.
    _attribute_map = {
        'documents': {'key': 'documents', 'type': '[MultiLanguageInput]'},
    }

    def __init__(self, **kwargs):
        super(MultiLanguageBatchInput, self).__init__(**kwargs)
        self.documents = kwargs.get('documents', None)
+
+
class MultiLanguageInput(Model):
    """Contains an input document to be analyzed by the service.

    All required parameters must be populated in order to send to Azure.

    :param id: Required. A unique, non-empty document identifier.
    :type id: str
    :param text: Required. The input text to process.
    :type text: str
    :param language: (Optional) This is the 2 letter ISO 639-1 representation
     of a language. For example, use "en" for English; "es" for Spanish etc. If
     not set, use "en" for English as default.
    :type language: str
    """

    # Required wire fields enforced by the msrest serializer.
    _validation = {
        'id': {'required': True},
        'text': {'required': True},
    }

    # Python attribute -> (JSON wire key, msrest type) map used by the serializer.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'text': {'key': 'text', 'type': 'str'},
        'language': {'key': 'language', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(MultiLanguageInput, self).__init__(**kwargs)
        self.id = kwargs.get('id', None)
        self.text = kwargs.get('text', None)
        self.language = kwargs.get('language', None)
+
+
class Pagination(Model):
    """Continuation link for paged results.

    :param next_link:
    :type next_link: str
    """

    # Python attribute -> (JSON wire key, msrest type) map used by the
    # serializer. The wire key is the OData-style '@nextLink'.
    _attribute_map = {
        'next_link': {'key': '@nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(Pagination, self).__init__(**kwargs)
        self.next_link = kwargs.get('next_link', None)
+
+
class PiiDocumentEntities(Model):
    """PII entities recognized in a single document, with the redacted text.

    All required parameters must be populated in order to send to Azure.

    :param id: Required. Unique, non-empty document identifier.
    :type id: str
    :param redacted_text: Required. Returns redacted text.
    :type redacted_text: str
    :param entities: Required. Recognized entities in the document.
    :type entities: list[~azure.ai.textanalytics.v3_1_preview_4.models.Entity]
    :param warnings: Required. Warnings encountered while processing document.
    :type warnings:
     list[~azure.ai.textanalytics.v3_1_preview_4.models.TextAnalyticsWarning]
    :param statistics: if showStats=true was specified in the request this
     field will contain information about the document payload.
    :type statistics:
     ~azure.ai.textanalytics.v3_1_preview_4.models.DocumentStatistics
    """

    # Required wire fields enforced by the msrest serializer.
    _validation = {
        'id': {'required': True},
        'redacted_text': {'required': True},
        'entities': {'required': True},
        'warnings': {'required': True},
    }

    # Python attribute -> (JSON wire key, msrest type) map used by the serializer.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'redacted_text': {'key': 'redactedText', 'type': 'str'},
        'entities': {'key': 'entities', 'type': '[Entity]'},
        'warnings': {'key': 'warnings', 'type': '[TextAnalyticsWarning]'},
        'statistics': {'key': 'statistics', 'type': 'DocumentStatistics'},
    }

    def __init__(self, **kwargs):
        super(PiiDocumentEntities, self).__init__(**kwargs)
        self.id = kwargs.get('id', None)
        self.redacted_text = kwargs.get('redacted_text', None)
        self.entities = kwargs.get('entities', None)
        self.warnings = kwargs.get('warnings', None)
        self.statistics = kwargs.get('statistics', None)
+
+
class PiiResult(Model):
    """PII entity recognition results for a batch of documents.

    All required parameters must be populated in order to send to Azure.

    :param documents: Required. Response by document
    :type documents:
     list[~azure.ai.textanalytics.v3_1_preview_4.models.PiiDocumentEntities]
    :param errors: Required. Errors by document id.
    :type errors:
     list[~azure.ai.textanalytics.v3_1_preview_4.models.DocumentError]
    :param statistics:
    :type statistics:
     ~azure.ai.textanalytics.v3_1_preview_4.models.RequestStatistics
    :param model_version: Required. This field indicates which model is used
     for scoring.
    :type model_version: str
    """

    # Required wire fields enforced by the msrest serializer.
    _validation = {
        'documents': {'required': True},
        'errors': {'required': True},
        'model_version': {'required': True},
    }

    # Python attribute -> (JSON wire key, msrest type) map used by the serializer.
    _attribute_map = {
        'documents': {'key': 'documents', 'type': '[PiiDocumentEntities]'},
        'errors': {'key': 'errors', 'type': '[DocumentError]'},
        'statistics': {'key': 'statistics', 'type': 'RequestStatistics'},
        'model_version': {'key': 'modelVersion', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(PiiResult, self).__init__(**kwargs)
        self.documents = kwargs.get('documents', None)
        self.errors = kwargs.get('errors', None)
        self.statistics = kwargs.get('statistics', None)
        self.model_version = kwargs.get('model_version', None)
+
+
class PiiTask(Model):
    """A PII entity recognition task to run as part of an analysis job.

    :param parameters:
    :type parameters:
     ~azure.ai.textanalytics.v3_1_preview_4.models.PiiTaskParameters
    """

    # Python attribute -> (JSON wire key, msrest type) map used by the serializer.
    _attribute_map = {
        'parameters': {'key': 'parameters', 'type': 'PiiTaskParameters'},
    }

    def __init__(self, **kwargs):
        super(PiiTask, self).__init__(**kwargs)
        self.parameters = kwargs.get('parameters', None)
+
+
class PiiTaskParameters(Model):
    """Parameters for a PII entity recognition task.

    :param domain: Possible values include: 'phi', 'none'. Default value:
     "none" .
    :type domain: str or ~azure.ai.textanalytics.v3_1_preview_4.models.enum
    :param model_version: Default value: "latest" .
    :type model_version: str
    :param pii_categories:
    :type pii_categories: list[str or
     ~azure.ai.textanalytics.v3_1_preview_4.models.PiiCategory]
    :param string_index_type: Possible values include: 'TextElements_v8',
     'UnicodeCodePoint', 'Utf16CodeUnit'. Default value: "TextElements_v8" .
    :type string_index_type: str or
     ~azure.ai.textanalytics.v3_1_preview_4.models.StringIndexTypeResponse
    """

    # Python attribute -> (JSON wire key, msrest type) map used by the
    # serializer. Note the hyphenated wire keys 'model-version' and
    # 'pii-categories' (not camelCase).
    _attribute_map = {
        'domain': {'key': 'domain', 'type': 'str'},
        'model_version': {'key': 'model-version', 'type': 'str'},
        'pii_categories': {'key': 'pii-categories', 'type': '[str]'},
        'string_index_type': {'key': 'stringIndexType', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(PiiTaskParameters, self).__init__(**kwargs)
        self.domain = kwargs.get('domain', "none")  # generator-supplied default
        self.model_version = kwargs.get('model_version', "latest")  # generator-supplied default
        self.pii_categories = kwargs.get('pii_categories', None)
        self.string_index_type = kwargs.get('string_index_type', "TextElements_v8")  # generator-supplied default
+
+
class RequestStatistics(Model):
    """if showStats=true was specified in the request this field will contain
    information about the request payload.

    All required parameters must be populated in order to send to Azure.

    :param documents_count: Required. Number of documents submitted in the
     request.
    :type documents_count: int
    :param valid_documents_count: Required. Number of valid documents. This
     excludes empty, over-size limit or non-supported languages documents.
    :type valid_documents_count: int
    :param erroneous_documents_count: Required. Number of invalid documents.
     This includes empty, over-size limit or non-supported languages documents.
    :type erroneous_documents_count: int
    :param transactions_count: Required. Number of transactions for the
     request.
    :type transactions_count: long
    """

    # Required wire fields enforced by the msrest serializer.
    _validation = {
        'documents_count': {'required': True},
        'valid_documents_count': {'required': True},
        'erroneous_documents_count': {'required': True},
        'transactions_count': {'required': True},
    }

    # Python attribute -> (JSON wire key, msrest type) map used by the serializer.
    _attribute_map = {
        'documents_count': {'key': 'documentsCount', 'type': 'int'},
        'valid_documents_count': {'key': 'validDocumentsCount', 'type': 'int'},
        'erroneous_documents_count': {'key': 'erroneousDocumentsCount', 'type': 'int'},
        'transactions_count': {'key': 'transactionsCount', 'type': 'long'},
    }

    def __init__(self, **kwargs):
        super(RequestStatistics, self).__init__(**kwargs)
        self.documents_count = kwargs.get('documents_count', None)
        self.valid_documents_count = kwargs.get('valid_documents_count', None)
        self.erroneous_documents_count = kwargs.get('erroneous_documents_count', None)
        self.transactions_count = kwargs.get('transactions_count', None)
+
+
class SentenceAssessment(Model):
    """An opinion-mining assessment (e.g. an adjective) detected in a sentence.

    All required parameters must be populated in order to send to Azure.

    :param sentiment: Required. Assessment sentiment in the sentence. Possible
     values include: 'positive', 'mixed', 'negative'
    :type sentiment: str or
     ~azure.ai.textanalytics.v3_1_preview_4.models.TokenSentimentValue
    :param confidence_scores: Required. Assessment sentiment confidence scores
     in the sentence.
    :type confidence_scores:
     ~azure.ai.textanalytics.v3_1_preview_4.models.TargetConfidenceScoreLabel
    :param offset: Required. The assessment offset from the start of the
     sentence.
    :type offset: int
    :param length: Required. The length of the assessment.
    :type length: int
    :param text: Required. The assessment text detected.
    :type text: str
    :param is_negated: Required. The indicator representing if the assessment
     is negated.
    :type is_negated: bool
    """

    # Required wire fields enforced by the msrest serializer.
    _validation = {
        'sentiment': {'required': True},
        'confidence_scores': {'required': True},
        'offset': {'required': True},
        'length': {'required': True},
        'text': {'required': True},
        'is_negated': {'required': True},
    }

    # Python attribute -> (JSON wire key, msrest type) map used by the serializer.
    _attribute_map = {
        'sentiment': {'key': 'sentiment', 'type': 'TokenSentimentValue'},
        'confidence_scores': {'key': 'confidenceScores', 'type': 'TargetConfidenceScoreLabel'},
        'offset': {'key': 'offset', 'type': 'int'},
        'length': {'key': 'length', 'type': 'int'},
        'text': {'key': 'text', 'type': 'str'},
        'is_negated': {'key': 'isNegated', 'type': 'bool'},
    }

    def __init__(self, **kwargs):
        super(SentenceAssessment, self).__init__(**kwargs)
        self.sentiment = kwargs.get('sentiment', None)
        self.confidence_scores = kwargs.get('confidence_scores', None)
        self.offset = kwargs.get('offset', None)
        self.length = kwargs.get('length', None)
        self.text = kwargs.get('text', None)
        self.is_negated = kwargs.get('is_negated', None)
+
+
class SentenceSentiment(Model):
    """Sentiment analysis results for a single sentence of a document.

    All required parameters must be populated in order to send to Azure.

    :param text: Required. The sentence text.
    :type text: str
    :param sentiment: Required. The predicted Sentiment for the sentence.
     Possible values include: 'positive', 'neutral', 'negative'
    :type sentiment: str or
     ~azure.ai.textanalytics.v3_1_preview_4.models.SentenceSentimentValue
    :param confidence_scores: Required. The sentiment confidence score between
     0 and 1 for the sentence for all classes.
    :type confidence_scores:
     ~azure.ai.textanalytics.v3_1_preview_4.models.SentimentConfidenceScorePerLabel
    :param offset: Required. The sentence offset from the start of the
     document.
    :type offset: int
    :param length: Required. The length of the sentence.
    :type length: int
    :param targets: The array of sentence targets for the sentence.
    :type targets:
     list[~azure.ai.textanalytics.v3_1_preview_4.models.SentenceTarget]
    :param assessments: The array of assessments for the sentence.
    :type assessments:
     list[~azure.ai.textanalytics.v3_1_preview_4.models.SentenceAssessment]
    """

    # Required wire fields enforced by the msrest serializer ('targets' and
    # 'assessments' are optional opinion-mining extras).
    _validation = {
        'text': {'required': True},
        'sentiment': {'required': True},
        'confidence_scores': {'required': True},
        'offset': {'required': True},
        'length': {'required': True},
    }

    # Python attribute -> (JSON wire key, msrest type) map used by the serializer.
    _attribute_map = {
        'text': {'key': 'text', 'type': 'str'},
        'sentiment': {'key': 'sentiment', 'type': 'SentenceSentimentValue'},
        'confidence_scores': {'key': 'confidenceScores', 'type': 'SentimentConfidenceScorePerLabel'},
        'offset': {'key': 'offset', 'type': 'int'},
        'length': {'key': 'length', 'type': 'int'},
        'targets': {'key': 'targets', 'type': '[SentenceTarget]'},
        'assessments': {'key': 'assessments', 'type': '[SentenceAssessment]'},
    }

    def __init__(self, **kwargs):
        super(SentenceSentiment, self).__init__(**kwargs)
        self.text = kwargs.get('text', None)
        self.sentiment = kwargs.get('sentiment', None)
        self.confidence_scores = kwargs.get('confidence_scores', None)
        self.offset = kwargs.get('offset', None)
        self.length = kwargs.get('length', None)
        self.targets = kwargs.get('targets', None)
        self.assessments = kwargs.get('assessments', None)
+
+
class SentenceTarget(Model):
    """An opinion-mining target detected in a sentence, with related assessments.

    All required parameters must be populated in order to send to Azure.

    :param sentiment: Required. Targeted sentiment in the sentence. Possible
     values include: 'positive', 'mixed', 'negative'
    :type sentiment: str or
     ~azure.ai.textanalytics.v3_1_preview_4.models.TokenSentimentValue
    :param confidence_scores: Required. Target sentiment confidence scores for
     the target in the sentence.
    :type confidence_scores:
     ~azure.ai.textanalytics.v3_1_preview_4.models.TargetConfidenceScoreLabel
    :param offset: Required. The target offset from the start of the sentence.
    :type offset: int
    :param length: Required. The length of the target.
    :type length: int
    :param text: Required. The target text detected.
    :type text: str
    :param relations: Required. The array of either assessment or target
     objects which is related to the target.
    :type relations:
     list[~azure.ai.textanalytics.v3_1_preview_4.models.TargetRelation]
    """

    # Required wire fields enforced by the msrest serializer.
    _validation = {
        'sentiment': {'required': True},
        'confidence_scores': {'required': True},
        'offset': {'required': True},
        'length': {'required': True},
        'text': {'required': True},
        'relations': {'required': True},
    }

    # Python attribute -> (JSON wire key, msrest type) map used by the serializer.
    _attribute_map = {
        'sentiment': {'key': 'sentiment', 'type': 'TokenSentimentValue'},
        'confidence_scores': {'key': 'confidenceScores', 'type': 'TargetConfidenceScoreLabel'},
        'offset': {'key': 'offset', 'type': 'int'},
        'length': {'key': 'length', 'type': 'int'},
        'text': {'key': 'text', 'type': 'str'},
        'relations': {'key': 'relations', 'type': '[TargetRelation]'},
    }

    def __init__(self, **kwargs):
        super(SentenceTarget, self).__init__(**kwargs)
        self.sentiment = kwargs.get('sentiment', None)
        self.confidence_scores = kwargs.get('confidence_scores', None)
        self.offset = kwargs.get('offset', None)
        self.length = kwargs.get('length', None)
        self.text = kwargs.get('text', None)
        self.relations = kwargs.get('relations', None)
+
+
class SentimentConfidenceScorePerLabel(Model):
    """Represents the confidence scores between 0 and 1 across all sentiment
    classes: positive, neutral, negative.

    All required parameters must be populated in order to send to Azure.

    :param positive: Required.
    :type positive: float
    :param neutral: Required.
    :type neutral: float
    :param negative: Required.
    :type negative: float
    """

    # Required wire fields enforced by the msrest serializer.
    _validation = {
        'positive': {'required': True},
        'neutral': {'required': True},
        'negative': {'required': True},
    }

    # Python attribute -> (JSON wire key, msrest type) map used by the serializer.
    _attribute_map = {
        'positive': {'key': 'positive', 'type': 'float'},
        'neutral': {'key': 'neutral', 'type': 'float'},
        'negative': {'key': 'negative', 'type': 'float'},
    }

    def __init__(self, **kwargs):
        super(SentimentConfidenceScorePerLabel, self).__init__(**kwargs)
        self.positive = kwargs.get('positive', None)
        self.neutral = kwargs.get('neutral', None)
        self.negative = kwargs.get('negative', None)
+
+
class SentimentResponse(Model):
    """Sentiment analysis results for a batch of documents.

    All required parameters must be populated in order to send to Azure.

    :param documents: Required. Sentiment analysis per document.
    :type documents:
     list[~azure.ai.textanalytics.v3_1_preview_4.models.DocumentSentiment]
    :param errors: Required. Errors by document id.
    :type errors:
     list[~azure.ai.textanalytics.v3_1_preview_4.models.DocumentError]
    :param statistics:
    :type statistics:
     ~azure.ai.textanalytics.v3_1_preview_4.models.RequestStatistics
    :param model_version: Required. This field indicates which model is used
     for scoring.
    :type model_version: str
    """

    # Required wire fields enforced by the msrest serializer.
    _validation = {
        'documents': {'required': True},
        'errors': {'required': True},
        'model_version': {'required': True},
    }

    # Python attribute -> (JSON wire key, msrest type) map used by the serializer.
    _attribute_map = {
        'documents': {'key': 'documents', 'type': '[DocumentSentiment]'},
        'errors': {'key': 'errors', 'type': '[DocumentError]'},
        'statistics': {'key': 'statistics', 'type': 'RequestStatistics'},
        'model_version': {'key': 'modelVersion', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(SentimentResponse, self).__init__(**kwargs)
        self.documents = kwargs.get('documents', None)
        self.errors = kwargs.get('errors', None)
        self.statistics = kwargs.get('statistics', None)
        self.model_version = kwargs.get('model_version', None)
+
+
class TargetConfidenceScoreLabel(Model):
    """Represents the confidence scores across all sentiment classes: positive,
    neutral, negative.

    All required parameters must be populated in order to send to Azure.

    :param positive: Required.
    :type positive: float
    :param negative: Required.
    :type negative: float
    """

    # Required wire fields enforced by the msrest serializer (no 'neutral'
    # score for targets/assessments, unlike SentimentConfidenceScorePerLabel).
    _validation = {
        'positive': {'required': True},
        'negative': {'required': True},
    }

    # Python attribute -> (JSON wire key, msrest type) map used by the serializer.
    _attribute_map = {
        'positive': {'key': 'positive', 'type': 'float'},
        'negative': {'key': 'negative', 'type': 'float'},
    }

    def __init__(self, **kwargs):
        super(TargetConfidenceScoreLabel, self).__init__(**kwargs)
        self.positive = kwargs.get('positive', None)
        self.negative = kwargs.get('negative', None)
+
+
class TargetRelation(Model):
    """A relation from an opinion-mining target to a related object.

    All required parameters must be populated in order to send to Azure.

    :param relation_type: Required. The type related to the target. Possible
     values include: 'assessment', 'target'
    :type relation_type: str or
     ~azure.ai.textanalytics.v3_1_preview_4.models.TargetRelationType
    :param ref: Required. The JSON pointer indicating the linked object.
    :type ref: str
    """

    # Required wire fields enforced by the msrest serializer.
    _validation = {
        'relation_type': {'required': True},
        'ref': {'required': True},
    }

    # Python attribute -> (JSON wire key, msrest type) map used by the serializer.
    _attribute_map = {
        'relation_type': {'key': 'relationType', 'type': 'TargetRelationType'},
        'ref': {'key': 'ref', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(TargetRelation, self).__init__(**kwargs)
        self.relation_type = kwargs.get('relation_type', None)
        self.ref = kwargs.get('ref', None)
+
+
class TasksState(Model):
    """State of the tasks belonging to an analysis job.

    All required parameters must be populated in order to send to Azure.

    :param tasks: Required.
    :type tasks:
     ~azure.ai.textanalytics.v3_1_preview_4.models.AnalyzeJobStateTasks
    """

    # Required wire fields enforced by the msrest serializer.
    _validation = {
        'tasks': {'required': True},
    }

    # Python attribute -> (JSON wire key, msrest type) map used by the serializer.
    _attribute_map = {
        'tasks': {'key': 'tasks', 'type': 'AnalyzeJobStateTasks'},
    }

    def __init__(self, **kwargs):
        super(TasksState, self).__init__(**kwargs)
        self.tasks = kwargs.get('tasks', None)
+
+
class TextAnalyticsError(Model):
    """A service error, possibly with nested inner errors and details.

    All required parameters must be populated in order to send to Azure.

    :param code: Required. Error code. Possible values include:
     'InvalidRequest', 'InvalidArgument', 'InternalServerError',
     'ServiceUnavailable', 'NotFound'
    :type code: str or
     ~azure.ai.textanalytics.v3_1_preview_4.models.ErrorCodeValue
    :param message: Required. Error message.
    :type message: str
    :param target: Error target.
    :type target: str
    :param innererror: Inner error contains more specific information.
    :type innererror: ~azure.ai.textanalytics.v3_1_preview_4.models.InnerError
    :param details: Details about specific errors that led to this reported
     error.
    :type details:
     list[~azure.ai.textanalytics.v3_1_preview_4.models.TextAnalyticsError]
    """

    # Required wire fields enforced by the msrest serializer.
    _validation = {
        'code': {'required': True},
        'message': {'required': True},
    }

    # Python attribute -> (JSON wire key, msrest type) map used by the
    # serializer. 'details' is self-referential ([TextAnalyticsError]).
    _attribute_map = {
        'code': {'key': 'code', 'type': 'ErrorCodeValue'},
        'message': {'key': 'message', 'type': 'str'},
        'target': {'key': 'target', 'type': 'str'},
        'innererror': {'key': 'innererror', 'type': 'InnerError'},
        'details': {'key': 'details', 'type': '[TextAnalyticsError]'},
    }

    def __init__(self, **kwargs):
        super(TextAnalyticsError, self).__init__(**kwargs)
        self.code = kwargs.get('code', None)
        self.message = kwargs.get('message', None)
        self.target = kwargs.get('target', None)
        self.innererror = kwargs.get('innererror', None)
        self.details = kwargs.get('details', None)
+
+
class TextAnalyticsWarning(Model):
    """A non-fatal warning emitted while processing a document.

    All required parameters must be populated in order to send to Azure.

    :param code: Required. Error code. Possible values include:
     'LongWordsInDocument', 'DocumentTruncated'
    :type code: str or
     ~azure.ai.textanalytics.v3_1_preview_4.models.WarningCodeValue
    :param message: Required. Warning message.
    :type message: str
    :param target_ref: A JSON pointer reference indicating the target object.
    :type target_ref: str
    """

    # Required wire fields enforced by the msrest serializer.
    _validation = {
        'code': {'required': True},
        'message': {'required': True},
    }

    # Python attribute -> (JSON wire key, msrest type) map used by the serializer.
    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'target_ref': {'key': 'targetRef', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(TextAnalyticsWarning, self).__init__(**kwargs)
        self.code = kwargs.get('code', None)
        self.message = kwargs.get('message', None)
        self.target_ref = kwargs.get('target_ref', None)
diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_4/models/_models_py3.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_4/models/_models_py3.py
new file mode 100644
index 000000000000..4ca5277a07f9
--- /dev/null
+++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_4/models/_models_py3.py
@@ -0,0 +1,2306 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from msrest.serialization import Model
+
+
class AnalyzeBatchInput(Model):
    """Request payload describing an analyze batch job.

    All required parameters must be populated in order to send to Azure.

    :param display_name: Optional display name for the analysis job.
    :type display_name: str
    :param analysis_input: Required.
    :type analysis_input:
     ~azure.ai.textanalytics.v3_1_preview_4.models.MultiLanguageBatchInput
    :param tasks: Required. The set of tasks to execute on the input
     documents. The same task may not be specified more than once.
    :type tasks:
     ~azure.ai.textanalytics.v3_1_preview_4.models.JobManifestTasks
    """

    _validation = {
        "analysis_input": {"required": True},
        "tasks": {"required": True},
    }

    _attribute_map = {
        "display_name": {"key": "displayName", "type": "str"},
        "analysis_input": {"key": "analysisInput", "type": "MultiLanguageBatchInput"},
        "tasks": {"key": "tasks", "type": "JobManifestTasks"},
    }

    def __init__(
        self, *, analysis_input, tasks, display_name: str = None, **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.display_name = display_name
        self.analysis_input = analysis_input
        self.tasks = tasks
+
+
class JobMetadata(Model):
    """Metadata common to all long-running text analytics jobs.

    All required parameters must be populated in order to send to Azure.

    :param created_date_time: Required.
    :type created_date_time: datetime
    :param expiration_date_time:
    :type expiration_date_time: datetime
    :param job_id: Required.
    :type job_id: str
    :param last_update_date_time: Required.
    :type last_update_date_time: datetime
    :param status: Required. Possible values include: 'notStarted', 'running',
     'succeeded', 'failed', 'cancelled', 'cancelling', 'partiallyCompleted'
    :type status: str or ~azure.ai.textanalytics.v3_1_preview_4.models.State
    """

    _validation = {
        "created_date_time": {"required": True},
        "job_id": {"required": True},
        "last_update_date_time": {"required": True},
        "status": {"required": True},
    }

    _attribute_map = {
        "created_date_time": {"key": "createdDateTime", "type": "iso-8601"},
        "expiration_date_time": {"key": "expirationDateTime", "type": "iso-8601"},
        "job_id": {"key": "jobId", "type": "str"},
        "last_update_date_time": {"key": "lastUpdateDateTime", "type": "iso-8601"},
        "status": {"key": "status", "type": "State"},
    }

    def __init__(
        self,
        *,
        created_date_time,
        job_id: str,
        last_update_date_time,
        status,
        expiration_date_time=None,
        **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.created_date_time = created_date_time
        self.expiration_date_time = expiration_date_time
        self.job_id = job_id
        self.last_update_date_time = last_update_date_time
        self.status = status
+
+
class AnalyzeJobMetadata(JobMetadata):
    """Job metadata for an analyze job, extended with a display name.

    All required parameters must be populated in order to send to Azure.

    :param created_date_time: Required.
    :type created_date_time: datetime
    :param expiration_date_time:
    :type expiration_date_time: datetime
    :param job_id: Required.
    :type job_id: str
    :param last_update_date_time: Required.
    :type last_update_date_time: datetime
    :param status: Required. Possible values include: 'notStarted', 'running',
     'succeeded', 'failed', 'cancelled', 'cancelling', 'partiallyCompleted'
    :type status: str or ~azure.ai.textanalytics.v3_1_preview_4.models.State
    :param display_name:
    :type display_name: str
    """

    _validation = {
        "created_date_time": {"required": True},
        "job_id": {"required": True},
        "last_update_date_time": {"required": True},
        "status": {"required": True},
    }

    _attribute_map = {
        "created_date_time": {"key": "createdDateTime", "type": "iso-8601"},
        "expiration_date_time": {"key": "expirationDateTime", "type": "iso-8601"},
        "job_id": {"key": "jobId", "type": "str"},
        "last_update_date_time": {"key": "lastUpdateDateTime", "type": "iso-8601"},
        "status": {"key": "status", "type": "State"},
        "display_name": {"key": "displayName", "type": "str"},
    }

    def __init__(
        self,
        *,
        created_date_time,
        job_id: str,
        last_update_date_time,
        status,
        expiration_date_time=None,
        display_name: str = None,
        **kwargs
    ) -> None:
        # Shared job fields are handled by the JobMetadata base class.
        super().__init__(
            created_date_time=created_date_time,
            expiration_date_time=expiration_date_time,
            job_id=job_id,
            last_update_date_time=last_update_date_time,
            status=status,
            **kwargs
        )
        self.display_name = display_name
+
+
class AnalyzeJobState(Model):
    """Full state of an analyze job, including per-task progress.

    All required parameters must be populated in order to send to Azure.

    :param display_name:
    :type display_name: str
    :param tasks: Required.
    :type tasks:
     ~azure.ai.textanalytics.v3_1_preview_4.models.AnalyzeJobStateTasks
    :param errors:
    :type errors:
     list[~azure.ai.textanalytics.v3_1_preview_4.models.TextAnalyticsError]
    :param statistics:
    :type statistics:
     ~azure.ai.textanalytics.v3_1_preview_4.models.RequestStatistics
    :param next_link:
    :type next_link: str
    """

    _validation = {
        "tasks": {"required": True},
    }

    _attribute_map = {
        "display_name": {"key": "displayName", "type": "str"},
        "tasks": {"key": "tasks", "type": "AnalyzeJobStateTasks"},
        "errors": {"key": "errors", "type": "[TextAnalyticsError]"},
        "statistics": {"key": "statistics", "type": "RequestStatistics"},
        "next_link": {"key": "@nextLink", "type": "str"},
    }

    def __init__(
        self,
        *,
        tasks,
        display_name: str = None,
        errors=None,
        statistics=None,
        next_link: str = None,
        **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.display_name = display_name
        self.tasks = tasks
        self.errors = errors
        self.statistics = statistics
        self.next_link = next_link
+
+
class AnalyzeJobStateTasks(Model):
    """Aggregate task progress counters plus per-kind task result lists.

    All required parameters must be populated in order to send to Azure.

    :param details:
    :type details:
     ~azure.ai.textanalytics.v3_1_preview_4.models.AnalyzeJobStateTasksDetails
    :param completed: Required.
    :type completed: int
    :param failed: Required.
    :type failed: int
    :param in_progress: Required.
    :type in_progress: int
    :param total: Required.
    :type total: int
    :param entity_recognition_tasks:
    :type entity_recognition_tasks:
     list[~azure.ai.textanalytics.v3_1_preview_4.models.AnalyzeJobStateTasksEntityRecognitionTasksItem]
    :param entity_recognition_pii_tasks:
    :type entity_recognition_pii_tasks:
     list[~azure.ai.textanalytics.v3_1_preview_4.models.AnalyzeJobStateTasksEntityRecognitionPiiTasksItem]
    :param key_phrase_extraction_tasks:
    :type key_phrase_extraction_tasks:
     list[~azure.ai.textanalytics.v3_1_preview_4.models.AnalyzeJobStateTasksKeyPhraseExtractionTasksItem]
    :param entity_linking_tasks:
    :type entity_linking_tasks:
     list[~azure.ai.textanalytics.v3_1_preview_4.models.AnalyzeJobStateTasksEntityLinkingTasksItem]
    """

    _validation = {
        "completed": {"required": True},
        "failed": {"required": True},
        "in_progress": {"required": True},
        "total": {"required": True},
    }

    _attribute_map = {
        "details": {"key": "details", "type": "AnalyzeJobStateTasksDetails"},
        "completed": {"key": "completed", "type": "int"},
        "failed": {"key": "failed", "type": "int"},
        "in_progress": {"key": "inProgress", "type": "int"},
        "total": {"key": "total", "type": "int"},
        "entity_recognition_tasks": {"key": "entityRecognitionTasks", "type": "[AnalyzeJobStateTasksEntityRecognitionTasksItem]"},
        "entity_recognition_pii_tasks": {"key": "entityRecognitionPiiTasks", "type": "[AnalyzeJobStateTasksEntityRecognitionPiiTasksItem]"},
        "key_phrase_extraction_tasks": {"key": "keyPhraseExtractionTasks", "type": "[AnalyzeJobStateTasksKeyPhraseExtractionTasksItem]"},
        "entity_linking_tasks": {"key": "entityLinkingTasks", "type": "[AnalyzeJobStateTasksEntityLinkingTasksItem]"},
    }

    def __init__(
        self,
        *,
        completed: int,
        failed: int,
        in_progress: int,
        total: int,
        details=None,
        entity_recognition_tasks=None,
        entity_recognition_pii_tasks=None,
        key_phrase_extraction_tasks=None,
        entity_linking_tasks=None,
        **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.details = details
        self.completed = completed
        self.failed = failed
        self.in_progress = in_progress
        self.total = total
        self.entity_recognition_tasks = entity_recognition_tasks
        self.entity_recognition_pii_tasks = entity_recognition_pii_tasks
        self.key_phrase_extraction_tasks = key_phrase_extraction_tasks
        self.entity_linking_tasks = entity_linking_tasks
+
+
class TaskState(Model):
    """Base state shared by all task status models.

    All required parameters must be populated in order to send to Azure.

    :param last_update_date_time: Required.
    :type last_update_date_time: datetime
    :param name:
    :type name: str
    :param status: Required.
    :type status: object
    """

    _validation = {
        "last_update_date_time": {"required": True},
        "status": {"required": True},
    }

    _attribute_map = {
        "last_update_date_time": {"key": "lastUpdateDateTime", "type": "iso-8601"},
        "name": {"key": "name", "type": "str"},
        "status": {"key": "status", "type": "object"},
    }

    def __init__(
        self, *, last_update_date_time, status, name: str = None, **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.last_update_date_time = last_update_date_time
        self.name = name
        self.status = status
+
+
class AnalyzeJobStateTasksDetails(TaskState):
    """Task state details for an analyze job; identical in shape to TaskState.

    All required parameters must be populated in order to send to Azure.

    :param last_update_date_time: Required.
    :type last_update_date_time: datetime
    :param name:
    :type name: str
    :param status: Required.
    :type status: object
    """

    _validation = {
        "last_update_date_time": {"required": True},
        "status": {"required": True},
    }

    _attribute_map = {
        "last_update_date_time": {"key": "lastUpdateDateTime", "type": "iso-8601"},
        "name": {"key": "name", "type": "str"},
        "status": {"key": "status", "type": "object"},
    }

    def __init__(
        self, *, last_update_date_time, status, name: str = None, **kwargs
    ) -> None:
        # All attributes live on the TaskState base; nothing extra to store.
        super().__init__(
            last_update_date_time=last_update_date_time,
            name=name,
            status=status,
            **kwargs
        )
+
+
class AnalyzeJobStateTasksEntityLinkingTasksItem(TaskState):
    """Task state for an entity-linking task, with its results attached.

    All required parameters must be populated in order to send to Azure.

    :param last_update_date_time: Required.
    :type last_update_date_time: datetime
    :param name:
    :type name: str
    :param status: Required.
    :type status: object
    :param results:
    :type results:
     ~azure.ai.textanalytics.v3_1_preview_4.models.EntityLinkingResult
    """

    _validation = {
        "last_update_date_time": {"required": True},
        "status": {"required": True},
    }

    _attribute_map = {
        "last_update_date_time": {"key": "lastUpdateDateTime", "type": "iso-8601"},
        "name": {"key": "name", "type": "str"},
        "status": {"key": "status", "type": "object"},
        "results": {"key": "results", "type": "EntityLinkingResult"},
    }

    def __init__(
        self, *, last_update_date_time, status, name: str = None, results=None, **kwargs
    ) -> None:
        super().__init__(
            last_update_date_time=last_update_date_time,
            name=name,
            status=status,
            **kwargs
        )
        self.results = results
+
+
class AnalyzeJobStateTasksEntityRecognitionPiiTasksItem(TaskState):
    """Task state for a PII entity-recognition task, with its results attached.

    All required parameters must be populated in order to send to Azure.

    :param last_update_date_time: Required.
    :type last_update_date_time: datetime
    :param name:
    :type name: str
    :param status: Required.
    :type status: object
    :param results:
    :type results: ~azure.ai.textanalytics.v3_1_preview_4.models.PiiResult
    """

    _validation = {
        "last_update_date_time": {"required": True},
        "status": {"required": True},
    }

    _attribute_map = {
        "last_update_date_time": {"key": "lastUpdateDateTime", "type": "iso-8601"},
        "name": {"key": "name", "type": "str"},
        "status": {"key": "status", "type": "object"},
        "results": {"key": "results", "type": "PiiResult"},
    }

    def __init__(
        self, *, last_update_date_time, status, name: str = None, results=None, **kwargs
    ) -> None:
        super().__init__(
            last_update_date_time=last_update_date_time,
            name=name,
            status=status,
            **kwargs
        )
        self.results = results
+
+
class AnalyzeJobStateTasksEntityRecognitionTasksItem(TaskState):
    """Task state for an entity-recognition task, with its results attached.

    All required parameters must be populated in order to send to Azure.

    :param last_update_date_time: Required.
    :type last_update_date_time: datetime
    :param name:
    :type name: str
    :param status: Required.
    :type status: object
    :param results:
    :type results:
     ~azure.ai.textanalytics.v3_1_preview_4.models.EntitiesResult
    """

    _validation = {
        "last_update_date_time": {"required": True},
        "status": {"required": True},
    }

    _attribute_map = {
        "last_update_date_time": {"key": "lastUpdateDateTime", "type": "iso-8601"},
        "name": {"key": "name", "type": "str"},
        "status": {"key": "status", "type": "object"},
        "results": {"key": "results", "type": "EntitiesResult"},
    }

    def __init__(
        self, *, last_update_date_time, status, name: str = None, results=None, **kwargs
    ) -> None:
        super().__init__(
            last_update_date_time=last_update_date_time,
            name=name,
            status=status,
            **kwargs
        )
        self.results = results
+
+
class AnalyzeJobStateTasksKeyPhraseExtractionTasksItem(TaskState):
    """Task state for a key-phrase-extraction task, with its results attached.

    All required parameters must be populated in order to send to Azure.

    :param last_update_date_time: Required.
    :type last_update_date_time: datetime
    :param name:
    :type name: str
    :param status: Required.
    :type status: object
    :param results:
    :type results:
     ~azure.ai.textanalytics.v3_1_preview_4.models.KeyPhraseResult
    """

    _validation = {
        "last_update_date_time": {"required": True},
        "status": {"required": True},
    }

    _attribute_map = {
        "last_update_date_time": {"key": "lastUpdateDateTime", "type": "iso-8601"},
        "name": {"key": "name", "type": "str"},
        "status": {"key": "status", "type": "object"},
        "results": {"key": "results", "type": "KeyPhraseResult"},
    }

    def __init__(
        self, *, last_update_date_time, status, name: str = None, results=None, **kwargs
    ) -> None:
        super().__init__(
            last_update_date_time=last_update_date_time,
            name=name,
            status=status,
            **kwargs
        )
        self.results = results
+
+
class DetectedLanguage(Model):
    """The language detected for a document, with a confidence score.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. Long name of a detected language (e.g. English,
     French).
    :type name: str
    :param iso6391_name: Required. A two letter representation of the detected
     language according to the ISO 639-1 standard (e.g. en, fr).
    :type iso6391_name: str
    :param confidence_score: Required. A confidence score between 0 and 1.
     Scores close to 1 indicate 100% certainty that the identified language is
     true.
    :type confidence_score: float
    """

    _validation = {
        "name": {"required": True},
        "iso6391_name": {"required": True},
        "confidence_score": {"required": True},
    }

    _attribute_map = {
        "name": {"key": "name", "type": "str"},
        "iso6391_name": {"key": "iso6391Name", "type": "str"},
        "confidence_score": {"key": "confidenceScore", "type": "float"},
    }

    def __init__(
        self, *, name: str, iso6391_name: str, confidence_score: float, **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.name = name
        self.iso6391_name = iso6391_name
        self.confidence_score = confidence_score
+
+
class DocumentEntities(Model):
    """Entities recognized in a single document.

    All required parameters must be populated in order to send to Azure.

    :param id: Required. Unique, non-empty document identifier.
    :type id: str
    :param entities: Required. Recognized entities in the document.
    :type entities: list[~azure.ai.textanalytics.v3_1_preview_4.models.Entity]
    :param warnings: Required. Warnings encountered while processing document.
    :type warnings:
     list[~azure.ai.textanalytics.v3_1_preview_4.models.TextAnalyticsWarning]
    :param statistics: if showStats=true was specified in the request this
     field will contain information about the document payload.
    :type statistics:
     ~azure.ai.textanalytics.v3_1_preview_4.models.DocumentStatistics
    """

    _validation = {
        "id": {"required": True},
        "entities": {"required": True},
        "warnings": {"required": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "entities": {"key": "entities", "type": "[Entity]"},
        "warnings": {"key": "warnings", "type": "[TextAnalyticsWarning]"},
        "statistics": {"key": "statistics", "type": "DocumentStatistics"},
    }

    def __init__(
        self, *, id: str, entities, warnings, statistics=None, **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.id = id
        self.entities = entities
        self.warnings = warnings
        self.statistics = statistics
+
+
class DocumentError(Model):
    """Associates a document id with the error raised while processing it.

    All required parameters must be populated in order to send to Azure.

    :param id: Required. Document Id.
    :type id: str
    :param error: Required. Document Error.
    :type error:
     ~azure.ai.textanalytics.v3_1_preview_4.models.TextAnalyticsError
    """

    _validation = {
        "id": {"required": True},
        "error": {"required": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "error": {"key": "error", "type": "TextAnalyticsError"},
    }

    def __init__(self, *, id: str, error, **kwargs) -> None:
        super().__init__(**kwargs)
        self.id = id
        self.error = error
+
+
class DocumentHealthcareEntities(Model):
    """Healthcare entities and relations recognized in a single document.

    All required parameters must be populated in order to send to Azure.

    :param id: Required. Unique, non-empty document identifier.
    :type id: str
    :param entities: Required. Healthcare entities.
    :type entities:
     list[~azure.ai.textanalytics.v3_1_preview_4.models.HealthcareEntity]
    :param relations: Required. Healthcare entity relations.
    :type relations:
     list[~azure.ai.textanalytics.v3_1_preview_4.models.HealthcareRelation]
    :param warnings: Required. Warnings encountered while processing document.
    :type warnings:
     list[~azure.ai.textanalytics.v3_1_preview_4.models.TextAnalyticsWarning]
    :param statistics: if showStats=true was specified in the request this
     field will contain information about the document payload.
    :type statistics:
     ~azure.ai.textanalytics.v3_1_preview_4.models.DocumentStatistics
    """

    _validation = {
        "id": {"required": True},
        "entities": {"required": True},
        "relations": {"required": True},
        "warnings": {"required": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "entities": {"key": "entities", "type": "[HealthcareEntity]"},
        "relations": {"key": "relations", "type": "[HealthcareRelation]"},
        "warnings": {"key": "warnings", "type": "[TextAnalyticsWarning]"},
        "statistics": {"key": "statistics", "type": "DocumentStatistics"},
    }

    def __init__(
        self, *, id: str, entities, relations, warnings, statistics=None, **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.id = id
        self.entities = entities
        self.relations = relations
        self.warnings = warnings
        self.statistics = statistics
+
+
class DocumentKeyPhrases(Model):
    """Key phrases extracted from a single document.

    All required parameters must be populated in order to send to Azure.

    :param id: Required. Unique, non-empty document identifier.
    :type id: str
    :param key_phrases: Required. A list of representative words or phrases.
     The number of key phrases returned is proportional to the number of words
     in the input document.
    :type key_phrases: list[str]
    :param warnings: Required. Warnings encountered while processing document.
    :type warnings:
     list[~azure.ai.textanalytics.v3_1_preview_4.models.TextAnalyticsWarning]
    :param statistics: if showStats=true was specified in the request this
     field will contain information about the document payload.
    :type statistics:
     ~azure.ai.textanalytics.v3_1_preview_4.models.DocumentStatistics
    """

    _validation = {
        "id": {"required": True},
        "key_phrases": {"required": True},
        "warnings": {"required": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "key_phrases": {"key": "keyPhrases", "type": "[str]"},
        "warnings": {"key": "warnings", "type": "[TextAnalyticsWarning]"},
        "statistics": {"key": "statistics", "type": "DocumentStatistics"},
    }

    def __init__(
        self, *, id: str, key_phrases, warnings, statistics=None, **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.id = id
        self.key_phrases = key_phrases
        self.warnings = warnings
        self.statistics = statistics
+
+
class DocumentLanguage(Model):
    """Language detection outcome for a single document.

    All required parameters must be populated in order to send to Azure.

    :param id: Required. Unique, non-empty document identifier.
    :type id: str
    :param detected_language: Required. Detected Language.
    :type detected_language:
     ~azure.ai.textanalytics.v3_1_preview_4.models.DetectedLanguage
    :param warnings: Required. Warnings encountered while processing document.
    :type warnings:
     list[~azure.ai.textanalytics.v3_1_preview_4.models.TextAnalyticsWarning]
    :param statistics: if showStats=true was specified in the request this
     field will contain information about the document payload.
    :type statistics:
     ~azure.ai.textanalytics.v3_1_preview_4.models.DocumentStatistics
    """

    _validation = {
        "id": {"required": True},
        "detected_language": {"required": True},
        "warnings": {"required": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "detected_language": {"key": "detectedLanguage", "type": "DetectedLanguage"},
        "warnings": {"key": "warnings", "type": "[TextAnalyticsWarning]"},
        "statistics": {"key": "statistics", "type": "DocumentStatistics"},
    }

    def __init__(
        self, *, id: str, detected_language, warnings, statistics=None, **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.id = id
        self.detected_language = detected_language
        self.warnings = warnings
        self.statistics = statistics
+
+
class DocumentLinkedEntities(Model):
    """Well-known (linked) entities recognized in a single document.

    All required parameters must be populated in order to send to Azure.

    :param id: Required. Unique, non-empty document identifier.
    :type id: str
    :param entities: Required. Recognized well known entities in the document.
    :type entities:
     list[~azure.ai.textanalytics.v3_1_preview_4.models.LinkedEntity]
    :param warnings: Required. Warnings encountered while processing document.
    :type warnings:
     list[~azure.ai.textanalytics.v3_1_preview_4.models.TextAnalyticsWarning]
    :param statistics: if showStats=true was specified in the request this
     field will contain information about the document payload.
    :type statistics:
     ~azure.ai.textanalytics.v3_1_preview_4.models.DocumentStatistics
    """

    _validation = {
        "id": {"required": True},
        "entities": {"required": True},
        "warnings": {"required": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "entities": {"key": "entities", "type": "[LinkedEntity]"},
        "warnings": {"key": "warnings", "type": "[TextAnalyticsWarning]"},
        "statistics": {"key": "statistics", "type": "DocumentStatistics"},
    }

    def __init__(
        self, *, id: str, entities, warnings, statistics=None, **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.id = id
        self.entities = entities
        self.warnings = warnings
        self.statistics = statistics
+
+
class DocumentSentiment(Model):
    """Sentiment analysis outcome for a single document.

    All required parameters must be populated in order to send to Azure.

    :param id: Required. Unique, non-empty document identifier.
    :type id: str
    :param sentiment: Required. Predicted sentiment for document (Negative,
     Neutral, Positive, or Mixed). Possible values include: 'positive',
     'neutral', 'negative', 'mixed'
    :type sentiment: str or
     ~azure.ai.textanalytics.v3_1_preview_4.models.DocumentSentimentValue
    :param statistics:
    :type statistics:
     ~azure.ai.textanalytics.v3_1_preview_4.models.DocumentStatistics
    :param confidence_scores: Required. Document level sentiment confidence
     scores between 0 and 1 for each sentiment class.
    :type confidence_scores:
     ~azure.ai.textanalytics.v3_1_preview_4.models.SentimentConfidenceScorePerLabel
    :param sentences: Required. Sentence level sentiment analysis.
    :type sentences:
     list[~azure.ai.textanalytics.v3_1_preview_4.models.SentenceSentiment]
    :param warnings: Required. Warnings encountered while processing document.
    :type warnings:
     list[~azure.ai.textanalytics.v3_1_preview_4.models.TextAnalyticsWarning]
    """

    _validation = {
        "id": {"required": True},
        "sentiment": {"required": True},
        "confidence_scores": {"required": True},
        "sentences": {"required": True},
        "warnings": {"required": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "sentiment": {"key": "sentiment", "type": "DocumentSentimentValue"},
        "statistics": {"key": "statistics", "type": "DocumentStatistics"},
        "confidence_scores": {"key": "confidenceScores", "type": "SentimentConfidenceScorePerLabel"},
        "sentences": {"key": "sentences", "type": "[SentenceSentiment]"},
        "warnings": {"key": "warnings", "type": "[TextAnalyticsWarning]"},
    }

    def __init__(
        self,
        *,
        id: str,
        sentiment,
        confidence_scores,
        sentences,
        warnings,
        statistics=None,
        **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.id = id
        self.sentiment = sentiment
        self.statistics = statistics
        self.confidence_scores = confidence_scores
        self.sentences = sentences
        self.warnings = warnings
+
+
class DocumentStatistics(Model):
    """if showStats=true was specified in the request this field will contain
    information about the document payload.

    All required parameters must be populated in order to send to Azure.

    :param characters_count: Required. Number of text elements recognized in
     the document.
    :type characters_count: int
    :param transactions_count: Required. Number of transactions for the
     document.
    :type transactions_count: int
    """

    _validation = {
        "characters_count": {"required": True},
        "transactions_count": {"required": True},
    }

    _attribute_map = {
        "characters_count": {"key": "charactersCount", "type": "int"},
        "transactions_count": {"key": "transactionsCount", "type": "int"},
    }

    def __init__(
        self, *, characters_count: int, transactions_count: int, **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.characters_count = characters_count
        self.transactions_count = transactions_count
+
+
class EntitiesResult(Model):
    """Batch result of entity recognition, keyed by document.

    All required parameters must be populated in order to send to Azure.

    :param documents: Required. Response by document
    :type documents:
     list[~azure.ai.textanalytics.v3_1_preview_4.models.DocumentEntities]
    :param errors: Required. Errors by document id.
    :type errors:
     list[~azure.ai.textanalytics.v3_1_preview_4.models.DocumentError]
    :param statistics:
    :type statistics:
     ~azure.ai.textanalytics.v3_1_preview_4.models.RequestStatistics
    :param model_version: Required. This field indicates which model is used
     for scoring.
    :type model_version: str
    """

    _validation = {
        "documents": {"required": True},
        "errors": {"required": True},
        "model_version": {"required": True},
    }

    _attribute_map = {
        "documents": {"key": "documents", "type": "[DocumentEntities]"},
        "errors": {"key": "errors", "type": "[DocumentError]"},
        "statistics": {"key": "statistics", "type": "RequestStatistics"},
        "model_version": {"key": "modelVersion", "type": "str"},
    }

    def __init__(
        self, *, documents, errors, model_version: str, statistics=None, **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.documents = documents
        self.errors = errors
        self.statistics = statistics
        self.model_version = model_version
+
+
class EntitiesTask(Model):
    """An entity-recognition task definition with optional parameters.

    :param parameters:
    :type parameters:
     ~azure.ai.textanalytics.v3_1_preview_4.models.EntitiesTaskParameters
    """

    _attribute_map = {
        "parameters": {"key": "parameters", "type": "EntitiesTaskParameters"},
    }

    def __init__(self, *, parameters=None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.parameters = parameters
+
+
class EntitiesTaskParameters(Model):
    """Optional parameters for an entity-recognition task.

    :param model_version: Default value: "latest" .
    :type model_version: str
    :param string_index_type: Possible values include: 'TextElements_v8',
     'UnicodeCodePoint', 'Utf16CodeUnit'. Default value: "TextElements_v8" .
    :type string_index_type: str or
     ~azure.ai.textanalytics.v3_1_preview_4.models.StringIndexTypeResponse
    """

    _attribute_map = {
        "model_version": {"key": "model-version", "type": "str"},
        "string_index_type": {"key": "stringIndexType", "type": "str"},
    }

    def __init__(
        self,
        *,
        model_version: str = "latest",
        string_index_type="TextElements_v8",
        **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.model_version = model_version
        self.string_index_type = string_index_type
+
+
class Entity(Model):
    """A single entity recognized in the input text.

    All required parameters must be populated in order to send to Azure.

    :param text: Required. Entity text as appears in the request.
    :type text: str
    :param category: Required. Entity type.
    :type category: str
    :param subcategory: (Optional) Entity sub type.
    :type subcategory: str
    :param offset: Required. Start position for the entity text. Use of
     different 'stringIndexType' values can affect the offset returned.
    :type offset: int
    :param length: Required. Length for the entity text. Use of different
     'stringIndexType' values can affect the length returned.
    :type length: int
    :param confidence_score: Required. Confidence score between 0 and 1 of the
     extracted entity.
    :type confidence_score: float
    """

    _validation = {
        "text": {"required": True},
        "category": {"required": True},
        "offset": {"required": True},
        "length": {"required": True},
        "confidence_score": {"required": True},
    }

    _attribute_map = {
        "text": {"key": "text", "type": "str"},
        "category": {"key": "category", "type": "str"},
        "subcategory": {"key": "subcategory", "type": "str"},
        "offset": {"key": "offset", "type": "int"},
        "length": {"key": "length", "type": "int"},
        "confidence_score": {"key": "confidenceScore", "type": "float"},
    }

    def __init__(
        self,
        *,
        text: str,
        category: str,
        offset: int,
        length: int,
        confidence_score: float,
        subcategory: str = None,
        **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.text = text
        self.category = category
        self.subcategory = subcategory
        self.offset = offset
        self.length = length
        self.confidence_score = confidence_score
+
+
class EntityLinkingResult(Model):
    """Batch result of entity linking, keyed by document.

    All required parameters must be populated in order to send to Azure.

    :param documents: Required. Response by document
    :type documents:
     list[~azure.ai.textanalytics.v3_1_preview_4.models.DocumentLinkedEntities]
    :param errors: Required. Errors by document id.
    :type errors:
     list[~azure.ai.textanalytics.v3_1_preview_4.models.DocumentError]
    :param statistics:
    :type statistics:
     ~azure.ai.textanalytics.v3_1_preview_4.models.RequestStatistics
    :param model_version: Required. This field indicates which model is used
     for scoring.
    :type model_version: str
    """

    _validation = {
        "documents": {"required": True},
        "errors": {"required": True},
        "model_version": {"required": True},
    }

    _attribute_map = {
        "documents": {"key": "documents", "type": "[DocumentLinkedEntities]"},
        "errors": {"key": "errors", "type": "[DocumentError]"},
        "statistics": {"key": "statistics", "type": "RequestStatistics"},
        "model_version": {"key": "modelVersion", "type": "str"},
    }

    def __init__(
        self, *, documents, errors, model_version: str, statistics=None, **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.documents = documents
        self.errors = errors
        self.statistics = statistics
        self.model_version = model_version
+
+
class EntityLinkingTask(Model):
    """An entity-linking task to run as part of an analysis job.

    :param parameters: Optional task configuration.
    :type parameters:
     ~azure.ai.textanalytics.v3_1_preview_4.models.EntityLinkingTaskParameters
    """

    _attribute_map = {
        "parameters": {"key": "parameters", "type": "EntityLinkingTaskParameters"},
    }

    def __init__(self, *, parameters=None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.parameters = parameters
+
+
class EntityLinkingTaskParameters(Model):
    """Configuration parameters for an entity-linking task.

    :param model_version: Default value: "latest" .
    :type model_version: str
    :param string_index_type: Unit used for string offsets/lengths. Possible
     values include: 'TextElements_v8', 'UnicodeCodePoint', 'Utf16CodeUnit'.
     Default value: "TextElements_v8" .
    :type string_index_type: str or
     ~azure.ai.textanalytics.v3_1_preview_4.models.StringIndexTypeResponse
    """

    _attribute_map = {
        "model_version": {"key": "model-version", "type": "str"},
        "string_index_type": {"key": "stringIndexType", "type": "str"},
    }

    def __init__(self, *, model_version: str = "latest",
                 string_index_type="TextElements_v8", **kwargs) -> None:
        super().__init__(**kwargs)
        self.model_version = model_version
        self.string_index_type = string_index_type
+
+
class ErrorResponse(Model):
    """Top-level error payload returned by the service.

    All required parameters must be populated in order to send to Azure.

    :param error: Required. Document Error.
    :type error:
     ~azure.ai.textanalytics.v3_1_preview_4.models.TextAnalyticsError
    """

    _validation = {
        "error": {"required": True},
    }

    _attribute_map = {
        "error": {"key": "error", "type": "TextAnalyticsError"},
    }

    def __init__(self, *, error, **kwargs) -> None:
        super().__init__(**kwargs)
        self.error = error
+
+
class HealthcareAssertion(Model):
    """Assertion qualifiers attached to a healthcare entity.

    :param conditionality: Describes any conditionality on the entity.
     Possible values include: 'Hypothetical', 'Conditional'
    :type conditionality: str or
     ~azure.ai.textanalytics.v3_1_preview_4.models.Conditionality
    :param certainty: Describes the entities certainty and polarity.
     Possible values include: 'Positive', 'Positive Possible',
     'Neutral Possible', 'Negative Possible', 'Negative'
    :type certainty: str or
     ~azure.ai.textanalytics.v3_1_preview_4.models.Certainty
    :param association: Describes if the entity is the subject of the text
     or if it describes someone else. Possible values include: 'subject',
     'other'
    :type association: str or
     ~azure.ai.textanalytics.v3_1_preview_4.models.Association
    """

    _attribute_map = {
        "conditionality": {"key": "conditionality", "type": "Conditionality"},
        "certainty": {"key": "certainty", "type": "Certainty"},
        "association": {"key": "association", "type": "Association"},
    }

    def __init__(self, *, conditionality=None, certainty=None,
                 association=None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.conditionality = conditionality
        self.certainty = certainty
        self.association = association
+
+
class HealthcareEntity(Entity):
    """An Entity extended with healthcare-specific assertion and links.

    All required parameters must be populated in order to send to Azure.

    :param text: Required. Entity text as appears in the request.
    :type text: str
    :param category: Required. Entity type.
    :type category: str
    :param subcategory: (Optional) Entity sub type.
    :type subcategory: str
    :param offset: Required. Start position for the entity text. Use of
     different 'stringIndexType' values can affect the offset returned.
    :type offset: int
    :param length: Required. Length for the entity text. Use of different
     'stringIndexType' values can affect the length returned.
    :type length: int
    :param confidence_score: Required. Confidence score between 0 and 1 of
     the extracted entity.
    :type confidence_score: float
    :param assertion: Optional assertion qualifiers for the entity.
    :type assertion:
     ~azure.ai.textanalytics.v3_1_preview_4.models.HealthcareAssertion
    :param name: Preferred name for the entity. Example: 'histologically'
     would have a 'name' of 'histologic'.
    :type name: str
    :param links: Entity references in known data sources.
    :type links:
     list[~azure.ai.textanalytics.v3_1_preview_4.models.HealthcareEntityLink]
    """

    _validation = {
        "text": {"required": True},
        "category": {"required": True},
        "offset": {"required": True},
        "length": {"required": True},
        "confidence_score": {"required": True},
    }

    _attribute_map = {
        "text": {"key": "text", "type": "str"},
        "category": {"key": "category", "type": "str"},
        "subcategory": {"key": "subcategory", "type": "str"},
        "offset": {"key": "offset", "type": "int"},
        "length": {"key": "length", "type": "int"},
        "confidence_score": {"key": "confidenceScore", "type": "float"},
        "assertion": {"key": "assertion", "type": "HealthcareAssertion"},
        "name": {"key": "name", "type": "str"},
        "links": {"key": "links", "type": "[HealthcareEntityLink]"},
    }

    def __init__(self, *, text: str, category: str, offset: int, length: int,
                 confidence_score: float, subcategory: str = None,
                 assertion=None, name: str = None, links=None,
                 **kwargs) -> None:
        # Base Entity stores the common span/category fields.
        super().__init__(
            text=text,
            category=category,
            subcategory=subcategory,
            offset=offset,
            length=length,
            confidence_score=confidence_score,
            **kwargs)
        self.assertion = assertion
        self.name = name
        self.links = links
+
+
class HealthcareEntityLink(Model):
    """Reference to a healthcare entity in a known data source.

    All required parameters must be populated in order to send to Azure.

    :param data_source: Required. Entity Catalog. Examples include: UMLS,
     CHV, MSH, etc.
    :type data_source: str
    :param id: Required. Entity id in the given source catalog.
    :type id: str
    """

    _validation = {
        "data_source": {"required": True},
        "id": {"required": True},
    }

    _attribute_map = {
        "data_source": {"key": "dataSource", "type": "str"},
        "id": {"key": "id", "type": "str"},
    }

    def __init__(self, *, data_source: str, id: str, **kwargs) -> None:
        super().__init__(**kwargs)
        self.data_source = data_source
        self.id = id
+
+
class HealthcareJobState(Model):
    """State and (eventual) results of a long-running healthcare job.

    All required parameters must be populated in order to send to Azure.

    :param created_date_time: Required.
    :type created_date_time: datetime
    :param expiration_date_time: Optional expiry of the job.
    :type expiration_date_time: datetime
    :param job_id: Required.
    :type job_id: str
    :param last_update_date_time: Required.
    :type last_update_date_time: datetime
    :param status: Required. Possible values include: 'notStarted',
     'running', 'succeeded', 'failed', 'cancelled', 'cancelling',
     'partiallyCompleted'
    :type status: str or ~azure.ai.textanalytics.v3_1_preview_4.models.State
    :param results: Optional job results.
    :type results:
     ~azure.ai.textanalytics.v3_1_preview_4.models.HealthcareResult
    :param errors: Optional job-level errors.
    :type errors:
     list[~azure.ai.textanalytics.v3_1_preview_4.models.TextAnalyticsError]
    :param next_link: Optional continuation link.
    :type next_link: str
    """

    _validation = {
        "created_date_time": {"required": True},
        "job_id": {"required": True},
        "last_update_date_time": {"required": True},
        "status": {"required": True},
    }

    _attribute_map = {
        "created_date_time": {"key": "createdDateTime", "type": "iso-8601"},
        "expiration_date_time": {"key": "expirationDateTime", "type": "iso-8601"},
        "job_id": {"key": "jobId", "type": "str"},
        "last_update_date_time": {"key": "lastUpdateDateTime", "type": "iso-8601"},
        "status": {"key": "status", "type": "State"},
        "results": {"key": "results", "type": "HealthcareResult"},
        "errors": {"key": "errors", "type": "[TextAnalyticsError]"},
        "next_link": {"key": "@nextLink", "type": "str"},
    }

    def __init__(self, *, created_date_time, job_id: str,
                 last_update_date_time, status, expiration_date_time=None,
                 results=None, errors=None, next_link: str = None,
                 **kwargs) -> None:
        super().__init__(**kwargs)
        self.created_date_time = created_date_time
        self.expiration_date_time = expiration_date_time
        self.job_id = job_id
        self.last_update_date_time = last_update_date_time
        self.status = status
        self.results = results
        self.errors = errors
        self.next_link = next_link
+
+
class HealthcareRelation(Model):
    """Every relation is an entity graph of a certain relationType, where all
    entities are connected and have specific roles within the relation
    context.

    All required parameters must be populated in order to send to Azure.

    :param relation_type: Required. Type of relation. Examples include:
     `DosageOfMedication` or 'FrequencyOfMedication', etc. Possible values
     include: 'Abbreviation', 'DirectionOfBodyStructure',
     'DirectionOfCondition', 'DirectionOfExamination',
     'DirectionOfTreatment', 'DosageOfMedication', 'FormOfMedication',
     'FrequencyOfMedication', 'FrequencyOfTreatment',
     'QualifierOfCondition', 'RelationOfExamination', 'RouteOfMedication',
     'TimeOfCondition', 'TimeOfEvent', 'TimeOfExamination',
     'TimeOfMedication', 'TimeOfTreatment', 'UnitOfCondition',
     'UnitOfExamination', 'ValueOfCondition', 'ValueOfExamination'
    :type relation_type: str or
     ~azure.ai.textanalytics.v3_1_preview_4.models.RelationType
    :param entities: Required. The entities in the relation.
    :type entities:
     list[~azure.ai.textanalytics.v3_1_preview_4.models.HealthcareRelationEntity]
    """

    _validation = {
        "relation_type": {"required": True},
        "entities": {"required": True},
    }

    _attribute_map = {
        "relation_type": {"key": "relationType", "type": "str"},
        "entities": {"key": "entities", "type": "[HealthcareRelationEntity]"},
    }

    def __init__(self, *, relation_type, entities, **kwargs) -> None:
        super().__init__(**kwargs)
        self.relation_type = relation_type
        self.entities = entities
+
+
class HealthcareRelationEntity(Model):
    """A participant (entity reference plus role) in a healthcare relation.

    All required parameters must be populated in order to send to Azure.

    :param ref: Required. Reference link object, using a JSON pointer RFC
     6901 (URI Fragment Identifier Representation), pointing to the entity .
    :type ref: str
    :param role: Required. Role of entity in the relationship. For example:
     'CD20-positive diffuse large B-cell lymphoma' has the following
     entities with their roles in parenthesis: CD20 (GeneOrProtein),
     Positive (Expression), diffuse large B-cell lymphoma (Diagnosis).
    :type role: str
    """

    _validation = {
        "ref": {"required": True},
        "role": {"required": True},
    }

    _attribute_map = {
        "ref": {"key": "ref", "type": "str"},
        "role": {"key": "role", "type": "str"},
    }

    def __init__(self, *, ref: str, role: str, **kwargs) -> None:
        super().__init__(**kwargs)
        self.ref = ref
        self.role = role
+
+
class HealthcareResult(Model):
    """Batch result of a healthcare-entities call.

    All required parameters must be populated in order to send to Azure.

    :param documents: Required. Response by document.
    :type documents:
     list[~azure.ai.textanalytics.v3_1_preview_4.models.DocumentHealthcareEntities]
    :param errors: Required. Errors by document id.
    :type errors:
     list[~azure.ai.textanalytics.v3_1_preview_4.models.DocumentError]
    :param statistics: Optional request-level statistics.
    :type statistics:
     ~azure.ai.textanalytics.v3_1_preview_4.models.RequestStatistics
    :param model_version: Required. This field indicates which model is used
     for scoring.
    :type model_version: str
    """

    _validation = {
        "documents": {"required": True},
        "errors": {"required": True},
        "model_version": {"required": True},
    }

    _attribute_map = {
        "documents": {"key": "documents", "type": "[DocumentHealthcareEntities]"},
        "errors": {"key": "errors", "type": "[DocumentError]"},
        "statistics": {"key": "statistics", "type": "RequestStatistics"},
        "model_version": {"key": "modelVersion", "type": "str"},
    }

    def __init__(self, *, documents, errors, model_version: str,
                 statistics=None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.documents = documents
        self.errors = errors
        self.statistics = statistics
        self.model_version = model_version
+
+
class InnerError(Model):
    """Nested, more specific error information.

    All required parameters must be populated in order to send to Azure.

    :param code: Required. Error code. Possible values include:
     'InvalidParameterValue', 'InvalidRequestBodyFormat', 'EmptyRequest',
     'MissingInputRecords', 'InvalidDocument', 'ModelVersionIncorrect',
     'InvalidDocumentBatch', 'UnsupportedLanguageCode', 'InvalidCountryHint'
    :type code: str or
     ~azure.ai.textanalytics.v3_1_preview_4.models.InnerErrorCodeValue
    :param message: Required. Error message.
    :type message: str
    :param details: Error details.
    :type details: dict[str, str]
    :param target: Error target.
    :type target: str
    :param innererror: Inner error contains more specific information.
    :type innererror: ~azure.ai.textanalytics.v3_1_preview_4.models.InnerError
    """

    _validation = {
        "code": {"required": True},
        "message": {"required": True},
    }

    _attribute_map = {
        "code": {"key": "code", "type": "str"},
        "message": {"key": "message", "type": "str"},
        "details": {"key": "details", "type": "{str}"},
        "target": {"key": "target", "type": "str"},
        "innererror": {"key": "innererror", "type": "InnerError"},
    }

    def __init__(self, *, code, message: str, details=None,
                 target: str = None, innererror=None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.code = code
        self.message = message
        self.details = details
        self.target = target
        self.innererror = innererror
+
+
class JobDescriptor(Model):
    """Descriptive metadata for an analysis job.

    :param display_name: Optional display name for the analysis job.
    :type display_name: str
    """

    _attribute_map = {
        "display_name": {"key": "displayName", "type": "str"},
    }

    def __init__(self, *, display_name: str = None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.display_name = display_name
+
+
class JobManifest(Model):
    """Manifest describing what an analysis job should run.

    All required parameters must be populated in order to send to Azure.

    :param tasks: Required. The set of tasks to execute on the input
     documents. Cannot specify the same task more than once.
    :type tasks:
     ~azure.ai.textanalytics.v3_1_preview_4.models.JobManifestTasks
    """

    _validation = {
        "tasks": {"required": True},
    }

    _attribute_map = {
        "tasks": {"key": "tasks", "type": "JobManifestTasks"},
    }

    def __init__(self, *, tasks, **kwargs) -> None:
        super().__init__(**kwargs)
        self.tasks = tasks
+
+
class JobManifestTasks(Model):
    """The set of tasks to execute on the input documents. Cannot specify
    the same task more than once.

    :param entity_recognition_tasks: Entity-recognition tasks.
    :type entity_recognition_tasks:
     list[~azure.ai.textanalytics.v3_1_preview_4.models.EntitiesTask]
    :param entity_recognition_pii_tasks: PII entity-recognition tasks.
    :type entity_recognition_pii_tasks:
     list[~azure.ai.textanalytics.v3_1_preview_4.models.PiiTask]
    :param key_phrase_extraction_tasks: Key-phrase extraction tasks.
    :type key_phrase_extraction_tasks:
     list[~azure.ai.textanalytics.v3_1_preview_4.models.KeyPhrasesTask]
    :param entity_linking_tasks: Entity-linking tasks.
    :type entity_linking_tasks:
     list[~azure.ai.textanalytics.v3_1_preview_4.models.EntityLinkingTask]
    """

    _attribute_map = {
        "entity_recognition_tasks": {"key": "entityRecognitionTasks", "type": "[EntitiesTask]"},
        "entity_recognition_pii_tasks": {"key": "entityRecognitionPiiTasks", "type": "[PiiTask]"},
        "key_phrase_extraction_tasks": {"key": "keyPhraseExtractionTasks", "type": "[KeyPhrasesTask]"},
        "entity_linking_tasks": {"key": "entityLinkingTasks", "type": "[EntityLinkingTask]"},
    }

    def __init__(self, *, entity_recognition_tasks=None,
                 entity_recognition_pii_tasks=None,
                 key_phrase_extraction_tasks=None,
                 entity_linking_tasks=None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.entity_recognition_tasks = entity_recognition_tasks
        self.entity_recognition_pii_tasks = entity_recognition_pii_tasks
        self.key_phrase_extraction_tasks = key_phrase_extraction_tasks
        self.entity_linking_tasks = entity_linking_tasks
+
+
class KeyPhraseResult(Model):
    """Batch result of a key-phrase extraction call.

    All required parameters must be populated in order to send to Azure.

    :param documents: Required. Response by document.
    :type documents:
     list[~azure.ai.textanalytics.v3_1_preview_4.models.DocumentKeyPhrases]
    :param errors: Required. Errors by document id.
    :type errors:
     list[~azure.ai.textanalytics.v3_1_preview_4.models.DocumentError]
    :param statistics: Optional request-level statistics.
    :type statistics:
     ~azure.ai.textanalytics.v3_1_preview_4.models.RequestStatistics
    :param model_version: Required. This field indicates which model is used
     for scoring.
    :type model_version: str
    """

    _validation = {
        "documents": {"required": True},
        "errors": {"required": True},
        "model_version": {"required": True},
    }

    _attribute_map = {
        "documents": {"key": "documents", "type": "[DocumentKeyPhrases]"},
        "errors": {"key": "errors", "type": "[DocumentError]"},
        "statistics": {"key": "statistics", "type": "RequestStatistics"},
        "model_version": {"key": "modelVersion", "type": "str"},
    }

    def __init__(self, *, documents, errors, model_version: str,
                 statistics=None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.documents = documents
        self.errors = errors
        self.statistics = statistics
        self.model_version = model_version
+
+
class KeyPhrasesTask(Model):
    """A key-phrase extraction task to run as part of an analysis job.

    :param parameters: Optional task configuration.
    :type parameters:
     ~azure.ai.textanalytics.v3_1_preview_4.models.KeyPhrasesTaskParameters
    """

    _attribute_map = {
        "parameters": {"key": "parameters", "type": "KeyPhrasesTaskParameters"},
    }

    def __init__(self, *, parameters=None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.parameters = parameters
+
+
class KeyPhrasesTaskParameters(Model):
    """Configuration parameters for a key-phrase extraction task.

    :param model_version: Default value: "latest" .
    :type model_version: str
    """

    _attribute_map = {
        "model_version": {"key": "model-version", "type": "str"},
    }

    def __init__(self, *, model_version: str = "latest", **kwargs) -> None:
        super().__init__(**kwargs)
        self.model_version = model_version
+
+
class LanguageBatchInput(Model):
    """Batch of documents for language detection.

    All required parameters must be populated in order to send to Azure.

    :param documents: Required.
    :type documents:
     list[~azure.ai.textanalytics.v3_1_preview_4.models.LanguageInput]
    """

    _validation = {
        "documents": {"required": True},
    }

    _attribute_map = {
        "documents": {"key": "documents", "type": "[LanguageInput]"},
    }

    def __init__(self, *, documents, **kwargs) -> None:
        super().__init__(**kwargs)
        self.documents = documents
+
+
class LanguageInput(Model):
    """A single document submitted for language detection.

    All required parameters must be populated in order to send to Azure.

    :param id: Required. Unique, non-empty document identifier.
    :type id: str
    :param text: Required.
    :type text: str
    :param country_hint: Optional country hint for the document.
    :type country_hint: str
    """

    _validation = {
        "id": {"required": True},
        "text": {"required": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "text": {"key": "text", "type": "str"},
        "country_hint": {"key": "countryHint", "type": "str"},
    }

    def __init__(self, *, id: str, text: str, country_hint: str = None,
                 **kwargs) -> None:
        super().__init__(**kwargs)
        self.id = id
        self.text = text
        self.country_hint = country_hint
+
+
class LanguageResult(Model):
    """Batch result of a language-detection call.

    All required parameters must be populated in order to send to Azure.

    :param documents: Required. Response by document.
    :type documents:
     list[~azure.ai.textanalytics.v3_1_preview_4.models.DocumentLanguage]
    :param errors: Required. Errors by document id.
    :type errors:
     list[~azure.ai.textanalytics.v3_1_preview_4.models.DocumentError]
    :param statistics: Optional request-level statistics.
    :type statistics:
     ~azure.ai.textanalytics.v3_1_preview_4.models.RequestStatistics
    :param model_version: Required. This field indicates which model is used
     for scoring.
    :type model_version: str
    """

    _validation = {
        "documents": {"required": True},
        "errors": {"required": True},
        "model_version": {"required": True},
    }

    _attribute_map = {
        "documents": {"key": "documents", "type": "[DocumentLanguage]"},
        "errors": {"key": "errors", "type": "[DocumentError]"},
        "statistics": {"key": "statistics", "type": "RequestStatistics"},
        "model_version": {"key": "modelVersion", "type": "str"},
    }

    def __init__(self, *, documents, errors, model_version: str,
                 statistics=None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.documents = documents
        self.errors = errors
        self.statistics = statistics
        self.model_version = model_version
+
+
class LinkedEntity(Model):
    """An entity recognized in the text and linked to a known data source.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. Entity Linking formal name.
    :type name: str
    :param matches: Required. List of instances this entity appears in the
     text.
    :type matches: list[~azure.ai.textanalytics.v3_1_preview_4.models.Match]
    :param language: Required. Language used in the data source.
    :type language: str
    :param id: Unique identifier of the recognized entity from the data
     source.
    :type id: str
    :param url: Required. URL for the entity's page from the data source.
    :type url: str
    :param data_source: Required. Data source used to extract entity
     linking, such as Wiki/Bing etc.
    :type data_source: str
    :param bing_id: Bing Entity Search API unique identifier of the
     recognized entity.
    :type bing_id: str
    """

    _validation = {
        "name": {"required": True},
        "matches": {"required": True},
        "language": {"required": True},
        "url": {"required": True},
        "data_source": {"required": True},
    }

    _attribute_map = {
        "name": {"key": "name", "type": "str"},
        "matches": {"key": "matches", "type": "[Match]"},
        "language": {"key": "language", "type": "str"},
        "id": {"key": "id", "type": "str"},
        "url": {"key": "url", "type": "str"},
        "data_source": {"key": "dataSource", "type": "str"},
        "bing_id": {"key": "bingId", "type": "str"},
    }

    def __init__(self, *, name: str, matches, language: str, url: str,
                 data_source: str, id: str = None, bing_id: str = None,
                 **kwargs) -> None:
        super().__init__(**kwargs)
        self.name = name
        self.matches = matches
        self.language = language
        self.id = id
        self.url = url
        self.data_source = data_source
        self.bing_id = bing_id
+
+
class Match(Model):
    """One occurrence of a linked entity in the input text.

    All required parameters must be populated in order to send to Azure.

    :param confidence_score: Required. If a well known item is recognized, a
     decimal number denoting the confidence level between 0 and 1 will be
     returned.
    :type confidence_score: float
    :param text: Required. Entity text as appears in the request.
    :type text: str
    :param offset: Required. Start position for the entity match text.
    :type offset: int
    :param length: Required. Length for the entity match text.
    :type length: int
    """

    _validation = {
        "confidence_score": {"required": True},
        "text": {"required": True},
        "offset": {"required": True},
        "length": {"required": True},
    }

    _attribute_map = {
        "confidence_score": {"key": "confidenceScore", "type": "float"},
        "text": {"key": "text", "type": "str"},
        "offset": {"key": "offset", "type": "int"},
        "length": {"key": "length", "type": "int"},
    }

    def __init__(self, *, confidence_score: float, text: str, offset: int,
                 length: int, **kwargs) -> None:
        super().__init__(**kwargs)
        self.confidence_score = confidence_score
        self.text = text
        self.offset = offset
        self.length = length
+
+
class MultiLanguageBatchInput(Model):
    """Contains a set of input documents to be analyzed by the service.

    All required parameters must be populated in order to send to Azure.

    :param documents: Required. The set of documents to process as part of
     this batch.
    :type documents:
     list[~azure.ai.textanalytics.v3_1_preview_4.models.MultiLanguageInput]
    """

    _validation = {
        "documents": {"required": True},
    }

    _attribute_map = {
        "documents": {"key": "documents", "type": "[MultiLanguageInput]"},
    }

    def __init__(self, *, documents, **kwargs) -> None:
        super().__init__(**kwargs)
        self.documents = documents
+
+
class MultiLanguageInput(Model):
    """Contains an input document to be analyzed by the service.

    All required parameters must be populated in order to send to Azure.

    :param id: Required. A unique, non-empty document identifier.
    :type id: str
    :param text: Required. The input text to process.
    :type text: str
    :param language: (Optional) This is the 2 letter ISO 639-1
     representation of a language. For example, use "en" for English; "es"
     for Spanish etc. If not set, use "en" for English as default.
    :type language: str
    """

    _validation = {
        "id": {"required": True},
        "text": {"required": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "text": {"key": "text", "type": "str"},
        "language": {"key": "language", "type": "str"},
    }

    def __init__(self, *, id: str, text: str, language: str = None,
                 **kwargs) -> None:
        super().__init__(**kwargs)
        self.id = id
        self.text = text
        self.language = language
+
+
class Pagination(Model):
    """Continuation link for paged results.

    :param next_link: Optional link to the next page of results.
    :type next_link: str
    """

    _attribute_map = {
        "next_link": {"key": "@nextLink", "type": "str"},
    }

    def __init__(self, *, next_link: str = None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.next_link = next_link
+
+
class PiiDocumentEntities(Model):
    """PII entities recognized in a single document, plus its redacted text.

    All required parameters must be populated in order to send to Azure.

    :param id: Required. Unique, non-empty document identifier.
    :type id: str
    :param redacted_text: Required. Returns redacted text.
    :type redacted_text: str
    :param entities: Required. Recognized entities in the document.
    :type entities:
     list[~azure.ai.textanalytics.v3_1_preview_4.models.Entity]
    :param warnings: Required. Warnings encountered while processing
     document.
    :type warnings:
     list[~azure.ai.textanalytics.v3_1_preview_4.models.TextAnalyticsWarning]
    :param statistics: if showStats=true was specified in the request this
     field will contain information about the document payload.
    :type statistics:
     ~azure.ai.textanalytics.v3_1_preview_4.models.DocumentStatistics
    """

    _validation = {
        "id": {"required": True},
        "redacted_text": {"required": True},
        "entities": {"required": True},
        "warnings": {"required": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "redacted_text": {"key": "redactedText", "type": "str"},
        "entities": {"key": "entities", "type": "[Entity]"},
        "warnings": {"key": "warnings", "type": "[TextAnalyticsWarning]"},
        "statistics": {"key": "statistics", "type": "DocumentStatistics"},
    }

    def __init__(self, *, id: str, redacted_text: str, entities, warnings,
                 statistics=None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.id = id
        self.redacted_text = redacted_text
        self.entities = entities
        self.warnings = warnings
        self.statistics = statistics
+
+
class PiiResult(Model):
    """Batch result of a PII entity-recognition call.

    All required parameters must be populated in order to send to Azure.

    :param documents: Required. Response by document.
    :type documents:
     list[~azure.ai.textanalytics.v3_1_preview_4.models.PiiDocumentEntities]
    :param errors: Required. Errors by document id.
    :type errors:
     list[~azure.ai.textanalytics.v3_1_preview_4.models.DocumentError]
    :param statistics: Optional request-level statistics.
    :type statistics:
     ~azure.ai.textanalytics.v3_1_preview_4.models.RequestStatistics
    :param model_version: Required. This field indicates which model is used
     for scoring.
    :type model_version: str
    """

    _validation = {
        "documents": {"required": True},
        "errors": {"required": True},
        "model_version": {"required": True},
    }

    _attribute_map = {
        "documents": {"key": "documents", "type": "[PiiDocumentEntities]"},
        "errors": {"key": "errors", "type": "[DocumentError]"},
        "statistics": {"key": "statistics", "type": "RequestStatistics"},
        "model_version": {"key": "modelVersion", "type": "str"},
    }

    def __init__(self, *, documents, errors, model_version: str,
                 statistics=None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.documents = documents
        self.errors = errors
        self.statistics = statistics
        self.model_version = model_version
+
+
class PiiTask(Model):
    """A PII entity-recognition task to run as part of an analysis job.

    :param parameters: Optional task configuration.
    :type parameters:
     ~azure.ai.textanalytics.v3_1_preview_4.models.PiiTaskParameters
    """

    _attribute_map = {
        "parameters": {"key": "parameters", "type": "PiiTaskParameters"},
    }

    def __init__(self, *, parameters=None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.parameters = parameters
+
+
class PiiTaskParameters(Model):
    """Configuration parameters for a PII entity-recognition task.

    :param domain: Possible values include: 'phi', 'none'. Default value:
     "none" .
    :type domain: str or ~azure.ai.textanalytics.v3_1_preview_4.models.enum
    :param model_version: Default value: "latest" .
    :type model_version: str
    :param pii_categories: Optional categories of PII to detect.
    :type pii_categories: list[str or
     ~azure.ai.textanalytics.v3_1_preview_4.models.PiiCategory]
    :param string_index_type: Possible values include: 'TextElements_v8',
     'UnicodeCodePoint', 'Utf16CodeUnit'. Default value: "TextElements_v8" .
    :type string_index_type: str or
     ~azure.ai.textanalytics.v3_1_preview_4.models.StringIndexTypeResponse
    """

    _attribute_map = {
        "domain": {"key": "domain", "type": "str"},
        "model_version": {"key": "model-version", "type": "str"},
        "pii_categories": {"key": "pii-categories", "type": "[str]"},
        "string_index_type": {"key": "stringIndexType", "type": "str"},
    }

    def __init__(self, *, domain="none", model_version: str = "latest",
                 pii_categories=None, string_index_type="TextElements_v8",
                 **kwargs) -> None:
        super().__init__(**kwargs)
        self.domain = domain
        self.model_version = model_version
        self.pii_categories = pii_categories
        self.string_index_type = string_index_type
+
+
class RequestStatistics(Model):
    """if showStats=true was specified in the request this field will
    contain information about the request payload.

    All required parameters must be populated in order to send to Azure.

    :param documents_count: Required. Number of documents submitted in the
     request.
    :type documents_count: int
    :param valid_documents_count: Required. Number of valid documents. This
     excludes empty, over-size limit or non-supported languages documents.
    :type valid_documents_count: int
    :param erroneous_documents_count: Required. Number of invalid documents.
     This includes empty, over-size limit or non-supported languages
     documents.
    :type erroneous_documents_count: int
    :param transactions_count: Required. Number of transactions for the
     request.
    :type transactions_count: long
    """

    _validation = {
        "documents_count": {"required": True},
        "valid_documents_count": {"required": True},
        "erroneous_documents_count": {"required": True},
        "transactions_count": {"required": True},
    }

    _attribute_map = {
        "documents_count": {"key": "documentsCount", "type": "int"},
        "valid_documents_count": {"key": "validDocumentsCount", "type": "int"},
        "erroneous_documents_count": {"key": "erroneousDocumentsCount", "type": "int"},
        "transactions_count": {"key": "transactionsCount", "type": "long"},
    }

    def __init__(self, *, documents_count: int, valid_documents_count: int,
                 erroneous_documents_count: int, transactions_count: int,
                 **kwargs) -> None:
        super().__init__(**kwargs)
        self.documents_count = documents_count
        self.valid_documents_count = valid_documents_count
        self.erroneous_documents_count = erroneous_documents_count
        self.transactions_count = transactions_count
+
+
class SentenceAssessment(Model):
    """An assessment (opinion expression) detected within a sentence by
    opinion-mining sentiment analysis.

    All required parameters must be populated in order to send to Azure.

    :param sentiment: Required. Assessment sentiment in the sentence. Possible
     values include: 'positive', 'mixed', 'negative'
    :type sentiment: str or
     ~azure.ai.textanalytics.v3_1_preview_4.models.TokenSentimentValue
    :param confidence_scores: Required. Assessment sentiment confidence scores
     in the sentence.
    :type confidence_scores:
     ~azure.ai.textanalytics.v3_1_preview_4.models.TargetConfidenceScoreLabel
    :param offset: Required. The assessment offset from the start of the
     sentence.
    :type offset: int
    :param length: Required. The length of the assessment.
    :type length: int
    :param text: Required. The assessment text detected.
    :type text: str
    :param is_negated: Required. The indicator representing if the assessment
     is negated.
    :type is_negated: bool
    """

    _validation = {
        'sentiment': {'required': True},
        'confidence_scores': {'required': True},
        'offset': {'required': True},
        'length': {'required': True},
        'text': {'required': True},
        'is_negated': {'required': True},
    }

    # 'TokenSentimentValue' / 'TargetConfidenceScoreLabel' name model/enum
    # types defined in this models package, resolved by the serializer.
    _attribute_map = {
        'sentiment': {'key': 'sentiment', 'type': 'TokenSentimentValue'},
        'confidence_scores': {'key': 'confidenceScores', 'type': 'TargetConfidenceScoreLabel'},
        'offset': {'key': 'offset', 'type': 'int'},
        'length': {'key': 'length', 'type': 'int'},
        'text': {'key': 'text', 'type': 'str'},
        'is_negated': {'key': 'isNegated', 'type': 'bool'},
    }

    def __init__(self, *, sentiment, confidence_scores, offset: int, length: int, text: str, is_negated: bool, **kwargs) -> None:
        super(SentenceAssessment, self).__init__(**kwargs)
        self.sentiment = sentiment
        self.confidence_scores = confidence_scores
        self.offset = offset
        self.length = length
        self.text = text
        self.is_negated = is_negated
+
+
class SentenceSentiment(Model):
    """Per-sentence sentiment result, optionally carrying opinion-mining
    targets and assessments.

    All required parameters must be populated in order to send to Azure.

    :param text: Required. The sentence text.
    :type text: str
    :param sentiment: Required. The predicted Sentiment for the sentence.
     Possible values include: 'positive', 'neutral', 'negative'
    :type sentiment: str or
     ~azure.ai.textanalytics.v3_1_preview_4.models.SentenceSentimentValue
    :param confidence_scores: Required. The sentiment confidence score between
     0 and 1 for the sentence for all classes.
    :type confidence_scores:
     ~azure.ai.textanalytics.v3_1_preview_4.models.SentimentConfidenceScorePerLabel
    :param offset: Required. The sentence offset from the start of the
     document.
    :type offset: int
    :param length: Required. The length of the sentence.
    :type length: int
    :param targets: The array of sentence targets for the sentence.
    :type targets:
     list[~azure.ai.textanalytics.v3_1_preview_4.models.SentenceTarget]
    :param assessments: The array of assessments for the sentence.
    :type assessments:
     list[~azure.ai.textanalytics.v3_1_preview_4.models.SentenceAssessment]
    """

    # 'targets' and 'assessments' are intentionally absent here: they are
    # optional (only populated when opinion mining is requested).
    _validation = {
        'text': {'required': True},
        'sentiment': {'required': True},
        'confidence_scores': {'required': True},
        'offset': {'required': True},
        'length': {'required': True},
    }

    _attribute_map = {
        'text': {'key': 'text', 'type': 'str'},
        'sentiment': {'key': 'sentiment', 'type': 'SentenceSentimentValue'},
        'confidence_scores': {'key': 'confidenceScores', 'type': 'SentimentConfidenceScorePerLabel'},
        'offset': {'key': 'offset', 'type': 'int'},
        'length': {'key': 'length', 'type': 'int'},
        'targets': {'key': 'targets', 'type': '[SentenceTarget]'},
        'assessments': {'key': 'assessments', 'type': '[SentenceAssessment]'},
    }

    def __init__(self, *, text: str, sentiment, confidence_scores, offset: int, length: int, targets=None, assessments=None, **kwargs) -> None:
        super(SentenceSentiment, self).__init__(**kwargs)
        self.text = text
        self.sentiment = sentiment
        self.confidence_scores = confidence_scores
        self.offset = offset
        self.length = length
        self.targets = targets
        self.assessments = assessments
+
+
class SentenceTarget(Model):
    """A target (aspect) of an opinion detected within a sentence by
    opinion-mining sentiment analysis.

    All required parameters must be populated in order to send to Azure.

    :param sentiment: Required. Targeted sentiment in the sentence. Possible
     values include: 'positive', 'mixed', 'negative'
    :type sentiment: str or
     ~azure.ai.textanalytics.v3_1_preview_4.models.TokenSentimentValue
    :param confidence_scores: Required. Target sentiment confidence scores for
     the target in the sentence.
    :type confidence_scores:
     ~azure.ai.textanalytics.v3_1_preview_4.models.TargetConfidenceScoreLabel
    :param offset: Required. The target offset from the start of the sentence.
    :type offset: int
    :param length: Required. The length of the target.
    :type length: int
    :param text: Required. The target text detected.
    :type text: str
    :param relations: Required. The array of either assessment or target
     objects which is related to the target.
    :type relations:
     list[~azure.ai.textanalytics.v3_1_preview_4.models.TargetRelation]
    """

    # Unlike SentenceSentiment.targets/assessments, 'relations' is required
    # here (it may still be an empty list on the wire).
    _validation = {
        'sentiment': {'required': True},
        'confidence_scores': {'required': True},
        'offset': {'required': True},
        'length': {'required': True},
        'text': {'required': True},
        'relations': {'required': True},
    }

    _attribute_map = {
        'sentiment': {'key': 'sentiment', 'type': 'TokenSentimentValue'},
        'confidence_scores': {'key': 'confidenceScores', 'type': 'TargetConfidenceScoreLabel'},
        'offset': {'key': 'offset', 'type': 'int'},
        'length': {'key': 'length', 'type': 'int'},
        'text': {'key': 'text', 'type': 'str'},
        'relations': {'key': 'relations', 'type': '[TargetRelation]'},
    }

    def __init__(self, *, sentiment, confidence_scores, offset: int, length: int, text: str, relations, **kwargs) -> None:
        super(SentenceTarget, self).__init__(**kwargs)
        self.sentiment = sentiment
        self.confidence_scores = confidence_scores
        self.offset = offset
        self.length = length
        self.text = text
        self.relations = relations
+
+
class SentimentConfidenceScorePerLabel(Model):
    """Represents the confidence scores between 0 and 1 across all sentiment
    classes: positive, neutral, negative.

    All required parameters must be populated in order to send to Azure.

    :param positive: Required.
    :type positive: float
    :param neutral: Required.
    :type neutral: float
    :param negative: Required.
    :type negative: float
    """

    _validation = {
        'positive': {'required': True},
        'neutral': {'required': True},
        'negative': {'required': True},
    }

    _attribute_map = {
        'positive': {'key': 'positive', 'type': 'float'},
        'neutral': {'key': 'neutral', 'type': 'float'},
        'negative': {'key': 'negative', 'type': 'float'},
    }

    def __init__(self, *, positive: float, neutral: float, negative: float, **kwargs) -> None:
        super(SentimentConfidenceScorePerLabel, self).__init__(**kwargs)
        self.positive = positive
        self.neutral = neutral
        self.negative = negative
+
+
class SentimentResponse(Model):
    """Top-level response for a sentiment-analysis request: per-document
    results, per-document errors, and optional request statistics.

    All required parameters must be populated in order to send to Azure.

    :param documents: Required. Sentiment analysis per document.
    :type documents:
     list[~azure.ai.textanalytics.v3_1_preview_4.models.DocumentSentiment]
    :param errors: Required. Errors by document id.
    :type errors:
     list[~azure.ai.textanalytics.v3_1_preview_4.models.DocumentError]
    :param statistics: Only present when showStats=true was requested.
    :type statistics:
     ~azure.ai.textanalytics.v3_1_preview_4.models.RequestStatistics
    :param model_version: Required. This field indicates which model is used
     for scoring.
    :type model_version: str
    """

    _validation = {
        'documents': {'required': True},
        'errors': {'required': True},
        'model_version': {'required': True},
    }

    _attribute_map = {
        'documents': {'key': 'documents', 'type': '[DocumentSentiment]'},
        'errors': {'key': 'errors', 'type': '[DocumentError]'},
        'statistics': {'key': 'statistics', 'type': 'RequestStatistics'},
        'model_version': {'key': 'modelVersion', 'type': 'str'},
    }

    # 'statistics' is optional, hence the keyword default; required params
    # come first to match the generated signature convention.
    def __init__(self, *, documents, errors, model_version: str, statistics=None, **kwargs) -> None:
        super(SentimentResponse, self).__init__(**kwargs)
        self.documents = documents
        self.errors = errors
        self.statistics = statistics
        self.model_version = model_version
+
+
class TargetConfidenceScoreLabel(Model):
    """Represents the confidence scores across all sentiment classes: positive,
    neutral, negative.

    NOTE(review): despite the summary above mentioning 'neutral', this model
    only carries 'positive' and 'negative' — targets/assessments are never
    scored neutral on the wire. The summary text comes from the service
    swagger; confirm there before changing.

    All required parameters must be populated in order to send to Azure.

    :param positive: Required.
    :type positive: float
    :param negative: Required.
    :type negative: float
    """

    _validation = {
        'positive': {'required': True},
        'negative': {'required': True},
    }

    _attribute_map = {
        'positive': {'key': 'positive', 'type': 'float'},
        'negative': {'key': 'negative', 'type': 'float'},
    }

    def __init__(self, *, positive: float, negative: float, **kwargs) -> None:
        super(TargetConfidenceScoreLabel, self).__init__(**kwargs)
        self.positive = positive
        self.negative = negative
+
+
class TargetRelation(Model):
    """A reference linking an opinion target to a related assessment or target
    object.

    All required parameters must be populated in order to send to Azure.

    :param relation_type: Required. The type related to the target. Possible
     values include: 'assessment', 'target'
    :type relation_type: str or
     ~azure.ai.textanalytics.v3_1_preview_4.models.TargetRelationType
    :param ref: Required. The JSON pointer indicating the linked object.
    :type ref: str
    """

    _validation = {
        'relation_type': {'required': True},
        'ref': {'required': True},
    }

    _attribute_map = {
        'relation_type': {'key': 'relationType', 'type': 'TargetRelationType'},
        'ref': {'key': 'ref', 'type': 'str'},
    }

    def __init__(self, *, relation_type, ref: str, **kwargs) -> None:
        super(TargetRelation, self).__init__(**kwargs)
        self.relation_type = relation_type
        self.ref = ref
+
+
class TasksState(Model):
    """Wrapper carrying the per-task states of an analysis job.

    All required parameters must be populated in order to send to Azure.

    :param tasks: Required.
    :type tasks:
     ~azure.ai.textanalytics.v3_1_preview_4.models.AnalyzeJobStateTasks
    """

    _validation = {
        'tasks': {'required': True},
    }

    _attribute_map = {
        'tasks': {'key': 'tasks', 'type': 'AnalyzeJobStateTasks'},
    }

    def __init__(self, *, tasks, **kwargs) -> None:
        super(TasksState, self).__init__(**kwargs)
        self.tasks = tasks
+
+
class TextAnalyticsError(Model):
    """A service error, optionally nesting an inner error and further error
    details (self-referential via 'details').

    All required parameters must be populated in order to send to Azure.

    :param code: Required. Error code. Possible values include:
     'InvalidRequest', 'InvalidArgument', 'InternalServerError',
     'ServiceUnavailable', 'NotFound'
    :type code: str or
     ~azure.ai.textanalytics.v3_1_preview_4.models.ErrorCodeValue
    :param message: Required. Error message.
    :type message: str
    :param target: Error target.
    :type target: str
    :param innererror: Inner error contains more specific information.
    :type innererror: ~azure.ai.textanalytics.v3_1_preview_4.models.InnerError
    :param details: Details about specific errors that led to this reported
     error.
    :type details:
     list[~azure.ai.textanalytics.v3_1_preview_4.models.TextAnalyticsError]
    """

    _validation = {
        'code': {'required': True},
        'message': {'required': True},
    }

    # 'code' is mapped to the ErrorCodeValue enum type (contrast with
    # TextAnalyticsWarning, where the generated wire type is plain 'str').
    _attribute_map = {
        'code': {'key': 'code', 'type': 'ErrorCodeValue'},
        'message': {'key': 'message', 'type': 'str'},
        'target': {'key': 'target', 'type': 'str'},
        'innererror': {'key': 'innererror', 'type': 'InnerError'},
        'details': {'key': 'details', 'type': '[TextAnalyticsError]'},
    }

    def __init__(self, *, code, message: str, target: str=None, innererror=None, details=None, **kwargs) -> None:
        super(TextAnalyticsError, self).__init__(**kwargs)
        self.code = code
        self.message = message
        self.target = target
        self.innererror = innererror
        self.details = details
+
+
class TextAnalyticsWarning(Model):
    """A non-fatal warning attached to a document result.

    All required parameters must be populated in order to send to Azure.

    :param code: Required. Error code. Possible values include:
     'LongWordsInDocument', 'DocumentTruncated'
    :type code: str or
     ~azure.ai.textanalytics.v3_1_preview_4.models.WarningCodeValue
    :param message: Required. Warning message.
    :type message: str
    :param target_ref: A JSON pointer reference indicating the target object.
    :type target_ref: str
    """

    _validation = {
        'code': {'required': True},
        'message': {'required': True},
    }

    # NOTE(review): the docstring documents 'code' as a WarningCodeValue enum,
    # but the generated wire type here is plain 'str' (TextAnalyticsError maps
    # its code to 'ErrorCodeValue'). Deserialized codes will presumably be raw
    # strings, not enum members — confirm against the swagger/generator before
    # relying on enum comparison.
    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'target_ref': {'key': 'targetRef', 'type': 'str'},
    }

    def __init__(self, *, code, message: str, target_ref: str=None, **kwargs) -> None:
        super(TextAnalyticsWarning, self).__init__(**kwargs)
        self.code = code
        self.message = message
        self.target_ref = target_ref
diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_4/models/_text_analytics_client_enums.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_4/models/_text_analytics_client_enums.py
new file mode 100644
index 000000000000..633e4cbfd959
--- /dev/null
+++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_4/models/_text_analytics_client_enums.py
@@ -0,0 +1,316 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from enum import Enum
+
+
class StringIndexTypeResponse(str, Enum):
    """Unit of measure used for the offset and length values returned by the
    service."""

    text_elements_v8 = "TextElements_v8"  #: Returned offset and length values will correspond to TextElements (Graphemes and Grapheme clusters) conforming to the Unicode 8.0.0 standard. Use this option if your application is written in .Net Framework or .Net Core and you will be using StringInfo.
    unicode_code_point = "UnicodeCodePoint"  #: Returned offset and length values will correspond to Unicode code points. Use this option if your application is written in a language that supports Unicode, for example Python.
    utf16_code_unit = "Utf16CodeUnit"  #: Returned offset and length values will correspond to UTF-16 code units. Use this option if your application is written in a language that supports Unicode, for example Java, JavaScript.
+
+
class PiiCategory(str, Enum):
    """Categories of personally identifiable information the service can
    recognize (country/region-specific identifiers, financial data, cloud
    credentials, plus the catch-all 'all'/'default' selectors).

    NOTE(review): several member names have irregular snake_casing
    (e.g. ``brcpf_number``, ``plregon_number``, ``azure_io_tconnection_string``,
    ``esdni``, ``date_enum``) — these come straight from the code generator's
    name mangling. Do not rename them: the member names are public API.
    """

    aba_routing_number = "ABARoutingNumber"
    ar_national_identity_number = "ARNationalIdentityNumber"
    au_bank_account_number = "AUBankAccountNumber"
    au_drivers_license_number = "AUDriversLicenseNumber"
    au_medical_account_number = "AUMedicalAccountNumber"
    au_passport_number = "AUPassportNumber"
    au_tax_file_number = "AUTaxFileNumber"
    au_business_number = "AUBusinessNumber"
    au_company_number = "AUCompanyNumber"
    at_identity_card = "ATIdentityCard"
    at_tax_identification_number = "ATTaxIdentificationNumber"
    at_value_added_tax_number = "ATValueAddedTaxNumber"
    azure_document_db_auth_key = "AzureDocumentDBAuthKey"
    azure_iaas_database_connection_and_sql_string = "AzureIAASDatabaseConnectionAndSQLString"
    azure_io_tconnection_string = "AzureIoTConnectionString"
    azure_publish_setting_password = "AzurePublishSettingPassword"
    azure_redis_cache_string = "AzureRedisCacheString"
    azure_sas = "AzureSAS"
    azure_service_bus_string = "AzureServiceBusString"
    azure_storage_account_key = "AzureStorageAccountKey"
    azure_storage_account_generic = "AzureStorageAccountGeneric"
    be_national_number = "BENationalNumber"
    be_national_number_v2 = "BENationalNumberV2"
    be_value_added_tax_number = "BEValueAddedTaxNumber"
    brcpf_number = "BRCPFNumber"
    br_legal_entity_number = "BRLegalEntityNumber"
    br_national_idrg = "BRNationalIDRG"
    bg_uniform_civil_number = "BGUniformCivilNumber"
    ca_bank_account_number = "CABankAccountNumber"
    ca_drivers_license_number = "CADriversLicenseNumber"
    ca_health_service_number = "CAHealthServiceNumber"
    ca_passport_number = "CAPassportNumber"
    ca_personal_health_identification = "CAPersonalHealthIdentification"
    ca_social_insurance_number = "CASocialInsuranceNumber"
    cl_identity_card_number = "CLIdentityCardNumber"
    cn_resident_identity_card_number = "CNResidentIdentityCardNumber"
    credit_card_number = "CreditCardNumber"
    hr_identity_card_number = "HRIdentityCardNumber"
    hr_national_id_number = "HRNationalIDNumber"
    hr_personal_identification_number = "HRPersonalIdentificationNumber"
    hr_personal_identification_oib_number_v2 = "HRPersonalIdentificationOIBNumberV2"
    cy_identity_card = "CYIdentityCard"
    cy_tax_identification_number = "CYTaxIdentificationNumber"
    cz_personal_identity_number = "CZPersonalIdentityNumber"
    cz_personal_identity_v2 = "CZPersonalIdentityV2"
    dk_personal_identification_number = "DKPersonalIdentificationNumber"
    dk_personal_identification_v2 = "DKPersonalIdentificationV2"
    drug_enforcement_agency_number = "DrugEnforcementAgencyNumber"
    ee_personal_identification_code = "EEPersonalIdentificationCode"
    eu_debit_card_number = "EUDebitCardNumber"
    eu_drivers_license_number = "EUDriversLicenseNumber"
    eugps_coordinates = "EUGPSCoordinates"
    eu_national_identification_number = "EUNationalIdentificationNumber"
    eu_passport_number = "EUPassportNumber"
    eu_social_security_number = "EUSocialSecurityNumber"
    eu_tax_identification_number = "EUTaxIdentificationNumber"
    fi_european_health_number = "FIEuropeanHealthNumber"
    fi_national_id = "FINationalID"
    fi_national_idv2 = "FINationalIDV2"
    fi_passport_number = "FIPassportNumber"
    fr_drivers_license_number = "FRDriversLicenseNumber"
    fr_health_insurance_number = "FRHealthInsuranceNumber"
    fr_national_id = "FRNationalID"
    fr_passport_number = "FRPassportNumber"
    fr_social_security_number = "FRSocialSecurityNumber"
    fr_tax_identification_number = "FRTaxIdentificationNumber"
    fr_value_added_tax_number = "FRValueAddedTaxNumber"
    de_drivers_license_number = "DEDriversLicenseNumber"
    de_passport_number = "DEPassportNumber"
    de_identity_card_number = "DEIdentityCardNumber"
    de_tax_identification_number = "DETaxIdentificationNumber"
    de_value_added_number = "DEValueAddedNumber"
    gr_national_id_card = "GRNationalIDCard"
    gr_national_idv2 = "GRNationalIDV2"
    gr_tax_identification_number = "GRTaxIdentificationNumber"
    hk_identity_card_number = "HKIdentityCardNumber"
    hu_value_added_number = "HUValueAddedNumber"
    hu_personal_identification_number = "HUPersonalIdentificationNumber"
    hu_tax_identification_number = "HUTaxIdentificationNumber"
    in_permanent_account = "INPermanentAccount"
    in_unique_identification_number = "INUniqueIdentificationNumber"
    id_identity_card_number = "IDIdentityCardNumber"
    international_banking_account_number = "InternationalBankingAccountNumber"
    ie_personal_public_service_number = "IEPersonalPublicServiceNumber"
    ie_personal_public_service_number_v2 = "IEPersonalPublicServiceNumberV2"
    il_bank_account_number = "ILBankAccountNumber"
    il_national_id = "ILNationalID"
    it_drivers_license_number = "ITDriversLicenseNumber"
    it_fiscal_code = "ITFiscalCode"
    it_value_added_tax_number = "ITValueAddedTaxNumber"
    jp_bank_account_number = "JPBankAccountNumber"
    jp_drivers_license_number = "JPDriversLicenseNumber"
    jp_passport_number = "JPPassportNumber"
    jp_resident_registration_number = "JPResidentRegistrationNumber"
    jp_social_insurance_number = "JPSocialInsuranceNumber"
    jp_my_number_corporate = "JPMyNumberCorporate"
    jp_my_number_personal = "JPMyNumberPersonal"
    jp_residence_card_number = "JPResidenceCardNumber"
    lv_personal_code = "LVPersonalCode"
    lt_personal_code = "LTPersonalCode"
    lu_national_identification_number_natural = "LUNationalIdentificationNumberNatural"
    lu_national_identification_number_non_natural = "LUNationalIdentificationNumberNonNatural"
    my_identity_card_number = "MYIdentityCardNumber"
    mt_identity_card_number = "MTIdentityCardNumber"
    mt_tax_id_number = "MTTaxIDNumber"
    nl_citizens_service_number = "NLCitizensServiceNumber"
    nl_citizens_service_number_v2 = "NLCitizensServiceNumberV2"
    nl_tax_identification_number = "NLTaxIdentificationNumber"
    nl_value_added_tax_number = "NLValueAddedTaxNumber"
    nz_bank_account_number = "NZBankAccountNumber"
    nz_drivers_license_number = "NZDriversLicenseNumber"
    nz_inland_revenue_number = "NZInlandRevenueNumber"
    nz_ministry_of_health_number = "NZMinistryOfHealthNumber"
    nz_social_welfare_number = "NZSocialWelfareNumber"
    no_identity_number = "NOIdentityNumber"
    ph_unified_multi_purpose_id_number = "PHUnifiedMultiPurposeIDNumber"
    pl_identity_card = "PLIdentityCard"
    pl_national_id = "PLNationalID"
    pl_national_idv2 = "PLNationalIDV2"
    pl_passport_number = "PLPassportNumber"
    pl_tax_identification_number = "PLTaxIdentificationNumber"
    plregon_number = "PLREGONNumber"
    pt_citizen_card_number = "PTCitizenCardNumber"
    pt_citizen_card_number_v2 = "PTCitizenCardNumberV2"
    pt_tax_identification_number = "PTTaxIdentificationNumber"
    ro_personal_numerical_code = "ROPersonalNumericalCode"
    ru_passport_number_domestic = "RUPassportNumberDomestic"
    ru_passport_number_international = "RUPassportNumberInternational"
    sa_national_id = "SANationalID"
    sg_national_registration_identity_card_number = "SGNationalRegistrationIdentityCardNumber"
    sk_personal_number = "SKPersonalNumber"
    si_tax_identification_number = "SITaxIdentificationNumber"
    si_unique_master_citizen_number = "SIUniqueMasterCitizenNumber"
    za_identification_number = "ZAIdentificationNumber"
    kr_resident_registration_number = "KRResidentRegistrationNumber"
    esdni = "ESDNI"
    es_social_security_number = "ESSocialSecurityNumber"
    es_tax_identification_number = "ESTaxIdentificationNumber"
    sql_server_connection_string = "SQLServerConnectionString"
    se_national_id = "SENationalID"
    se_national_idv2 = "SENationalIDV2"
    se_passport_number = "SEPassportNumber"
    se_tax_identification_number = "SETaxIdentificationNumber"
    swift_code = "SWIFTCode"
    ch_social_security_number = "CHSocialSecurityNumber"
    tw_national_id = "TWNationalID"
    tw_passport_number = "TWPassportNumber"
    tw_resident_certificate = "TWResidentCertificate"
    th_population_identification_code = "THPopulationIdentificationCode"
    tr_national_identification_number = "TRNationalIdentificationNumber"
    uk_drivers_license_number = "UKDriversLicenseNumber"
    uk_electoral_roll_number = "UKElectoralRollNumber"
    uk_national_health_number = "UKNationalHealthNumber"
    uk_national_insurance_number = "UKNationalInsuranceNumber"
    uk_unique_taxpayer_number = "UKUniqueTaxpayerNumber"
    usuk_passport_number = "USUKPassportNumber"
    us_bank_account_number = "USBankAccountNumber"
    us_drivers_license_number = "USDriversLicenseNumber"
    us_individual_taxpayer_identification = "USIndividualTaxpayerIdentification"
    us_social_security_number = "USSocialSecurityNumber"
    ua_passport_number_domestic = "UAPassportNumberDomestic"
    ua_passport_number_international = "UAPassportNumberInternational"
    organization = "Organization"
    email = "Email"
    url = "URL"
    age = "Age"
    phone_number = "PhoneNumber"
    ip_address = "IPAddress"
    # 'date_enum' rather than 'date': generated suffix, presumably to avoid a
    # name clash in the generator templates.
    date_enum = "Date"
    person = "Person"
    address = "Address"
    all = "All"
    default = "Default"
+
+
class ErrorCodeValue(str, Enum):
    """Top-level error codes returned by the Text Analytics service."""

    invalid_request = "InvalidRequest"
    invalid_argument = "InvalidArgument"
    internal_server_error = "InternalServerError"
    service_unavailable = "ServiceUnavailable"
    not_found = "NotFound"
+
+
class InnerErrorCodeValue(str, Enum):
    """More specific error codes carried by an InnerError."""

    invalid_parameter_value = "InvalidParameterValue"
    invalid_request_body_format = "InvalidRequestBodyFormat"
    empty_request = "EmptyRequest"
    missing_input_records = "MissingInputRecords"
    invalid_document = "InvalidDocument"
    model_version_incorrect = "ModelVersionIncorrect"
    invalid_document_batch = "InvalidDocumentBatch"
    unsupported_language_code = "UnsupportedLanguageCode"
    invalid_country_hint = "InvalidCountryHint"
+
+
class WarningCodeValue(str, Enum):
    """Codes for non-fatal warnings attached to document results."""

    long_words_in_document = "LongWordsInDocument"
    document_truncated = "DocumentTruncated"
+
+
class DocumentSentimentValue(str, Enum):
    """Document-level predicted sentiment. Includes 'mixed', which is only
    possible at document granularity (sentences are never 'mixed')."""

    positive = "positive"
    neutral = "neutral"
    negative = "negative"
    mixed = "mixed"
+
+
class SentenceSentimentValue(str, Enum):
    """Sentence-level predicted sentiment (no 'mixed' value, unlike
    DocumentSentimentValue)."""

    positive = "positive"
    neutral = "neutral"
    negative = "negative"
+
+
class TokenSentimentValue(str, Enum):
    """Sentiment for an opinion-mining target/assessment token ('mixed' but no
    'neutral', unlike the sentence-level enum)."""

    positive = "positive"
    mixed = "mixed"
    negative = "negative"
+
+
class TargetRelationType(str, Enum):
    """Kind of object a TargetRelation's JSON pointer refers to."""

    assessment = "assessment"
    target = "target"
+
+
class State(str, Enum):
    """Lifecycle state of a long-running analysis job or one of its tasks."""

    not_started = "notStarted"
    running = "running"
    succeeded = "succeeded"
    failed = "failed"
    cancelled = "cancelled"
    cancelling = "cancelling"
    partially_completed = "partiallyCompleted"
+
+
class Conditionality(str, Enum):
    """Conditionality modifier for a healthcare entity assertion."""

    hypothetical = "Hypothetical"
    conditional = "Conditional"
+
+
class Certainty(str, Enum):
    """Certainty modifier for a healthcare entity assertion. Note the wire
    values contain a literal space (e.g. "Positive Possible")."""

    positive = "Positive"
    positive_possible = "Positive Possible"
    neutral_possible = "Neutral Possible"
    negative_possible = "Negative Possible"
    negative = "Negative"
+
+
class Association(str, Enum):
    """Whether a healthcare entity is associated with the subject of the text
    or with someone else."""

    subject = "subject"
    other = "other"
+
+
class RelationType(str, Enum):
    """Types of relations recognized between healthcare entities."""

    abbreviation = "Abbreviation"
    direction_of_body_structure = "DirectionOfBodyStructure"
    direction_of_condition = "DirectionOfCondition"
    direction_of_examination = "DirectionOfExamination"
    direction_of_treatment = "DirectionOfTreatment"
    dosage_of_medication = "DosageOfMedication"
    form_of_medication = "FormOfMedication"
    frequency_of_medication = "FrequencyOfMedication"
    frequency_of_treatment = "FrequencyOfTreatment"
    qualifier_of_condition = "QualifierOfCondition"
    relation_of_examination = "RelationOfExamination"
    route_of_medication = "RouteOfMedication"
    time_of_condition = "TimeOfCondition"
    time_of_event = "TimeOfEvent"
    time_of_examination = "TimeOfExamination"
    time_of_medication = "TimeOfMedication"
    time_of_treatment = "TimeOfTreatment"
    unit_of_condition = "UnitOfCondition"
    unit_of_examination = "UnitOfExamination"
    value_of_condition = "ValueOfCondition"
    value_of_examination = "ValueOfExamination"
+
+
class StringIndexType(str, Enum):
    """Unit of measure for string offsets/lengths in requests. Same members as
    StringIndexTypeResponse; presumably generated separately for the request
    side of the contract."""

    text_elements_v8 = "TextElements_v8"  #: Returned offset and length values will correspond to TextElements (Graphemes and Grapheme clusters) conforming to the Unicode 8.0.0 standard. Use this option if your application is written in .Net Framework or .Net Core and you will be using StringInfo.
    unicode_code_point = "UnicodeCodePoint"  #: Returned offset and length values will correspond to Unicode code points. Use this option if your application is written in a language that supports Unicode, for example Python.
    utf16_code_unit = "Utf16CodeUnit"  #: Returned offset and length values will correspond to UTF-16 code units. Use this option if your application is written in a language that supports Unicode, for example Java, JavaScript.
diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_4/operations/__init__.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_4/operations/__init__.py
new file mode 100644
index 000000000000..e87e22b49362
--- /dev/null
+++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_4/operations/__init__.py
@@ -0,0 +1,16 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from ._text_analytics_client_operations import TextAnalyticsClientOperationsMixin
+
+__all__ = [
+ 'TextAnalyticsClientOperationsMixin',
+]
diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_4/operations/_text_analytics_client_operations.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_4/operations/_text_analytics_client_operations.py
new file mode 100644
index 000000000000..c8cc0ed7dba8
--- /dev/null
+++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_4/operations/_text_analytics_client_operations.py
@@ -0,0 +1,912 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from msrest.pipeline import ClientRawResponse
+from msrest.exceptions import HttpOperationError
+from .. import models
+
+
+class TextAnalyticsClientOperationsMixin(object):
+
+    def analyze(
+            self, body=None, custom_headers=None, raw=False, **operation_config):
+        """Submit analysis job.
+
+        Submit a collection of text documents for analysis. Specify one or more
+        unique tasks to be executed.
+
+        :param body: Collection of documents to analyze and tasks to execute.
+        :type body:
+         ~azure.ai.textanalytics.v3_1_preview_4.models.AnalyzeBatchInput
+        :param dict custom_headers: headers that will be added to the request
+        :param bool raw: returns the direct response alongside the
+         deserialized response
+        :param operation_config: :ref:`Operation configuration
+         overrides<msrest:optionsforoperations>`.
+        :return: ErrorResponse or ClientRawResponse if raw=true
+        :rtype: ~azure.ai.textanalytics.v3_1_preview_4.models.ErrorResponse or
+         ~msrest.pipeline.ClientRawResponse
+        :raises:
+         :class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
+        """
+        # Construct URL
+        url = self.analyze.metadata['url']
+        path_format_arguments = {
+            'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True)
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}
+
+        # Construct headers
+        header_parameters = {}
+        header_parameters['Accept'] = 'application/json'
+        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
+        if custom_headers:
+            header_parameters.update(custom_headers)
+
+        # Construct body
+        # body is optional; an absent payload is sent as an empty request body.
+        if body is not None:
+            body_content = self._serialize.body(body, 'AnalyzeBatchInput')
+        else:
+            body_content = None
+
+        # Construct and send request
+        request = self._client.post(url, query_parameters, header_parameters, body_content)
+        response = self._client.send(request, stream=False, **operation_config)
+
+        if response.status_code not in [202, 400, 500]:
+            raise HttpOperationError(self._deserialize, response)
+
+        # Only the error statuses (400/500) have a modeled body; a successful
+        # 202 Accepted deserializes to None.
+        # NOTE(review): header_dict (Operation-Location) is populated only for
+        # 400/500 here, so a raw=True caller cannot read the job URL off a
+        # 202 -- looks like a generator quirk; confirm against the REST spec.
+        header_dict = {}
+        deserialized = None
+        if response.status_code == 400:
+            deserialized = self._deserialize('ErrorResponse', response)
+            header_dict = {
+                'Operation-Location': 'str',
+            }
+        if response.status_code == 500:
+            deserialized = self._deserialize('ErrorResponse', response)
+            header_dict = {
+                'Operation-Location': 'str',
+            }
+
+        if raw:
+            client_raw_response = ClientRawResponse(deserialized, response)
+            client_raw_response.add_headers(header_dict)
+            return client_raw_response
+
+        return deserialized
+    analyze.metadata = {'url': '/analyze'}
+
+    def analyze_status(
+            self, job_id, show_stats=None, top=20, skip=0, custom_headers=None, raw=False, **operation_config):
+        """Get analysis status and results.
+
+        Get the status of an analysis job. A job may consist of one or more
+        tasks. Once all tasks are completed, the job will transition to the
+        completed state and results will be available for each task.
+
+        :param job_id: Job ID for Analyze
+        :type job_id: str
+        :param show_stats: (Optional) if set to true, response will contain
+         request and document level statistics.
+        :type show_stats: bool
+        :param top: (Optional) Set the maximum number of results per task.
+         When both $top and $skip are specified, $skip is applied first.
+        :type top: int
+        :param skip: (Optional) Set the number of elements to offset in the
+         response. When both $top and $skip are specified, $skip is applied
+         first.
+        :type skip: int
+        :param dict custom_headers: headers that will be added to the request
+        :param bool raw: returns the direct response alongside the
+         deserialized response
+        :param operation_config: :ref:`Operation configuration
+         overrides<msrest:optionsforoperations>`.
+        :return: object or ClientRawResponse if raw=true
+        :rtype: object or ~msrest.pipeline.ClientRawResponse
+        :raises:
+         :class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
+        """
+        # Construct URL
+        url = self.analyze_status.metadata['url']
+        path_format_arguments = {
+            'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True),
+            'jobId': self._serialize.url("job_id", job_id, 'str')
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        # $top is validated client-side to 1..50 and $skip to >= 0 before the
+        # request is sent (the serializer raises on violation).
+        query_parameters = {}
+        if show_stats is not None:
+            query_parameters['showStats'] = self._serialize.query("show_stats", show_stats, 'bool')
+        if top is not None:
+            query_parameters['$top'] = self._serialize.query("top", top, 'int', maximum=50, minimum=1)
+        if skip is not None:
+            query_parameters['$skip'] = self._serialize.query("skip", skip, 'int', minimum=0)
+
+        # Construct headers
+        header_parameters = {}
+        header_parameters['Accept'] = 'application/json'
+        if custom_headers:
+            header_parameters.update(custom_headers)
+
+        # Construct and send request
+        request = self._client.get(url, query_parameters, header_parameters)
+        response = self._client.send(request, stream=False, **operation_config)
+
+        if response.status_code not in [200, 404, 500]:
+            raise HttpOperationError(self._deserialize, response)
+
+        # 200 carries the job state; 404/500 carry modeled error payloads.
+        deserialized = None
+        if response.status_code == 200:
+            deserialized = self._deserialize('AnalyzeJobState', response)
+        if response.status_code == 404:
+            deserialized = self._deserialize('ErrorResponse', response)
+        if response.status_code == 500:
+            deserialized = self._deserialize('ErrorResponse', response)
+
+        if raw:
+            client_raw_response = ClientRawResponse(deserialized, response)
+            return client_raw_response
+
+        return deserialized
+    analyze_status.metadata = {'url': '/analyze/jobs/{jobId}'}
+
+    def health_status(
+            self, job_id, top=20, skip=0, show_stats=None, custom_headers=None, raw=False, **operation_config):
+        """Get healthcare analysis job status and results.
+
+        Get details of the healthcare prediction job specified by the jobId.
+
+        :param job_id: Job ID
+        :type job_id: str
+        :param top: (Optional) Set the maximum number of results per task.
+         When both $top and $skip are specified, $skip is applied first.
+        :type top: int
+        :param skip: (Optional) Set the number of elements to offset in the
+         response. When both $top and $skip are specified, $skip is applied
+         first.
+        :type skip: int
+        :param show_stats: (Optional) if set to true, response will contain
+         request and document level statistics.
+        :type show_stats: bool
+        :param dict custom_headers: headers that will be added to the request
+        :param bool raw: returns the direct response alongside the
+         deserialized response
+        :param operation_config: :ref:`Operation configuration
+         overrides<msrest:optionsforoperations>`.
+        :return: object or ClientRawResponse if raw=true
+        :rtype: object or ~msrest.pipeline.ClientRawResponse
+        :raises:
+         :class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
+        """
+        # Construct URL
+        url = self.health_status.metadata['url']
+        path_format_arguments = {
+            'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True),
+            'jobId': self._serialize.url("job_id", job_id, 'str')
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        # Same pagination contract as analyze_status: $top in 1..50,
+        # $skip >= 0, both validated client-side.
+        query_parameters = {}
+        if top is not None:
+            query_parameters['$top'] = self._serialize.query("top", top, 'int', maximum=50, minimum=1)
+        if skip is not None:
+            query_parameters['$skip'] = self._serialize.query("skip", skip, 'int', minimum=0)
+        if show_stats is not None:
+            query_parameters['showStats'] = self._serialize.query("show_stats", show_stats, 'bool')
+
+        # Construct headers
+        header_parameters = {}
+        header_parameters['Accept'] = 'application/json'
+        if custom_headers:
+            header_parameters.update(custom_headers)
+
+        # Construct and send request
+        request = self._client.get(url, query_parameters, header_parameters)
+        response = self._client.send(request, stream=False, **operation_config)
+
+        if response.status_code not in [200, 404, 500]:
+            raise HttpOperationError(self._deserialize, response)
+
+        # 200 carries the healthcare job state; 404/500 carry error payloads.
+        deserialized = None
+        if response.status_code == 200:
+            deserialized = self._deserialize('HealthcareJobState', response)
+        if response.status_code == 404:
+            deserialized = self._deserialize('ErrorResponse', response)
+        if response.status_code == 500:
+            deserialized = self._deserialize('ErrorResponse', response)
+
+        if raw:
+            client_raw_response = ClientRawResponse(deserialized, response)
+            return client_raw_response
+
+        return deserialized
+    health_status.metadata = {'url': '/entities/health/jobs/{jobId}'}
+
+    def cancel_health_job(
+            self, job_id, custom_headers=None, raw=False, **operation_config):
+        """Cancel healthcare prediction job.
+
+        Cancel healthcare prediction job.
+
+        :param job_id: Job ID
+        :type job_id: str
+        :param dict custom_headers: headers that will be added to the request
+        :param bool raw: returns the direct response alongside the
+         deserialized response
+        :param operation_config: :ref:`Operation configuration
+         overrides<msrest:optionsforoperations>`.
+        :return: ErrorResponse or ClientRawResponse if raw=true
+        :rtype: ~azure.ai.textanalytics.v3_1_preview_4.models.ErrorResponse or
+         ~msrest.pipeline.ClientRawResponse
+        :raises:
+         :class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
+        """
+        # Construct URL
+        url = self.cancel_health_job.metadata['url']
+        path_format_arguments = {
+            'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True),
+            'jobId': self._serialize.url("job_id", job_id, 'str')
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}
+
+        # Construct headers
+        header_parameters = {}
+        header_parameters['Accept'] = 'application/json'
+        if custom_headers:
+            header_parameters.update(custom_headers)
+
+        # Construct and send request
+        # Cancellation is an HTTP DELETE on the job resource.
+        request = self._client.delete(url, query_parameters, header_parameters)
+        response = self._client.send(request, stream=False, **operation_config)
+
+        if response.status_code not in [202, 404, 500]:
+            raise HttpOperationError(self._deserialize, response)
+
+        # A successful 202 Accepted has no modeled body (returns None); only
+        # the 404/500 error payloads are deserialized.
+        header_dict = {}
+        deserialized = None
+        if response.status_code == 404:
+            deserialized = self._deserialize('ErrorResponse', response)
+            header_dict = {
+                'Operation-Location': 'str',
+            }
+        if response.status_code == 500:
+            deserialized = self._deserialize('ErrorResponse', response)
+            header_dict = {
+                'Operation-Location': 'str',
+            }
+
+        if raw:
+            client_raw_response = ClientRawResponse(deserialized, response)
+            client_raw_response.add_headers(header_dict)
+            return client_raw_response
+
+        return deserialized
+    cancel_health_job.metadata = {'url': '/entities/health/jobs/{jobId}'}
+
+    def health(
+            self, documents, model_version=None, string_index_type="TextElements_v8", custom_headers=None, raw=False, **operation_config):
+        """Submit healthcare analysis job.
+
+        Start a healthcare analysis job to recognize healthcare related
+        entities (drugs, conditions, symptoms, etc) and their relations.
+
+        :param documents: The set of documents to process as part of this
+         batch.
+        :type documents:
+         list[~azure.ai.textanalytics.v3_1_preview_4.models.MultiLanguageInput]
+        :param model_version: (Optional) This value indicates which model will
+         be used for scoring. If a model-version is not specified, the API
+         should default to the latest, non-preview version.
+        :type model_version: str
+        :param string_index_type: (Optional) Specifies the method used to
+         interpret string offsets. Defaults to Text Elements (Graphemes)
+         according to Unicode v8.0.0. For additional information see
+         https://aka.ms/text-analytics-offsets. Possible values include:
+         'TextElements_v8', 'UnicodeCodePoint', 'Utf16CodeUnit'
+        :type string_index_type: str or
+         ~azure.ai.textanalytics.v3_1_preview_4.models.StringIndexType
+        :param dict custom_headers: headers that will be added to the request
+        :param bool raw: returns the direct response alongside the
+         deserialized response
+        :param operation_config: :ref:`Operation configuration
+         overrides<msrest:optionsforoperations>`.
+        :return: ErrorResponse or ClientRawResponse if raw=true
+        :rtype: ~azure.ai.textanalytics.v3_1_preview_4.models.ErrorResponse or
+         ~msrest.pipeline.ClientRawResponse
+        :raises:
+         :class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
+        """
+        # Wrap the flat document list in the batch envelope the service
+        # expects.  NOTE(review): "input" shadows the builtin; kept as-is
+        # because this file is generated and a rename would be lost on
+        # regeneration.
+        input = models.MultiLanguageBatchInput(documents=documents)
+
+        # Construct URL
+        url = self.health.metadata['url']
+        path_format_arguments = {
+            'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True)
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}
+        if model_version is not None:
+            query_parameters['model-version'] = self._serialize.query("model_version", model_version, 'str')
+        if string_index_type is not None:
+            query_parameters['stringIndexType'] = self._serialize.query("string_index_type", string_index_type, 'str')
+
+        # Construct headers
+        header_parameters = {}
+        header_parameters['Accept'] = 'application/json'
+        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
+        if custom_headers:
+            header_parameters.update(custom_headers)
+
+        # Construct body
+        body_content = self._serialize.body(input, 'MultiLanguageBatchInput')
+
+        # Construct and send request
+        request = self._client.post(url, query_parameters, header_parameters, body_content)
+        response = self._client.send(request, stream=False, **operation_config)
+
+        if response.status_code not in [202, 400, 500]:
+            raise HttpOperationError(self._deserialize, response)
+
+        # 202 Accepted (job created) has no modeled body; only 400/500 error
+        # payloads are deserialized, same pattern as analyze().
+        header_dict = {}
+        deserialized = None
+        if response.status_code == 400:
+            deserialized = self._deserialize('ErrorResponse', response)
+            header_dict = {
+                'Operation-Location': 'str',
+            }
+        if response.status_code == 500:
+            deserialized = self._deserialize('ErrorResponse', response)
+            header_dict = {
+                'Operation-Location': 'str',
+            }
+
+        if raw:
+            client_raw_response = ClientRawResponse(deserialized, response)
+            client_raw_response.add_headers(header_dict)
+            return client_raw_response
+
+        return deserialized
+    health.metadata = {'url': '/entities/health/jobs'}
+
+    def entities_recognition_general(
+            self, documents, model_version=None, show_stats=None, string_index_type="TextElements_v8", custom_headers=None, raw=False, **operation_config):
+        """Named Entity Recognition.
+
+        The API returns a list of general named entities in a given document.
+        For the list of supported entity types, check Supported Entity Types in Text Analytics
+        API. See the Supported languages
+        in Text Analytics API for the list of enabled languages.
+
+        :param documents: The set of documents to process as part of this
+         batch.
+        :type documents:
+         list[~azure.ai.textanalytics.v3_1_preview_4.models.MultiLanguageInput]
+        :param model_version: (Optional) This value indicates which model will
+         be used for scoring. If a model-version is not specified, the API
+         should default to the latest, non-preview version.
+        :type model_version: str
+        :param show_stats: (Optional) if set to true, response will contain
+         request and document level statistics.
+        :type show_stats: bool
+        :param string_index_type: (Optional) Specifies the method used to
+         interpret string offsets. Defaults to Text Elements (Graphemes)
+         according to Unicode v8.0.0. For additional information see
+         https://aka.ms/text-analytics-offsets. Possible values include:
+         'TextElements_v8', 'UnicodeCodePoint', 'Utf16CodeUnit'
+        :type string_index_type: str or
+         ~azure.ai.textanalytics.v3_1_preview_4.models.StringIndexType
+        :param dict custom_headers: headers that will be added to the request
+        :param bool raw: returns the direct response alongside the
+         deserialized response
+        :param operation_config: :ref:`Operation configuration
+         overrides<msrest:optionsforoperations>`.
+        :return: object or ClientRawResponse if raw=true
+        :rtype: object or ~msrest.pipeline.ClientRawResponse
+        :raises:
+         :class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
+        """
+        # Wrap the document list in the batch envelope (name shadows the
+        # builtin "input"; generated code, left unchanged).
+        input = models.MultiLanguageBatchInput(documents=documents)
+
+        # Construct URL
+        url = self.entities_recognition_general.metadata['url']
+        path_format_arguments = {
+            'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True)
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}
+        if model_version is not None:
+            query_parameters['model-version'] = self._serialize.query("model_version", model_version, 'str')
+        if show_stats is not None:
+            query_parameters['showStats'] = self._serialize.query("show_stats", show_stats, 'bool')
+        if string_index_type is not None:
+            query_parameters['stringIndexType'] = self._serialize.query("string_index_type", string_index_type, 'str')
+
+        # Construct headers
+        header_parameters = {}
+        header_parameters['Accept'] = 'application/json'
+        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
+        if custom_headers:
+            header_parameters.update(custom_headers)
+
+        # Construct body
+        body_content = self._serialize.body(input, 'MultiLanguageBatchInput')
+
+        # Construct and send request
+        request = self._client.post(url, query_parameters, header_parameters, body_content)
+        response = self._client.send(request, stream=False, **operation_config)
+
+        if response.status_code not in [200, 400, 500]:
+            raise HttpOperationError(self._deserialize, response)
+
+        # Synchronous endpoint: 200 carries results, 400/500 carry errors.
+        deserialized = None
+        if response.status_code == 200:
+            deserialized = self._deserialize('EntitiesResult', response)
+        if response.status_code == 400:
+            deserialized = self._deserialize('ErrorResponse', response)
+        if response.status_code == 500:
+            deserialized = self._deserialize('ErrorResponse', response)
+
+        if raw:
+            client_raw_response = ClientRawResponse(deserialized, response)
+            return client_raw_response
+
+        return deserialized
+    entities_recognition_general.metadata = {'url': '/entities/recognition/general'}
+
+    def entities_recognition_pii(
+            self, documents, model_version=None, show_stats=None, domain=None, string_index_type="TextElements_v8", pii_categories=None, custom_headers=None, raw=False, **operation_config):
+        """Entities containing personal information.
+
+        The API returns a list of entities with personal information (\"SSN\",
+        \"Bank Account\" etc) in the document. For the list of supported entity
+        types, check Supported Entity Types
+        in Text Analytics API. See the Supported languages in Text Analytics
+        API for the list of enabled languages.
+        .
+
+        :param documents: The set of documents to process as part of this
+         batch.
+        :type documents:
+         list[~azure.ai.textanalytics.v3_1_preview_4.models.MultiLanguageInput]
+        :param model_version: (Optional) This value indicates which model will
+         be used for scoring. If a model-version is not specified, the API
+         should default to the latest, non-preview version.
+        :type model_version: str
+        :param show_stats: (Optional) if set to true, response will contain
+         request and document level statistics.
+        :type show_stats: bool
+        :param domain: (Optional) if specified, will set the PII domain to
+         include only a subset of the entity categories. Possible values
+         include: 'PHI', 'none'.
+        :type domain: str
+        :param string_index_type: (Optional) Specifies the method used to
+         interpret string offsets. Defaults to Text Elements (Graphemes)
+         according to Unicode v8.0.0. For additional information see
+         https://aka.ms/text-analytics-offsets. Possible values include:
+         'TextElements_v8', 'UnicodeCodePoint', 'Utf16CodeUnit'
+        :type string_index_type: str or
+         ~azure.ai.textanalytics.v3_1_preview_4.models.StringIndexType
+        :param pii_categories: (Optional) describes the PII categories to
+         return
+        :type pii_categories: list[str or
+         ~azure.ai.textanalytics.v3_1_preview_4.models.PiiCategory]
+        :param dict custom_headers: headers that will be added to the request
+        :param bool raw: returns the direct response alongside the
+         deserialized response
+        :param operation_config: :ref:`Operation configuration
+         overrides<msrest:optionsforoperations>`.
+        :return: object or ClientRawResponse if raw=true
+        :rtype: object or ~msrest.pipeline.ClientRawResponse
+        :raises:
+         :class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
+        """
+        # Batch envelope ("input" shadows the builtin; generated code).
+        input = models.MultiLanguageBatchInput(documents=documents)
+
+        # Construct URL
+        url = self.entities_recognition_pii.metadata['url']
+        path_format_arguments = {
+            'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True)
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}
+        if model_version is not None:
+            query_parameters['model-version'] = self._serialize.query("model_version", model_version, 'str')
+        if show_stats is not None:
+            query_parameters['showStats'] = self._serialize.query("show_stats", show_stats, 'bool')
+        if domain is not None:
+            query_parameters['domain'] = self._serialize.query("domain", domain, 'str')
+        if string_index_type is not None:
+            query_parameters['stringIndexType'] = self._serialize.query("string_index_type", string_index_type, 'str')
+        if pii_categories is not None:
+            # Serialized as a single comma-separated query value; unique=True
+            # asks the serializer to enforce distinct entries -- confirm
+            # against the msrest Serializer.query docs.
+            query_parameters['piiCategories'] = self._serialize.query("pii_categories", pii_categories, '[str]', div=',', unique=True)
+
+        # Construct headers
+        header_parameters = {}
+        header_parameters['Accept'] = 'application/json'
+        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
+        if custom_headers:
+            header_parameters.update(custom_headers)
+
+        # Construct body
+        body_content = self._serialize.body(input, 'MultiLanguageBatchInput')
+
+        # Construct and send request
+        request = self._client.post(url, query_parameters, header_parameters, body_content)
+        response = self._client.send(request, stream=False, **operation_config)
+
+        if response.status_code not in [200, 400, 500]:
+            raise HttpOperationError(self._deserialize, response)
+
+        # 200 carries the PII result; 400/500 carry modeled error payloads.
+        deserialized = None
+        if response.status_code == 200:
+            deserialized = self._deserialize('PiiResult', response)
+        if response.status_code == 400:
+            deserialized = self._deserialize('ErrorResponse', response)
+        if response.status_code == 500:
+            deserialized = self._deserialize('ErrorResponse', response)
+
+        if raw:
+            client_raw_response = ClientRawResponse(deserialized, response)
+            return client_raw_response
+
+        return deserialized
+    entities_recognition_pii.metadata = {'url': '/entities/recognition/pii'}
+
+    def entities_linking(
+            self, documents, model_version=None, show_stats=None, string_index_type="TextElements_v8", custom_headers=None, raw=False, **operation_config):
+        """Linked entities from a well known knowledge base.
+
+        The API returns a list of recognized entities with links to a well
+        known knowledge base. See the Supported languages in Text Analytics
+        API for the list of enabled languages.
+
+        :param documents: The set of documents to process as part of this
+         batch.
+        :type documents:
+         list[~azure.ai.textanalytics.v3_1_preview_4.models.MultiLanguageInput]
+        :param model_version: (Optional) This value indicates which model will
+         be used for scoring. If a model-version is not specified, the API
+         should default to the latest, non-preview version.
+        :type model_version: str
+        :param show_stats: (Optional) if set to true, response will contain
+         request and document level statistics.
+        :type show_stats: bool
+        :param string_index_type: (Optional) Specifies the method used to
+         interpret string offsets. Defaults to Text Elements (Graphemes)
+         according to Unicode v8.0.0. For additional information see
+         https://aka.ms/text-analytics-offsets. Possible values include:
+         'TextElements_v8', 'UnicodeCodePoint', 'Utf16CodeUnit'
+        :type string_index_type: str or
+         ~azure.ai.textanalytics.v3_1_preview_4.models.StringIndexType
+        :param dict custom_headers: headers that will be added to the request
+        :param bool raw: returns the direct response alongside the
+         deserialized response
+        :param operation_config: :ref:`Operation configuration
+         overrides<msrest:optionsforoperations>`.
+        :return: object or ClientRawResponse if raw=true
+        :rtype: object or ~msrest.pipeline.ClientRawResponse
+        :raises:
+         :class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
+        """
+        # Batch envelope ("input" shadows the builtin; generated code).
+        input = models.MultiLanguageBatchInput(documents=documents)
+
+        # Construct URL
+        url = self.entities_linking.metadata['url']
+        path_format_arguments = {
+            'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True)
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}
+        if model_version is not None:
+            query_parameters['model-version'] = self._serialize.query("model_version", model_version, 'str')
+        if show_stats is not None:
+            query_parameters['showStats'] = self._serialize.query("show_stats", show_stats, 'bool')
+        if string_index_type is not None:
+            query_parameters['stringIndexType'] = self._serialize.query("string_index_type", string_index_type, 'str')
+
+        # Construct headers
+        header_parameters = {}
+        header_parameters['Accept'] = 'application/json'
+        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
+        if custom_headers:
+            header_parameters.update(custom_headers)
+
+        # Construct body
+        body_content = self._serialize.body(input, 'MultiLanguageBatchInput')
+
+        # Construct and send request
+        request = self._client.post(url, query_parameters, header_parameters, body_content)
+        response = self._client.send(request, stream=False, **operation_config)
+
+        if response.status_code not in [200, 400, 500]:
+            raise HttpOperationError(self._deserialize, response)
+
+        # 200 carries the linking result; 400/500 carry error payloads.
+        deserialized = None
+        if response.status_code == 200:
+            deserialized = self._deserialize('EntityLinkingResult', response)
+        if response.status_code == 400:
+            deserialized = self._deserialize('ErrorResponse', response)
+        if response.status_code == 500:
+            deserialized = self._deserialize('ErrorResponse', response)
+
+        if raw:
+            client_raw_response = ClientRawResponse(deserialized, response)
+            return client_raw_response
+
+        return deserialized
+    entities_linking.metadata = {'url': '/entities/linking'}
+
+    def key_phrases(
+            self, documents, model_version=None, show_stats=None, custom_headers=None, raw=False, **operation_config):
+        """Key Phrases.
+
+        The API returns a list of strings denoting the key phrases in the input
+        text. See the Supported languages in
+        Text Analytics API for the list of enabled languages.
+
+        :param documents: The set of documents to process as part of this
+         batch.
+        :type documents:
+         list[~azure.ai.textanalytics.v3_1_preview_4.models.MultiLanguageInput]
+        :param model_version: (Optional) This value indicates which model will
+         be used for scoring. If a model-version is not specified, the API
+         should default to the latest, non-preview version.
+        :type model_version: str
+        :param show_stats: (Optional) if set to true, response will contain
+         request and document level statistics.
+        :type show_stats: bool
+        :param dict custom_headers: headers that will be added to the request
+        :param bool raw: returns the direct response alongside the
+         deserialized response
+        :param operation_config: :ref:`Operation configuration
+         overrides<msrest:optionsforoperations>`.
+        :return: object or ClientRawResponse if raw=true
+        :rtype: object or ~msrest.pipeline.ClientRawResponse
+        :raises:
+         :class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
+        """
+        # Batch envelope ("input" shadows the builtin; generated code).
+        input = models.MultiLanguageBatchInput(documents=documents)
+
+        # Construct URL
+        url = self.key_phrases.metadata['url']
+        path_format_arguments = {
+            'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True)
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}
+        if model_version is not None:
+            query_parameters['model-version'] = self._serialize.query("model_version", model_version, 'str')
+        if show_stats is not None:
+            query_parameters['showStats'] = self._serialize.query("show_stats", show_stats, 'bool')
+
+        # Construct headers
+        header_parameters = {}
+        header_parameters['Accept'] = 'application/json'
+        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
+        if custom_headers:
+            header_parameters.update(custom_headers)
+
+        # Construct body
+        body_content = self._serialize.body(input, 'MultiLanguageBatchInput')
+
+        # Construct and send request
+        request = self._client.post(url, query_parameters, header_parameters, body_content)
+        response = self._client.send(request, stream=False, **operation_config)
+
+        if response.status_code not in [200, 400, 500]:
+            raise HttpOperationError(self._deserialize, response)
+
+        # 200 carries the key-phrase result; 400/500 carry error payloads.
+        deserialized = None
+        if response.status_code == 200:
+            deserialized = self._deserialize('KeyPhraseResult', response)
+        if response.status_code == 400:
+            deserialized = self._deserialize('ErrorResponse', response)
+        if response.status_code == 500:
+            deserialized = self._deserialize('ErrorResponse', response)
+
+        if raw:
+            client_raw_response = ClientRawResponse(deserialized, response)
+            return client_raw_response
+
+        return deserialized
+    key_phrases.metadata = {'url': '/keyPhrases'}
+
+    def languages(
+            self, documents, model_version=None, show_stats=None, custom_headers=None, raw=False, **operation_config):
+        """Detect Language.
+
+        The API returns the detected language and a numeric score between 0 and
+        1. Scores close to 1 indicate 100% certainty that the identified
+        language is true. See the Supported
+        languages in Text Analytics API for the list of enabled languages.
+
+        :param documents: The set of documents to process as part of this
+         batch.
+        :type documents:
+         list[~azure.ai.textanalytics.v3_1_preview_4.models.LanguageInput]
+        :param model_version: (Optional) This value indicates which model will
+         be used for scoring. If a model-version is not specified, the API
+         should default to the latest, non-preview version.
+        :type model_version: str
+        :param show_stats: (Optional) if set to true, response will contain
+         request and document level statistics.
+        :type show_stats: bool
+        :param dict custom_headers: headers that will be added to the request
+        :param bool raw: returns the direct response alongside the
+         deserialized response
+        :param operation_config: :ref:`Operation configuration
+         overrides<msrest:optionsforoperations>`.
+        :return: object or ClientRawResponse if raw=true
+        :rtype: object or ~msrest.pipeline.ClientRawResponse
+        :raises:
+         :class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
+        """
+        # Unlike the other operations this endpoint takes LanguageInput
+        # documents, so a LanguageBatchInput envelope is used ("input"
+        # shadows the builtin; generated code).
+        input = models.LanguageBatchInput(documents=documents)
+
+        # Construct URL
+        url = self.languages.metadata['url']
+        path_format_arguments = {
+            'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True)
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}
+        if model_version is not None:
+            query_parameters['model-version'] = self._serialize.query("model_version", model_version, 'str')
+        if show_stats is not None:
+            query_parameters['showStats'] = self._serialize.query("show_stats", show_stats, 'bool')
+
+        # Construct headers
+        header_parameters = {}
+        header_parameters['Accept'] = 'application/json'
+        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
+        if custom_headers:
+            header_parameters.update(custom_headers)
+
+        # Construct body
+        body_content = self._serialize.body(input, 'LanguageBatchInput')
+
+        # Construct and send request
+        request = self._client.post(url, query_parameters, header_parameters, body_content)
+        response = self._client.send(request, stream=False, **operation_config)
+
+        if response.status_code not in [200, 400, 500]:
+            raise HttpOperationError(self._deserialize, response)
+
+        # 200 carries the detection result; 400/500 carry error payloads.
+        deserialized = None
+        if response.status_code == 200:
+            deserialized = self._deserialize('LanguageResult', response)
+        if response.status_code == 400:
+            deserialized = self._deserialize('ErrorResponse', response)
+        if response.status_code == 500:
+            deserialized = self._deserialize('ErrorResponse', response)
+
+        if raw:
+            client_raw_response = ClientRawResponse(deserialized, response)
+            return client_raw_response
+
+        return deserialized
+    languages.metadata = {'url': '/languages'}
+
+    def sentiment(
+            self, documents, model_version=None, show_stats=None, opinion_mining=None, string_index_type="TextElements_v8", custom_headers=None, raw=False, **operation_config):
+        """Sentiment.
+
+        The API returns a detailed sentiment analysis for the input text. The
+        analysis is done in multiple levels of granularity, start from the a
+        document level, down to sentence and key terms (targets and
+        assessments).
+
+        :param documents: The set of documents to process as part of this
+         batch.
+        :type documents:
+         list[~azure.ai.textanalytics.v3_1_preview_4.models.MultiLanguageInput]
+        :param model_version: (Optional) This value indicates which model will
+         be used for scoring. If a model-version is not specified, the API
+         should default to the latest, non-preview version.
+        :type model_version: str
+        :param show_stats: (Optional) if set to true, response will contain
+         request and document level statistics.
+        :type show_stats: bool
+        :param opinion_mining: (Optional) if set to true, response will
+         contain not only sentiment prediction but also opinion mining
+         (aspect-based sentiment analysis) results.
+        :type opinion_mining: bool
+        :param string_index_type: (Optional) Specifies the method used to
+         interpret string offsets. Defaults to Text Elements (Graphemes)
+         according to Unicode v8.0.0. For additional information see
+         https://aka.ms/text-analytics-offsets. Possible values include:
+         'TextElements_v8', 'UnicodeCodePoint', 'Utf16CodeUnit'
+        :type string_index_type: str or
+         ~azure.ai.textanalytics.v3_1_preview_4.models.StringIndexType
+        :param dict custom_headers: headers that will be added to the request
+        :param bool raw: returns the direct response alongside the
+         deserialized response
+        :param operation_config: :ref:`Operation configuration
+         overrides<msrest:optionsforoperations>`.
+        :return: object or ClientRawResponse if raw=true
+        :rtype: object or ~msrest.pipeline.ClientRawResponse
+        :raises:
+         :class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
+        """
+        # NOTE(review): 'input' shadows the builtin of the same name; kept
+        # as-is because this is AutoRest-generated code that would be
+        # overwritten on regeneration.
+        input = models.MultiLanguageBatchInput(documents=documents)
+
+        # Construct URL
+        # The endpoint is injected verbatim (skip_quote=True) into the URL
+        # template stored on this method's 'metadata' attribute below.
+        url = self.sentiment.metadata['url']
+        path_format_arguments = {
+            'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True)
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        # Optional query parameters are only sent when explicitly provided;
+        # string_index_type has a non-None default and is therefore always sent.
+        query_parameters = {}
+        if model_version is not None:
+            query_parameters['model-version'] = self._serialize.query("model_version", model_version, 'str')
+        if show_stats is not None:
+            query_parameters['showStats'] = self._serialize.query("show_stats", show_stats, 'bool')
+        if opinion_mining is not None:
+            query_parameters['opinionMining'] = self._serialize.query("opinion_mining", opinion_mining, 'bool')
+        if string_index_type is not None:
+            query_parameters['stringIndexType'] = self._serialize.query("string_index_type", string_index_type, 'str')
+
+        # Construct headers
+        # Caller-supplied custom_headers intentionally override the defaults
+        # set above (dict.update applies them last).
+        header_parameters = {}
+        header_parameters['Accept'] = 'application/json'
+        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
+        if custom_headers:
+            header_parameters.update(custom_headers)
+
+        # Construct body
+        body_content = self._serialize.body(input, 'MultiLanguageBatchInput')
+
+        # Construct and send request
+        request = self._client.post(url, query_parameters, header_parameters, body_content)
+        response = self._client.send(request, stream=False, **operation_config)
+
+        # Only unexpected status codes raise; 400/500 bodies are deserialized
+        # into ErrorResponse objects and RETURNED to the caller (generated
+        # msrest convention), not raised.
+        if response.status_code not in [200, 400, 500]:
+            raise HttpOperationError(self._deserialize, response)
+
+        deserialized = None
+        if response.status_code == 200:
+            deserialized = self._deserialize('SentimentResponse', response)
+        if response.status_code == 400:
+            deserialized = self._deserialize('ErrorResponse', response)
+        if response.status_code == 500:
+            deserialized = self._deserialize('ErrorResponse', response)
+
+        # raw=True wraps the deserialized model together with the transport
+        # response so callers can inspect headers/status directly.
+        if raw:
+            client_raw_response = ClientRawResponse(deserialized, response)
+            return client_raw_response
+
+        return deserialized
+    sentiment.metadata = {'url': '/sentiment'}
diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_4/version.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_4/version.py
new file mode 100644
index 000000000000..b8ffb04f789f
--- /dev/null
+++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/v3_1_preview_4/version.py
@@ -0,0 +1,13 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+# AutoRest-emitted placeholder version for the generated v3_1_preview_4
+# sub-package; the distributable package version lives in _version.py.
+VERSION = "0.0.1"
+