Skip to content
Merged
Show file tree
Hide file tree
Changes from 4 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 6 additions & 0 deletions sdk/textanalytics/azure-ai-textanalytics/CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,12 @@

## 5.1.0b8 (Unreleased)

**Breaking Changes**

- Changed the response structure of `begin_analyze_actions`. Now, we return a list of results, where each result is a list of the action results for the document, in the order the documents and actions were passed.
- Removed `AnalyzeActionsType`
- Removed `AnalyzeActionsResult`
- Removed `AnalyzeActionsError`

## 5.1.0b7 (2021-05-18)

Expand Down
103 changes: 29 additions & 74 deletions sdk/textanalytics/azure-ai-textanalytics/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -506,10 +506,7 @@ from azure.core.credentials import AzureKeyCredential
from azure.ai.textanalytics import (
TextAnalyticsClient,
RecognizeEntitiesAction,
RecognizePiiEntitiesAction,
ExtractKeyPhrasesAction,
RecognizeLinkedEntitiesAction,
AnalyzeSentimentAction
AnalyzeSentimentAction,
)

credential = AzureKeyCredential("<api_key>")
Expand All @@ -524,81 +521,39 @@ poller = text_analytics_client.begin_analyze_actions(
display_name="Sample Text Analysis",
actions=[
RecognizeEntitiesAction(),
RecognizePiiEntitiesAction(),
ExtractKeyPhrasesAction(),
RecognizeLinkedEntitiesAction(),
AnalyzeSentimentAction()
]
)

# returns multiple actions results in the same order as the inputted actions
result = poller.result()

first_action_result = next(result)
print("Results of Entities Recognition action:")
docs = [doc for doc in first_action_result.document_results if not doc.is_error]

for idx, doc in enumerate(docs):
print("\nDocument text: {}".format(documents[idx]))
for entity in doc.entities:
print("Entity: {}".format(entity.text))
print("...Category: {}".format(entity.category))
print("...Confidence Score: {}".format(entity.confidence_score))
print("...Offset: {}".format(entity.offset))
print("------------------------------------------")

second_action_result = next(result)
print("Results of PII Entities Recognition action:")
docs = [doc for doc in second_action_result.document_results if not doc.is_error]

for idx, doc in enumerate(docs):
print("Document text: {}".format(documents[idx]))
for entity in doc.entities:
print("Entity: {}".format(entity.text))
print("Category: {}".format(entity.category))
print("Confidence Score: {}\n".format(entity.confidence_score))
print("------------------------------------------")

third_action_result = next(result)
print("Results of Key Phrase Extraction action:")
docs = [doc for doc in third_action_result.document_results if not doc.is_error]

for idx, doc in enumerate(docs):
print("Document text: {}\n".format(documents[idx]))
print("Key Phrases: {}\n".format(doc.key_phrases))
print("------------------------------------------")

fourth_action_result = next(result)
print("Results of Linked Entities Recognition action:")
docs = [doc for doc in fourth_action_result.document_results if not doc.is_error]

for idx, doc in enumerate(docs):
print("Document text: {}\n".format(documents[idx]))
for linked_entity in doc.entities:
print("Entity name: {}".format(linked_entity.name))
print("...Data source: {}".format(linked_entity.data_source))
print("...Data source language: {}".format(linked_entity.language))
print("...Data source entity ID: {}".format(linked_entity.data_source_entity_id))
print("...Data source URL: {}".format(linked_entity.url))
print("...Document matches:")
for match in linked_entity.matches:
print("......Match text: {}".format(match.text))
print(".........Confidence Score: {}".format(match.confidence_score))
print(".........Offset: {}".format(match.offset))
print(".........Length: {}".format(match.length))
print("------------------------------------------")

fifth_action_result = next(result)
print("Results of Sentiment Analysis action:")
docs = [doc for doc in fifth_action_result.document_results if not doc.is_error]

for doc in docs:
print("Overall sentiment: {}".format(doc.sentiment))
print("Scores: positive={}; neutral={}; negative={} \n".format(
doc.confidence_scores.positive,
doc.confidence_scores.neutral,
doc.confidence_scores.negative,
))
document_results = poller.result()
for doc, action_results in zip(documents, document_results):
recognize_entities_result, analyze_sentiment_result = action_results
print("\nDocument text: {}".format(doc))
print("...Results of Recognize Entities Action:")
if recognize_entities_result.is_error:
print("......Is an error with code '{}' and message '{}'".format(
recognize_entities_result.code, recognize_entities_result.message
))
else:
for entity in recognize_entities_result.entities:
print("......Entity: {}".format(entity.text))
print(".........Category: {}".format(entity.category))
print(".........Confidence Score: {}".format(entity.confidence_score))
print(".........Offset: {}".format(entity.offset))

print("...Results of Analyze Sentiment action:")
if analyze_sentiment_result.is_error:
print("......Is an error with code '{}' and message '{}'".format(
analyze_sentiment_result.code, analyze_sentiment_result.message
))
else:
print("......Overall sentiment: {}".format(analyze_sentiment_result.sentiment))
print("......Scores: positive={}; neutral={}; negative={} \n".format(
analyze_sentiment_result.confidence_scores.positive,
analyze_sentiment_result.confidence_scores.neutral,
analyze_sentiment_result.confidence_scores.negative,
))
print("------------------------------------------")
```

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -39,9 +39,7 @@
RecognizeLinkedEntitiesAction,
RecognizePiiEntitiesAction,
ExtractKeyPhrasesAction,
AnalyzeActionsResult,
AnalyzeActionsType,
AnalyzeActionsError,
_AnalyzeActionsType,
HealthcareEntityRelationRoleType,
HealthcareRelation,
HealthcareRelationRole,
Expand Down Expand Up @@ -91,9 +89,7 @@
'RecognizeLinkedEntitiesAction',
'RecognizePiiEntitiesAction',
'ExtractKeyPhrasesAction',
'AnalyzeActionsResult',
'AnalyzeActionsType',
"AnalyzeActionsError",
'_AnalyzeActionsType',
"PiiEntityCategoryType",
"HealthcareEntityRelationType",
"HealthcareEntityRelationRoleType",
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -1356,7 +1356,7 @@ def __repr__(self):
.format(self.positive, self.neutral, self.negative)[:1024]


class AnalyzeActionsType(str, Enum):
class _AnalyzeActionsType(str, Enum):
"""The type of action that was applied to the documents
"""
RECOGNIZE_ENTITIES = "recognize_entities" #: Entities Recognition action.
Expand All @@ -1365,62 +1365,6 @@ class AnalyzeActionsType(str, Enum):
RECOGNIZE_LINKED_ENTITIES = "recognize_linked_entities" #: Linked Entities Recognition action.
ANALYZE_SENTIMENT = "analyze_sentiment" #: Sentiment Analysis action.


class AnalyzeActionsResult(DictMixin):
    """Container for the results of one action run over a list of documents
    by `begin_analyze_actions`.

    :ivar document_results: The per-document result objects produced by this
        action, in the order the documents were passed.
    :vartype document_results: list[~azure.ai.textanalytics.RecognizeEntitiesResult]
    :ivar bool is_error: Boolean check for error item when iterating over the
        list of actions. Always False for an instance of AnalyzeActionsResult.
    :ivar action_type: The type of action this class is a result of.
    :vartype action_type: str or ~azure.ai.textanalytics.AnalyzeActionsType
    :ivar ~datetime.datetime completed_on: Date and time (UTC) when the result
        completed on the service.
    """

    def __init__(self, **kwargs):
        # All attributes default to None when not supplied, except is_error,
        # which is always False for a successful action result.
        self.document_results = kwargs.get("document_results")
        self.is_error = False
        self.action_type = kwargs.get("action_type")
        self.completed_on = kwargs.get("completed_on")

    def __repr__(self):
        # Truncated to 1024 characters, matching the file's repr convention.
        return (
            f"AnalyzeActionsResult(document_results={self.document_results!r}, "
            f"is_error={self.is_error}, action_type={self.action_type}, "
            f"completed_on={self.completed_on})"
        )[:1024]


class AnalyzeActionsError(DictMixin):
    """AnalyzeActionsError is an error object which represents an
    error response for an action.

    :ivar error: The action result error.
    :vartype error: ~azure.ai.textanalytics.TextAnalyticsError
    :ivar bool is_error: Boolean check for error item when iterating over list of
        results. Always True for an instance of a DocumentError.
    """

    def __init__(self, **kwargs):
        self.error = kwargs.get("error")
        self.is_error = True

    def __repr__(self):
        # Fixed: the format string previously lacked the closing parenthesis,
        # and the [:1024] truncation used by the sibling __repr__ methods.
        return "AnalyzeActionsError(error={}, is_error={})".format(
            repr(self.error), self.is_error
        )[:1024]

    @classmethod
    def _from_generated(cls, error):
        # Wrap a generated-layer error into the public TextAnalyticsError shape.
        return cls(
            error=TextAnalyticsError(code=error.code, message=error.message, target=error.target)
        )


class RecognizeEntitiesAction(DictMixin):
"""RecognizeEntitiesAction encapsulates the parameters for starting a long-running Entities Recognition operation.

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@
RecognizePiiEntitiesAction,
RecognizeLinkedEntitiesAction,
AnalyzeSentimentAction,
AnalyzeActionsType,
_AnalyzeActionsType,
)

def _validate_input(documents, hint, whole_input_hint):
Expand Down Expand Up @@ -71,14 +71,14 @@ def _validate_input(documents, hint, whole_input_hint):

def _determine_action_type(action):
    """Map an action model instance to its ``_AnalyzeActionsType`` enum value.

    Anything that is not one of the explicitly checked action types falls
    through to ``EXTRACT_KEY_PHRASES`` (i.e. ``ExtractKeyPhrasesAction``).

    :param action: One of the ``*Action`` input models.
    :return: The corresponding ``_AnalyzeActionsType`` member.
    """
    # Note: this section previously contained duplicated diff residue
    # (both the old public AnalyzeActionsType and the new private
    # _AnalyzeActionsType return lines); only the new enum is used here.
    if isinstance(action, RecognizeEntitiesAction):
        return _AnalyzeActionsType.RECOGNIZE_ENTITIES
    if isinstance(action, RecognizePiiEntitiesAction):
        return _AnalyzeActionsType.RECOGNIZE_PII_ENTITIES
    if isinstance(action, RecognizeLinkedEntitiesAction):
        return _AnalyzeActionsType.RECOGNIZE_LINKED_ENTITIES
    if isinstance(action, AnalyzeSentimentAction):
        return _AnalyzeActionsType.ANALYZE_SENTIMENT
    return _AnalyzeActionsType.EXTRACT_KEY_PHRASES

def _check_string_index_type_arg(string_index_type_arg, api_version, string_index_type_default="UnicodeCodePoint"):
string_index_type = None
Expand Down
Loading