diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/README.md b/sdk/cognitivelanguage/azure-ai-language-questionanswering/README.md
index 4e2634d38137..4ed465a32252 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/README.md
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/README.md
@@ -70,18 +70,18 @@ The following examples show common scenarios using the `client` [created above](
### Ask a question
-The only input required to ask a question using a knowledgebase is just the question itself:
+The only input required to ask a question using a knowledge base is the question itself:
```python
from azure.ai.language.questionanswering import models as qna
-params = qna.KnowledgebaseQueryParameters(
+params = qna.KnowledgeBaseQueryOptions(
question="How long should my Surface battery last?"
)
output = client.query_knowledgebase(
project_name="FAQ",
- knowledgebase_query_parameters=params
+ knowledge_base_query_options=params
)
for candidate in output.answers:
print("({}) {}".format(candidate.confidence_score, candidate.answer))
@@ -89,16 +89,16 @@ for candidate in output.answers:
```
-You can set additional properties on `KnowledgebaseQueryParameters` to limit the number of answers, specify a minimum confidence score, and more.
+You can set additional properties on `KnowledgeBaseQueryOptions` to limit the number of answers, specify a minimum confidence score, and more.
### Ask a follow-up question
-If your knowledgebase is configured for [chit-chat][questionanswering_docs_chat], you can ask a follow-up question provided the previous question-answering ID and, optionally, the exact question the user asked:
+If your knowledge base is configured for [chit-chat][questionanswering_docs_chat], you can ask a follow-up question by providing the previous question-answering ID and, optionally, the exact question the user asked:
```python
-params = qna.models.KnowledgebaseQueryParameters(
+params = qna.models.KnowledgeBaseQueryOptions(
    question="How long should charging take?",
- context=qna.models.KnowledgebaseAnswerRequestContext(
+ context=qna.models.KnowledgeBaseAnswerRequestContext(
previous_user_query="How long should my Surface battery last?",
previous_qna_id=previous_answer.id
)
@@ -106,7 +106,7 @@ params = qna.models.KnowledgebaseQueryParameters(
output = client.query_knowledgebase(
project_name="FAQ",
- knowledgebase_query_parameters=params
+ knowledge_base_query_options=params
)
for candidate in output.answers:
print("({}) {}".format(candidate.confidence_score, candidate.answer))
@@ -123,13 +123,13 @@ from azure.ai.language.questionanswering import models as qna
client = QuestionAnsweringClient(endpoint, credential)
-params = qna.KnowledgebaseQueryParameters(
+params = qna.KnowledgeBaseQueryOptions(
question="How long should my Surface battery last?"
)
output = await client.query_knowledgebase(
project_name="FAQ",
- knowledgebase_query_parameters=params
+ knowledge_base_query_options=params
)
```
@@ -149,8 +149,8 @@ from azure.core.exceptions import HttpResponseError
try:
client.query_knowledgebase(
- project_name="invalid-knowledgebase",
- knowledgebase_query_parameters=params
+ project_name="invalid-knowledge-base",
+ knowledge_base_query_options=params
)
except HttpResponseError as error:
print("Query failed: {}".format(error.message))
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/__init__.py b/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/__init__.py
index 69e3be50dac4..d55ccad1f573 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/__init__.py
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/__init__.py
@@ -1 +1 @@
-__path__ = __import__('pkgutil').extend_path(__path__, __name__)
+__path__ = __import__("pkgutil").extend_path(__path__, __name__) # type: ignore
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/__init__.py b/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/__init__.py
index 69e3be50dac4..d55ccad1f573 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/__init__.py
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/__init__.py
@@ -1 +1 @@
-__path__ = __import__('pkgutil').extend_path(__path__, __name__)
+__path__ = __import__("pkgutil").extend_path(__path__, __name__) # type: ignore
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/__init__.py b/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/__init__.py
index 69e3be50dac4..d55ccad1f573 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/__init__.py
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/__init__.py
@@ -1 +1 @@
-__path__ = __import__('pkgutil').extend_path(__path__, __name__)
+__path__ = __import__("pkgutil").extend_path(__path__, __name__) # type: ignore
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/__init__.py b/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/__init__.py
index d1224fabb06b..7dd89d1d204c 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/__init__.py
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/__init__.py
@@ -10,10 +10,11 @@
from ._version import VERSION
__version__ = VERSION
-__all__ = ['QuestionAnsweringClient']
+__all__ = ["QuestionAnsweringClient"]
try:
from ._patch import patch_sdk # type: ignore
+
patch_sdk()
except ImportError:
pass
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/_question_answering_client.py b/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/_question_answering_client.py
index 65bc45457fd3..c38beab9a4a0 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/_question_answering_client.py
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/_question_answering_client.py
@@ -12,6 +12,10 @@
from azure.core import PipelineClient
from msrest import Deserializer, Serializer
+from . import models
+from ._configuration import QuestionAnsweringClientConfiguration
+from .operations import QuestionAnsweringClientOperationsMixin
+
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any
@@ -19,15 +23,12 @@
from azure.core.credentials import AzureKeyCredential
from azure.core.rest import HttpRequest, HttpResponse
-from ._configuration import QuestionAnsweringClientConfiguration
-from .operations import QuestionAnsweringClientOperationsMixin
-from . import models
-
class QuestionAnsweringClient(QuestionAnsweringClientOperationsMixin):
"""The language service API is a suite of natural language processing (NLP) skills built with best-in-class Microsoft machine learning algorithms. The API can be used to analyze unstructured text for tasks such as sentiment analysis, key phrase extraction, language detection and question answering. Further documentation can be found in :code:`https://docs.microsoft.com/en-us/azure/cognitive-services/text-analytics/overview`.
- :param endpoint: Supported Cognitive Services endpoint (e.g., https://:code:``.api.cognitiveservices.azure.com).
+ :param endpoint: Supported Cognitive Services endpoint (e.g.,
+ https://:code:``.api.cognitiveservices.azure.com).
:type endpoint: str
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.AzureKeyCredential
@@ -49,15 +50,19 @@ def __init__(
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
- def send_request(self, request, **kwargs):
- # type: (HttpRequest, Any) -> HttpResponse
+ def send_request(
+ self,
+ request, # type: HttpRequest
+ **kwargs # type: Any
+ ):
+ # type: (...) -> HttpResponse
"""Runs the network request through the client's chained policies.
We have helper methods to create requests specific to this service in `azure.ai.language.questionanswering.rest`.
Use these helper methods to create the request you pass to this method. See our example below:
>>> from azure.ai.language.questionanswering.rest import build_query_knowledgebase_request
- >>> request = build_query_knowledgebase_request(project_name, json, content, deployment_name)
+ >>> request = build_query_knowledgebase_request(project_name=project_name, json=json, content=content, deployment_name=deployment_name, **kwargs)
>>> response = client.send_request(request)
@@ -73,6 +78,7 @@ def send_request(self, request, **kwargs):
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.HttpResponse
"""
+
request_copy = deepcopy(request)
path_format_arguments = {
"Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/aio/__init__.py b/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/aio/__init__.py
index 3a05f81f6173..93007f203501 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/aio/__init__.py
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/aio/__init__.py
@@ -8,4 +8,4 @@
from ._question_answering_client import QuestionAnsweringClient
-__all__ = ['QuestionAnsweringClient']
+__all__ = ["QuestionAnsweringClient"]
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/aio/_question_answering_client.py b/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/aio/_question_answering_client.py
index 399cc33816b7..09131557630d 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/aio/_question_answering_client.py
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/aio/_question_answering_client.py
@@ -7,22 +7,23 @@
# --------------------------------------------------------------------------
from copy import deepcopy
-from typing import Any
+from typing import Any, Awaitable
from azure.core import AsyncPipelineClient
from azure.core.credentials import AzureKeyCredential
from azure.core.rest import AsyncHttpResponse, HttpRequest
from msrest import Deserializer, Serializer
+from .. import models
from ._configuration import QuestionAnsweringClientConfiguration
from .operations import QuestionAnsweringClientOperationsMixin
-from .. import models
class QuestionAnsweringClient(QuestionAnsweringClientOperationsMixin):
"""The language service API is a suite of natural language processing (NLP) skills built with best-in-class Microsoft machine learning algorithms. The API can be used to analyze unstructured text for tasks such as sentiment analysis, key phrase extraction, language detection and question answering. Further documentation can be found in :code:`https://docs.microsoft.com/en-us/azure/cognitive-services/text-analytics/overview`.
- :param endpoint: Supported Cognitive Services endpoint (e.g., https://:code:``.api.cognitiveservices.azure.com).
+ :param endpoint: Supported Cognitive Services endpoint (e.g.,
+ https://:code:``.api.cognitiveservices.azure.com).
:type endpoint: str
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.AzureKeyCredential
@@ -38,14 +39,14 @@ def __init__(self, endpoint: str, credential: AzureKeyCredential, **kwargs: Any)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
- def send_request(self, request: HttpRequest, **kwargs: Any) -> AsyncHttpResponse:
+ def send_request(self, request: HttpRequest, **kwargs: Any) -> Awaitable[AsyncHttpResponse]:
"""Runs the network request through the client's chained policies.
We have helper methods to create requests specific to this service in `azure.ai.language.questionanswering.rest`.
Use these helper methods to create the request you pass to this method. See our example below:
>>> from azure.ai.language.questionanswering.rest import build_query_knowledgebase_request
- >>> request = build_query_knowledgebase_request(project_name, json, content, deployment_name)
+ >>> request = build_query_knowledgebase_request(project_name=project_name, json=json, content=content, deployment_name=deployment_name, **kwargs)
>>> response = await client.send_request(request)
@@ -61,6 +62,7 @@ def send_request(self, request: HttpRequest, **kwargs: Any) -> AsyncHttpResponse
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.AsyncHttpResponse
"""
+
request_copy = deepcopy(request)
path_format_arguments = {
"Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/aio/operations/_question_answering_client_operations.py b/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/aio/operations/_question_answering_client_operations.py
index 1c808e40ea9f..ae6ee524394c 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/aio/operations/_question_answering_client_operations.py
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/aio/operations/_question_answering_client_operations.py
@@ -20,7 +20,7 @@
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
-from ... import models as _models, rest
+from ... import models as _models, rest as rest
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
@@ -29,44 +29,42 @@
class QuestionAnsweringClientOperationsMixin:
async def query_knowledgebase(
self,
- knowledgebase_query_parameters: "_models.KnowledgebaseQueryParameters",
+ knowledge_base_query_options: "_models.KnowledgeBaseQueryOptions",
*,
project_name: str,
deployment_name: Optional[str] = None,
**kwargs: Any
- ) -> "_models.KnowledgebaseAnswers":
- """Answers the specified question using your knowledgebase.
+ ) -> "_models.KnowledgeBaseAnswers":
+ """Answers the specified question using your knowledge base.
- Answers the specified question using your knowledgebase.
+ Answers the specified question using your knowledge base.
+ :param knowledge_base_query_options: Post body of the request.
+ :type knowledge_base_query_options:
+ ~azure.ai.language.questionanswering.models.KnowledgeBaseQueryOptions
:keyword project_name: The name of the project to use.
:paramtype project_name: str
- :param knowledgebase_query_parameters: Post body of the request.
- :type knowledgebase_query_parameters:
- ~azure.ai.language.questionanswering.models.KnowledgebaseQueryParameters
:keyword deployment_name: The name of the specific deployment of the project to use.
:paramtype deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
- :return: KnowledgebaseAnswers, or the result of cls(response)
- :rtype: ~azure.ai.language.questionanswering.models.KnowledgebaseAnswers
+ :return: KnowledgeBaseAnswers, or the result of cls(response)
+ :rtype: ~azure.ai.language.questionanswering.models.KnowledgeBaseAnswers
:raises: ~azure.core.exceptions.HttpResponseError
"""
- cls = kwargs.pop("cls", None) # type: ClsType["_models.KnowledgebaseAnswers"]
+ cls = kwargs.pop("cls", None) # type: ClsType["_models.KnowledgeBaseAnswers"]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
-
content_type = kwargs.pop("content_type", "application/json") # type: Optional[str]
- json = self._serialize.body(knowledgebase_query_parameters, "object")
+ json = self._serialize.body(knowledge_base_query_options, "KnowledgeBaseQueryOptions")
request = rest.build_query_knowledgebase_request(
+ content_type=content_type,
project_name=project_name,
deployment_name=deployment_name,
json=json,
- content_type=content_type,
template_url=self.query_knowledgebase.metadata["url"],
- **kwargs
- )
+ )._to_pipeline_transport_request()
path_format_arguments = {
"Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
}
@@ -82,24 +80,22 @@ async def query_knowledgebase(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error)
- deserialized = self._deserialize("KnowledgebaseAnswers", pipeline_response)
+ deserialized = self._deserialize("KnowledgeBaseAnswers", pipeline_response)
if cls:
- return cls(PipelineResponse._convert(pipeline_response), deserialized, {})
+ return cls(pipeline_response, deserialized, {})
return deserialized
query_knowledgebase.metadata = {"url": "/:query-knowledgebases"} # type: ignore
- async def query_text(
- self, text_query_parameters: "_models.TextQueryParameters", **kwargs: Any
- ) -> "_models.TextAnswers":
+ async def query_text(self, text_query_options: "_models.TextQueryOptions", **kwargs: Any) -> "_models.TextAnswers":
"""Answers the specified question using the provided text in the body.
Answers the specified question using the provided text in the body.
- :param text_query_parameters: Post body of the request.
- :type text_query_parameters: ~azure.ai.language.questionanswering.models.TextQueryParameters
+ :param text_query_options: Post body of the request.
+ :type text_query_options: ~azure.ai.language.questionanswering.models.TextQueryOptions
:keyword callable cls: A custom type or function that will be passed the direct response
:return: TextAnswers, or the result of cls(response)
:rtype: ~azure.ai.language.questionanswering.models.TextAnswers
@@ -108,14 +104,15 @@ async def query_text(
cls = kwargs.pop("cls", None) # type: ClsType["_models.TextAnswers"]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
-
content_type = kwargs.pop("content_type", "application/json") # type: Optional[str]
- json = self._serialize.body(text_query_parameters, "object")
+ json = self._serialize.body(text_query_options, "TextQueryOptions")
request = rest.build_query_text_request(
- json=json, content_type=content_type, template_url=self.query_text.metadata["url"], **kwargs
- )
+ content_type=content_type,
+ json=json,
+ template_url=self.query_text.metadata["url"],
+ )._to_pipeline_transport_request()
path_format_arguments = {
"Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
}
@@ -134,7 +131,7 @@ async def query_text(
deserialized = self._deserialize("TextAnswers", pipeline_response)
if cls:
- return cls(PipelineResponse._convert(pipeline_response), deserialized, {})
+ return cls(pipeline_response, deserialized, {})
return deserialized
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/models/__init__.py b/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/models/__init__.py
index 0c4b0dd97763..55d25e6fff0c 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/models/__init__.py
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/models/__init__.py
@@ -12,39 +12,39 @@
from ._models_py3 import Error
from ._models_py3 import ErrorResponse
from ._models_py3 import InnerErrorModel
- from ._models_py3 import KnowledgebaseAnswer
- from ._models_py3 import KnowledgebaseAnswerDialog
- from ._models_py3 import KnowledgebaseAnswerPrompt
- from ._models_py3 import KnowledgebaseAnswerRequestContext
- from ._models_py3 import KnowledgebaseAnswers
- from ._models_py3 import KnowledgebaseQueryParameters
+ from ._models_py3 import KnowledgeBaseAnswer
+ from ._models_py3 import KnowledgeBaseAnswerDialog
+ from ._models_py3 import KnowledgeBaseAnswerPrompt
+ from ._models_py3 import KnowledgeBaseAnswerRequestContext
+ from ._models_py3 import KnowledgeBaseAnswers
+ from ._models_py3 import KnowledgeBaseQueryOptions
from ._models_py3 import MetadataFilter
from ._models_py3 import StrictFilters
from ._models_py3 import TextAnswer
from ._models_py3 import TextAnswers
- from ._models_py3 import TextInput
- from ._models_py3 import TextQueryParameters
+ from ._models_py3 import TextQueryOptions
+ from ._models_py3 import TextRecord
except (SyntaxError, ImportError):
from ._models import AnswerSpan # type: ignore
from ._models import AnswerSpanRequest # type: ignore
from ._models import Error # type: ignore
from ._models import ErrorResponse # type: ignore
from ._models import InnerErrorModel # type: ignore
- from ._models import KnowledgebaseAnswer # type: ignore
- from ._models import KnowledgebaseAnswerDialog # type: ignore
- from ._models import KnowledgebaseAnswerPrompt # type: ignore
- from ._models import KnowledgebaseAnswerRequestContext # type: ignore
- from ._models import KnowledgebaseAnswers # type: ignore
- from ._models import KnowledgebaseQueryParameters # type: ignore
+ from ._models import KnowledgeBaseAnswer # type: ignore
+ from ._models import KnowledgeBaseAnswerDialog # type: ignore
+ from ._models import KnowledgeBaseAnswerPrompt # type: ignore
+ from ._models import KnowledgeBaseAnswerRequestContext # type: ignore
+ from ._models import KnowledgeBaseAnswers # type: ignore
+ from ._models import KnowledgeBaseQueryOptions # type: ignore
from ._models import MetadataFilter # type: ignore
from ._models import StrictFilters # type: ignore
from ._models import TextAnswer # type: ignore
from ._models import TextAnswers # type: ignore
- from ._models import TextInput # type: ignore
- from ._models import TextQueryParameters # type: ignore
+ from ._models import TextQueryOptions # type: ignore
+ from ._models import TextRecord # type: ignore
from ._question_answering_client_enums import (
- CompoundOperationType,
+ CompoundOperationKind,
ErrorCode,
InnerErrorCode,
RankerType,
@@ -57,19 +57,19 @@
"Error",
"ErrorResponse",
"InnerErrorModel",
- "KnowledgebaseAnswer",
- "KnowledgebaseAnswerDialog",
- "KnowledgebaseAnswerPrompt",
- "KnowledgebaseAnswerRequestContext",
- "KnowledgebaseAnswers",
- "KnowledgebaseQueryParameters",
+ "KnowledgeBaseAnswer",
+ "KnowledgeBaseAnswerDialog",
+ "KnowledgeBaseAnswerPrompt",
+ "KnowledgeBaseAnswerRequestContext",
+ "KnowledgeBaseAnswers",
+ "KnowledgeBaseQueryOptions",
"MetadataFilter",
"StrictFilters",
"TextAnswer",
"TextAnswers",
- "TextInput",
- "TextQueryParameters",
- "CompoundOperationType",
+ "TextQueryOptions",
+ "TextRecord",
+ "CompoundOperationKind",
"ErrorCode",
"InnerErrorCode",
"RankerType",
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/models/_models.py b/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/models/_models.py
index d98ed137dc45..6c8cf32df4db 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/models/_models.py
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/models/_models.py
@@ -173,8 +173,8 @@ def __init__(self, **kwargs):
self.innererror = kwargs.get("innererror", None)
-class KnowledgebaseAnswer(msrest.serialization.Model):
- """Represents Knowledgebase Answer.
+class KnowledgeBaseAnswer(msrest.serialization.Model):
+ """Represents knowledge base answer.
:param questions: List of questions.
:type questions: list[str]
@@ -190,7 +190,7 @@ class KnowledgebaseAnswer(msrest.serialization.Model):
answers.
:type metadata: dict[str, str]
:param dialog: Dialog associated with Answer.
- :type dialog: ~azure.ai.language.questionanswering.models.KnowledgebaseAnswerDialog
+ :type dialog: ~azure.ai.language.questionanswering.models.KnowledgeBaseAnswerDialog
:param answer_span: Answer span object of QnA with respect to user's question.
:type answer_span: ~azure.ai.language.questionanswering.models.AnswerSpan
"""
@@ -206,12 +206,12 @@ class KnowledgebaseAnswer(msrest.serialization.Model):
"id": {"key": "id", "type": "int"},
"source": {"key": "source", "type": "str"},
"metadata": {"key": "metadata", "type": "{str}"},
- "dialog": {"key": "dialog", "type": "KnowledgebaseAnswerDialog"},
+ "dialog": {"key": "dialog", "type": "KnowledgeBaseAnswerDialog"},
"answer_span": {"key": "answerSpan", "type": "AnswerSpan"},
}
def __init__(self, **kwargs):
- super(KnowledgebaseAnswer, self).__init__(**kwargs)
+ super(KnowledgeBaseAnswer, self).__init__(**kwargs)
self.questions = kwargs.get("questions", None)
self.answer = kwargs.get("answer", None)
self.confidence_score = kwargs.get("confidence_score", None)
@@ -222,7 +222,7 @@ def __init__(self, **kwargs):
self.answer_span = kwargs.get("answer_span", None)
-class KnowledgebaseAnswerDialog(msrest.serialization.Model):
+class KnowledgeBaseAnswerDialog(msrest.serialization.Model):
"""Dialog associated with Answer.
:param is_context_only: To mark if a prompt is relevant only with a previous question or not.
@@ -230,7 +230,7 @@ class KnowledgebaseAnswerDialog(msrest.serialization.Model):
false, ignores context and includes this QnA in search result.
:type is_context_only: bool
:param prompts: List of 0 to 20 prompts associated with the answer.
- :type prompts: list[~azure.ai.language.questionanswering.models.KnowledgebaseAnswerPrompt]
+ :type prompts: list[~azure.ai.language.questionanswering.models.KnowledgeBaseAnswerPrompt]
"""
_validation = {
@@ -239,16 +239,16 @@ class KnowledgebaseAnswerDialog(msrest.serialization.Model):
_attribute_map = {
"is_context_only": {"key": "isContextOnly", "type": "bool"},
- "prompts": {"key": "prompts", "type": "[KnowledgebaseAnswerPrompt]"},
+ "prompts": {"key": "prompts", "type": "[KnowledgeBaseAnswerPrompt]"},
}
def __init__(self, **kwargs):
- super(KnowledgebaseAnswerDialog, self).__init__(**kwargs)
+ super(KnowledgeBaseAnswerDialog, self).__init__(**kwargs)
self.is_context_only = kwargs.get("is_context_only", None)
self.prompts = kwargs.get("prompts", None)
-class KnowledgebaseAnswerPrompt(msrest.serialization.Model):
+class KnowledgeBaseAnswerPrompt(msrest.serialization.Model):
"""Prompt for an answer.
:param display_order: Index of the prompt - used in ordering of the prompts.
@@ -270,13 +270,13 @@ class KnowledgebaseAnswerPrompt(msrest.serialization.Model):
}
def __init__(self, **kwargs):
- super(KnowledgebaseAnswerPrompt, self).__init__(**kwargs)
+ super(KnowledgeBaseAnswerPrompt, self).__init__(**kwargs)
self.display_order = kwargs.get("display_order", None)
self.qna_id = kwargs.get("qna_id", None)
self.display_text = kwargs.get("display_text", None)
-class KnowledgebaseAnswerRequestContext(msrest.serialization.Model):
+class KnowledgeBaseAnswerRequestContext(msrest.serialization.Model):
"""Context object with previous QnA's information.
All required parameters must be populated in order to send to Azure.
@@ -297,31 +297,31 @@ class KnowledgebaseAnswerRequestContext(msrest.serialization.Model):
}
def __init__(self, **kwargs):
- super(KnowledgebaseAnswerRequestContext, self).__init__(**kwargs)
+ super(KnowledgeBaseAnswerRequestContext, self).__init__(**kwargs)
self.previous_qna_id = kwargs["previous_qna_id"]
self.previous_user_query = kwargs.get("previous_user_query", None)
-class KnowledgebaseAnswers(msrest.serialization.Model):
+class KnowledgeBaseAnswers(msrest.serialization.Model):
"""Represents List of Question Answers.
:param answers: Represents Answer Result list.
- :type answers: list[~azure.ai.language.questionanswering.models.KnowledgebaseAnswer]
+ :type answers: list[~azure.ai.language.questionanswering.models.KnowledgeBaseAnswer]
"""
_attribute_map = {
- "answers": {"key": "answers", "type": "[KnowledgebaseAnswer]"},
+ "answers": {"key": "answers", "type": "[KnowledgeBaseAnswer]"},
}
def __init__(self, **kwargs):
- super(KnowledgebaseAnswers, self).__init__(**kwargs)
+ super(KnowledgeBaseAnswers, self).__init__(**kwargs)
self.answers = kwargs.get("answers", None)
-class KnowledgebaseQueryParameters(msrest.serialization.Model):
- """The question parameters to answer using a knowledgebase.
+class KnowledgeBaseQueryOptions(msrest.serialization.Model):
+ """The question parameters to answer using a knowledge base.
- :param qna_id: Exact QnA ID to fetch from the knowledgebase, this field takes priority over
+    :param qna_id: Exact QnA ID to fetch from the knowledge base; this field takes priority over
question.
:type qna_id: int
:param question: User question to query against the knowledge base.
@@ -334,11 +334,11 @@ class KnowledgebaseQueryParameters(msrest.serialization.Model):
1.
:type confidence_score_threshold: float
:param context: Context object with previous QnA's information.
- :type context: ~azure.ai.language.questionanswering.models.KnowledgebaseAnswerRequestContext
+ :type context: ~azure.ai.language.questionanswering.models.KnowledgeBaseAnswerRequestContext
:param ranker_type: (Optional) Set to 'QuestionOnly' for using a question only Ranker. Possible
values include: "Default", "QuestionOnly".
:type ranker_type: str or ~azure.ai.language.questionanswering.models.RankerType
- :param strict_filters: Filter QnAs based on give metadata list and knowledgebase source names.
+    :param strict_filters: Filter QnAs based on the given metadata list and knowledge base source names.
:type strict_filters: ~azure.ai.language.questionanswering.models.StrictFilters
:param answer_span_request: To configure Answer span prediction feature.
:type answer_span_request: ~azure.ai.language.questionanswering.models.AnswerSpanRequest
@@ -356,7 +356,7 @@ class KnowledgebaseQueryParameters(msrest.serialization.Model):
"top": {"key": "top", "type": "int"},
"user_id": {"key": "userId", "type": "str"},
"confidence_score_threshold": {"key": "confidenceScoreThreshold", "type": "float"},
- "context": {"key": "context", "type": "KnowledgebaseAnswerRequestContext"},
+ "context": {"key": "context", "type": "KnowledgeBaseAnswerRequestContext"},
"ranker_type": {"key": "rankerType", "type": "str"},
"strict_filters": {"key": "strictFilters", "type": "StrictFilters"},
"answer_span_request": {"key": "answerSpanRequest", "type": "AnswerSpanRequest"},
@@ -364,7 +364,7 @@ class KnowledgebaseQueryParameters(msrest.serialization.Model):
}
def __init__(self, **kwargs):
- super(KnowledgebaseQueryParameters, self).__init__(**kwargs)
+ super(KnowledgeBaseQueryOptions, self).__init__(**kwargs)
self.qna_id = kwargs.get("qna_id", None)
self.question = kwargs.get("question", None)
self.top = kwargs.get("top", None)
@@ -385,7 +385,7 @@ class MetadataFilter(msrest.serialization.Model):
:param compound_operation: (Optional) Set to 'OR' for joining metadata using 'OR' operation.
Possible values include: "AND", "OR".
:type compound_operation: str or
- ~azure.ai.language.questionanswering.models.CompoundOperationType
+ ~azure.ai.language.questionanswering.models.CompoundOperationKind
"""
_attribute_map = {
@@ -400,17 +400,17 @@ def __init__(self, **kwargs):
class StrictFilters(msrest.serialization.Model):
- """filters over knowledgebase.
+ """filters over knowledge base.
:param metadata_filter: Find QnAs that are associated with the given list of metadata.
:type metadata_filter: ~azure.ai.language.questionanswering.models.MetadataFilter
- :param source_filter: Find QnAs that are associated with the given list of sources in
- knowledgebase.
+ :param source_filter: Find QnAs that are associated with the given list of sources in knowledge
+ base.
:type source_filter: list[str]
:param compound_operation: (Optional) Set to 'OR' for joining metadata using 'OR' operation.
Possible values include: "AND", "OR".
:type compound_operation: str or
- ~azure.ai.language.questionanswering.models.CompoundOperationType
+ ~azure.ai.language.questionanswering.models.CompoundOperationKind
"""
_attribute_map = {
@@ -482,34 +482,7 @@ def __init__(self, **kwargs):
self.answers = kwargs.get("answers", None)
-class TextInput(msrest.serialization.Model):
- """Represent input text record to be queried.
-
- All required parameters must be populated in order to send to Azure.
-
- :param id: Required. Unique identifier for the text record.
- :type id: str
- :param text: Required. Text contents of the record.
- :type text: str
- """
-
- _validation = {
- "id": {"required": True},
- "text": {"required": True},
- }
-
- _attribute_map = {
- "id": {"key": "id", "type": "str"},
- "text": {"key": "text", "type": "str"},
- }
-
- def __init__(self, **kwargs):
- super(TextInput, self).__init__(**kwargs)
- self.id = kwargs["id"]
- self.text = kwargs["text"]
-
-
-class TextQueryParameters(msrest.serialization.Model):
+class TextQueryOptions(msrest.serialization.Model):
"""The question and text record parameters to answer.
All required parameters must be populated in order to send to Azure.
@@ -517,7 +490,7 @@ class TextQueryParameters(msrest.serialization.Model):
:param question: Required. User question to query against the given text records.
:type question: str
:param records: Required. Text records to be searched for given question.
- :type records: list[~azure.ai.language.questionanswering.models.TextInput]
+ :type records: list[~azure.ai.language.questionanswering.models.TextRecord]
:param language: Language of the text records. This is BCP-47 representation of a language. For
example, use "en" for English; "es" for Spanish etc. If not set, use "en" for English as
default.
@@ -536,14 +509,41 @@ class TextQueryParameters(msrest.serialization.Model):
_attribute_map = {
"question": {"key": "question", "type": "str"},
- "records": {"key": "records", "type": "[TextInput]"},
+ "records": {"key": "records", "type": "[TextRecord]"},
"language": {"key": "language", "type": "str"},
"string_index_type": {"key": "stringIndexType", "type": "str"},
}
def __init__(self, **kwargs):
- super(TextQueryParameters, self).__init__(**kwargs)
+ super(TextQueryOptions, self).__init__(**kwargs)
self.question = kwargs["question"]
self.records = kwargs["records"]
self.language = kwargs.get("language", None)
self.string_index_type = kwargs.get("string_index_type", "TextElements_v8")
+
+
+class TextRecord(msrest.serialization.Model):
+ """Represent input text record to be queried.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param id: Required. Unique identifier for the text record.
+ :type id: str
+ :param text: Required. Text contents of the record.
+ :type text: str
+ """
+
+ _validation = {
+ "id": {"required": True},
+ "text": {"required": True},
+ }
+
+ _attribute_map = {
+ "id": {"key": "id", "type": "str"},
+ "text": {"key": "text", "type": "str"},
+ }
+
+ def __init__(self, **kwargs):
+ super(TextRecord, self).__init__(**kwargs)
+ self.id = kwargs["id"]
+ self.text = kwargs["text"]
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/models/_models_py3.py b/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/models/_models_py3.py
index e0cf8d3d9fdf..cc0008ee865e 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/models/_models_py3.py
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/models/_models_py3.py
@@ -210,8 +210,8 @@ def __init__(
self.innererror = innererror
-class KnowledgebaseAnswer(msrest.serialization.Model):
- """Represents Knowledgebase Answer.
+class KnowledgeBaseAnswer(msrest.serialization.Model):
+ """Represents knowledge base answer.
:param questions: List of questions.
:type questions: list[str]
@@ -227,7 +227,7 @@ class KnowledgebaseAnswer(msrest.serialization.Model):
answers.
:type metadata: dict[str, str]
:param dialog: Dialog associated with Answer.
- :type dialog: ~azure.ai.language.questionanswering.models.KnowledgebaseAnswerDialog
+ :type dialog: ~azure.ai.language.questionanswering.models.KnowledgeBaseAnswerDialog
:param answer_span: Answer span object of QnA with respect to user's question.
:type answer_span: ~azure.ai.language.questionanswering.models.AnswerSpan
"""
@@ -243,7 +243,7 @@ class KnowledgebaseAnswer(msrest.serialization.Model):
"id": {"key": "id", "type": "int"},
"source": {"key": "source", "type": "str"},
"metadata": {"key": "metadata", "type": "{str}"},
- "dialog": {"key": "dialog", "type": "KnowledgebaseAnswerDialog"},
+ "dialog": {"key": "dialog", "type": "KnowledgeBaseAnswerDialog"},
"answer_span": {"key": "answerSpan", "type": "AnswerSpan"},
}
@@ -256,11 +256,11 @@ def __init__(
id: Optional[int] = None,
source: Optional[str] = None,
metadata: Optional[Dict[str, str]] = None,
- dialog: Optional["KnowledgebaseAnswerDialog"] = None,
+ dialog: Optional["KnowledgeBaseAnswerDialog"] = None,
answer_span: Optional["AnswerSpan"] = None,
**kwargs
):
- super(KnowledgebaseAnswer, self).__init__(**kwargs)
+ super(KnowledgeBaseAnswer, self).__init__(**kwargs)
self.questions = questions
self.answer = answer
self.confidence_score = confidence_score
@@ -271,7 +271,7 @@ def __init__(
self.answer_span = answer_span
-class KnowledgebaseAnswerDialog(msrest.serialization.Model):
+class KnowledgeBaseAnswerDialog(msrest.serialization.Model):
"""Dialog associated with Answer.
:param is_context_only: To mark if a prompt is relevant only with a previous question or not.
@@ -279,7 +279,7 @@ class KnowledgebaseAnswerDialog(msrest.serialization.Model):
false, ignores context and includes this QnA in search result.
:type is_context_only: bool
:param prompts: List of 0 to 20 prompts associated with the answer.
- :type prompts: list[~azure.ai.language.questionanswering.models.KnowledgebaseAnswerPrompt]
+ :type prompts: list[~azure.ai.language.questionanswering.models.KnowledgeBaseAnswerPrompt]
"""
_validation = {
@@ -288,22 +288,22 @@ class KnowledgebaseAnswerDialog(msrest.serialization.Model):
_attribute_map = {
"is_context_only": {"key": "isContextOnly", "type": "bool"},
- "prompts": {"key": "prompts", "type": "[KnowledgebaseAnswerPrompt]"},
+ "prompts": {"key": "prompts", "type": "[KnowledgeBaseAnswerPrompt]"},
}
def __init__(
self,
*,
is_context_only: Optional[bool] = None,
- prompts: Optional[List["KnowledgebaseAnswerPrompt"]] = None,
+ prompts: Optional[List["KnowledgeBaseAnswerPrompt"]] = None,
**kwargs
):
- super(KnowledgebaseAnswerDialog, self).__init__(**kwargs)
+ super(KnowledgeBaseAnswerDialog, self).__init__(**kwargs)
self.is_context_only = is_context_only
self.prompts = prompts
-class KnowledgebaseAnswerPrompt(msrest.serialization.Model):
+class KnowledgeBaseAnswerPrompt(msrest.serialization.Model):
"""Prompt for an answer.
:param display_order: Index of the prompt - used in ordering of the prompts.
@@ -332,13 +332,13 @@ def __init__(
display_text: Optional[str] = None,
**kwargs
):
- super(KnowledgebaseAnswerPrompt, self).__init__(**kwargs)
+ super(KnowledgeBaseAnswerPrompt, self).__init__(**kwargs)
self.display_order = display_order
self.qna_id = qna_id
self.display_text = display_text
-class KnowledgebaseAnswerRequestContext(msrest.serialization.Model):
+class KnowledgeBaseAnswerRequestContext(msrest.serialization.Model):
"""Context object with previous QnA's information.
All required parameters must be populated in order to send to Azure.
@@ -359,31 +359,31 @@ class KnowledgebaseAnswerRequestContext(msrest.serialization.Model):
}
def __init__(self, *, previous_qna_id: int, previous_user_query: Optional[str] = None, **kwargs):
- super(KnowledgebaseAnswerRequestContext, self).__init__(**kwargs)
+ super(KnowledgeBaseAnswerRequestContext, self).__init__(**kwargs)
self.previous_qna_id = previous_qna_id
self.previous_user_query = previous_user_query
-class KnowledgebaseAnswers(msrest.serialization.Model):
+class KnowledgeBaseAnswers(msrest.serialization.Model):
"""Represents List of Question Answers.
:param answers: Represents Answer Result list.
- :type answers: list[~azure.ai.language.questionanswering.models.KnowledgebaseAnswer]
+ :type answers: list[~azure.ai.language.questionanswering.models.KnowledgeBaseAnswer]
"""
_attribute_map = {
- "answers": {"key": "answers", "type": "[KnowledgebaseAnswer]"},
+ "answers": {"key": "answers", "type": "[KnowledgeBaseAnswer]"},
}
- def __init__(self, *, answers: Optional[List["KnowledgebaseAnswer"]] = None, **kwargs):
- super(KnowledgebaseAnswers, self).__init__(**kwargs)
+ def __init__(self, *, answers: Optional[List["KnowledgeBaseAnswer"]] = None, **kwargs):
+ super(KnowledgeBaseAnswers, self).__init__(**kwargs)
self.answers = answers
-class KnowledgebaseQueryParameters(msrest.serialization.Model):
- """The question parameters to answer using a knowledgebase.
+class KnowledgeBaseQueryOptions(msrest.serialization.Model):
+ """The question parameters to answer using a knowledge base.
- :param qna_id: Exact QnA ID to fetch from the knowledgebase, this field takes priority over
+ :param qna_id: Exact QnA ID to fetch from the knowledge base, this field takes priority over
question.
:type qna_id: int
:param question: User question to query against the knowledge base.
@@ -396,11 +396,11 @@ class KnowledgebaseQueryParameters(msrest.serialization.Model):
1.
:type confidence_score_threshold: float
:param context: Context object with previous QnA's information.
- :type context: ~azure.ai.language.questionanswering.models.KnowledgebaseAnswerRequestContext
+ :type context: ~azure.ai.language.questionanswering.models.KnowledgeBaseAnswerRequestContext
:param ranker_type: (Optional) Set to 'QuestionOnly' for using a question only Ranker. Possible
values include: "Default", "QuestionOnly".
:type ranker_type: str or ~azure.ai.language.questionanswering.models.RankerType
- :param strict_filters: Filter QnAs based on give metadata list and knowledgebase source names.
+ :param strict_filters: Filter QnAs based on given metadata list and knowledge base source names.
:type strict_filters: ~azure.ai.language.questionanswering.models.StrictFilters
:param answer_span_request: To configure Answer span prediction feature.
:type answer_span_request: ~azure.ai.language.questionanswering.models.AnswerSpanRequest
@@ -418,7 +418,7 @@ class KnowledgebaseQueryParameters(msrest.serialization.Model):
"top": {"key": "top", "type": "int"},
"user_id": {"key": "userId", "type": "str"},
"confidence_score_threshold": {"key": "confidenceScoreThreshold", "type": "float"},
- "context": {"key": "context", "type": "KnowledgebaseAnswerRequestContext"},
+ "context": {"key": "context", "type": "KnowledgeBaseAnswerRequestContext"},
"ranker_type": {"key": "rankerType", "type": "str"},
"strict_filters": {"key": "strictFilters", "type": "StrictFilters"},
"answer_span_request": {"key": "answerSpanRequest", "type": "AnswerSpanRequest"},
@@ -433,14 +433,14 @@ def __init__(
top: Optional[int] = None,
user_id: Optional[str] = None,
confidence_score_threshold: Optional[float] = None,
- context: Optional["KnowledgebaseAnswerRequestContext"] = None,
+ context: Optional["KnowledgeBaseAnswerRequestContext"] = None,
ranker_type: Optional[Union[str, "RankerType"]] = None,
strict_filters: Optional["StrictFilters"] = None,
answer_span_request: Optional["AnswerSpanRequest"] = None,
include_unstructured_sources: Optional[bool] = None,
**kwargs
):
- super(KnowledgebaseQueryParameters, self).__init__(**kwargs)
+ super(KnowledgeBaseQueryOptions, self).__init__(**kwargs)
self.qna_id = qna_id
self.question = question
self.top = top
@@ -461,7 +461,7 @@ class MetadataFilter(msrest.serialization.Model):
:param compound_operation: (Optional) Set to 'OR' for joining metadata using 'OR' operation.
Possible values include: "AND", "OR".
:type compound_operation: str or
- ~azure.ai.language.questionanswering.models.CompoundOperationType
+ ~azure.ai.language.questionanswering.models.CompoundOperationKind
"""
_attribute_map = {
@@ -473,7 +473,7 @@ def __init__(
self,
*,
metadata: Optional[Dict[str, str]] = None,
- compound_operation: Optional[Union[str, "CompoundOperationType"]] = None,
+ compound_operation: Optional[Union[str, "CompoundOperationKind"]] = None,
**kwargs
):
super(MetadataFilter, self).__init__(**kwargs)
@@ -482,17 +482,17 @@ def __init__(
class StrictFilters(msrest.serialization.Model):
- """filters over knowledgebase.
+ """filters over knowledge base.
:param metadata_filter: Find QnAs that are associated with the given list of metadata.
:type metadata_filter: ~azure.ai.language.questionanswering.models.MetadataFilter
- :param source_filter: Find QnAs that are associated with the given list of sources in
- knowledgebase.
+ :param source_filter: Find QnAs that are associated with the given list of sources in knowledge
+ base.
:type source_filter: list[str]
:param compound_operation: (Optional) Set to 'OR' for joining metadata using 'OR' operation.
Possible values include: "AND", "OR".
:type compound_operation: str or
- ~azure.ai.language.questionanswering.models.CompoundOperationType
+ ~azure.ai.language.questionanswering.models.CompoundOperationKind
"""
_attribute_map = {
@@ -506,7 +506,7 @@ def __init__(
*,
metadata_filter: Optional["MetadataFilter"] = None,
source_filter: Optional[List[str]] = None,
- compound_operation: Optional[Union[str, "CompoundOperationType"]] = None,
+ compound_operation: Optional[Union[str, "CompoundOperationKind"]] = None,
**kwargs
):
super(StrictFilters, self).__init__(**kwargs)
@@ -581,34 +581,7 @@ def __init__(self, *, answers: Optional[List["TextAnswer"]] = None, **kwargs):
self.answers = answers
-class TextInput(msrest.serialization.Model):
- """Represent input text record to be queried.
-
- All required parameters must be populated in order to send to Azure.
-
- :param id: Required. Unique identifier for the text record.
- :type id: str
- :param text: Required. Text contents of the record.
- :type text: str
- """
-
- _validation = {
- "id": {"required": True},
- "text": {"required": True},
- }
-
- _attribute_map = {
- "id": {"key": "id", "type": "str"},
- "text": {"key": "text", "type": "str"},
- }
-
- def __init__(self, *, id: str, text: str, **kwargs):
- super(TextInput, self).__init__(**kwargs)
- self.id = id
- self.text = text
-
-
-class TextQueryParameters(msrest.serialization.Model):
+class TextQueryOptions(msrest.serialization.Model):
"""The question and text record parameters to answer.
All required parameters must be populated in order to send to Azure.
@@ -616,7 +589,7 @@ class TextQueryParameters(msrest.serialization.Model):
:param question: Required. User question to query against the given text records.
:type question: str
:param records: Required. Text records to be searched for given question.
- :type records: list[~azure.ai.language.questionanswering.models.TextInput]
+ :type records: list[~azure.ai.language.questionanswering.models.TextRecord]
:param language: Language of the text records. This is BCP-47 representation of a language. For
example, use "en" for English; "es" for Spanish etc. If not set, use "en" for English as
default.
@@ -635,7 +608,7 @@ class TextQueryParameters(msrest.serialization.Model):
_attribute_map = {
"question": {"key": "question", "type": "str"},
- "records": {"key": "records", "type": "[TextInput]"},
+ "records": {"key": "records", "type": "[TextRecord]"},
"language": {"key": "language", "type": "str"},
"string_index_type": {"key": "stringIndexType", "type": "str"},
}
@@ -644,13 +617,40 @@ def __init__(
self,
*,
question: str,
- records: List["TextInput"],
+ records: List["TextRecord"],
language: Optional[str] = None,
string_index_type: Optional[Union[str, "StringIndexType"]] = "TextElements_v8",
**kwargs
):
- super(TextQueryParameters, self).__init__(**kwargs)
+ super(TextQueryOptions, self).__init__(**kwargs)
self.question = question
self.records = records
self.language = language
self.string_index_type = string_index_type
+
+
+class TextRecord(msrest.serialization.Model):
+ """Represent input text record to be queried.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param id: Required. Unique identifier for the text record.
+ :type id: str
+ :param text: Required. Text contents of the record.
+ :type text: str
+ """
+
+ _validation = {
+ "id": {"required": True},
+ "text": {"required": True},
+ }
+
+ _attribute_map = {
+ "id": {"key": "id", "type": "str"},
+ "text": {"key": "text", "type": "str"},
+ }
+
+ def __init__(self, *, id: str, text: str, **kwargs):
+ super(TextRecord, self).__init__(**kwargs)
+ self.id = id
+ self.text = text
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/models/_question_answering_client_enums.py b/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/models/_question_answering_client_enums.py
index aace462c8469..1d2dafe470bc 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/models/_question_answering_client_enums.py
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/models/_question_answering_client_enums.py
@@ -27,7 +27,7 @@ def __getattr__(cls, name):
raise AttributeError(name)
-class CompoundOperationType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
+class CompoundOperationKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""(Optional) Set to 'OR' for joining metadata using 'OR' operation."""
AND_ENUM = "AND"
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/operations/_question_answering_client_operations.py b/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/operations/_question_answering_client_operations.py
index 280cb22fc259..67f3b791ab01 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/operations/_question_answering_client_operations.py
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/operations/_question_answering_client_operations.py
@@ -20,7 +20,7 @@
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
-from .. import models as _models, rest
+from .. import models as _models, rest as rest
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
@@ -33,44 +33,42 @@
class QuestionAnsweringClientOperationsMixin(object):
def query_knowledgebase(
self,
- knowledgebase_query_parameters, # type: "_models.KnowledgebaseQueryParameters"
+ knowledge_base_query_options, # type: "_models.KnowledgeBaseQueryOptions"
**kwargs # type: Any
):
- # type: (...) -> "_models.KnowledgebaseAnswers"
- """Answers the specified question using your knowledgebase.
+ # type: (...) -> "_models.KnowledgeBaseAnswers"
+ """Answers the specified question using your knowledge base.
- Answers the specified question using your knowledgebase.
+ Answers the specified question using your knowledge base.
+ :param knowledge_base_query_options: Post body of the request.
+ :type knowledge_base_query_options:
+ ~azure.ai.language.questionanswering.models.KnowledgeBaseQueryOptions
:keyword project_name: The name of the project to use.
:paramtype project_name: str
- :param knowledgebase_query_parameters: Post body of the request.
- :type knowledgebase_query_parameters:
- ~azure.ai.language.questionanswering.models.KnowledgebaseQueryParameters
:keyword deployment_name: The name of the specific deployment of the project to use.
:paramtype deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
- :return: KnowledgebaseAnswers, or the result of cls(response)
- :rtype: ~azure.ai.language.questionanswering.models.KnowledgebaseAnswers
+ :return: KnowledgeBaseAnswers, or the result of cls(response)
+ :rtype: ~azure.ai.language.questionanswering.models.KnowledgeBaseAnswers
:raises: ~azure.core.exceptions.HttpResponseError
"""
- cls = kwargs.pop("cls", None) # type: ClsType["_models.KnowledgebaseAnswers"]
+ cls = kwargs.pop("cls", None) # type: ClsType["_models.KnowledgeBaseAnswers"]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
-
content_type = kwargs.pop("content_type", "application/json") # type: Optional[str]
project_name = kwargs.pop("project_name") # type: str
deployment_name = kwargs.pop("deployment_name", None) # type: Optional[str]
- json = self._serialize.body(knowledgebase_query_parameters, "object")
+ json = self._serialize.body(knowledge_base_query_options, "KnowledgeBaseQueryOptions")
request = rest.build_query_knowledgebase_request(
+ content_type=content_type,
project_name=project_name,
deployment_name=deployment_name,
json=json,
- content_type=content_type,
template_url=self.query_knowledgebase.metadata["url"],
- **kwargs
- )
+ )._to_pipeline_transport_request()
path_format_arguments = {
"Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
}
@@ -84,10 +82,10 @@ def query_knowledgebase(
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error)
- deserialized = self._deserialize("KnowledgebaseAnswers", pipeline_response)
+ deserialized = self._deserialize("KnowledgeBaseAnswers", pipeline_response)
if cls:
- return cls(PipelineResponse._convert(pipeline_response), deserialized, {})
+ return cls(pipeline_response, deserialized, {})
return deserialized
@@ -95,7 +93,7 @@ def query_knowledgebase(
def query_text(
self,
- text_query_parameters, # type: "_models.TextQueryParameters"
+ text_query_options, # type: "_models.TextQueryOptions"
**kwargs # type: Any
):
# type: (...) -> "_models.TextAnswers"
@@ -103,8 +101,8 @@ def query_text(
Answers the specified question using the provided text in the body.
- :param text_query_parameters: Post body of the request.
- :type text_query_parameters: ~azure.ai.language.questionanswering.models.TextQueryParameters
+ :param text_query_options: Post body of the request.
+ :type text_query_options: ~azure.ai.language.questionanswering.models.TextQueryOptions
:keyword callable cls: A custom type or function that will be passed the direct response
:return: TextAnswers, or the result of cls(response)
:rtype: ~azure.ai.language.questionanswering.models.TextAnswers
@@ -113,14 +111,15 @@ def query_text(
cls = kwargs.pop("cls", None) # type: ClsType["_models.TextAnswers"]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
-
content_type = kwargs.pop("content_type", "application/json") # type: Optional[str]
- json = self._serialize.body(text_query_parameters, "object")
+ json = self._serialize.body(text_query_options, "TextQueryOptions")
request = rest.build_query_text_request(
- json=json, content_type=content_type, template_url=self.query_text.metadata["url"], **kwargs
- )
+ content_type=content_type,
+ json=json,
+ template_url=self.query_text.metadata["url"],
+ )._to_pipeline_transport_request()
path_format_arguments = {
"Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
}
@@ -137,7 +136,7 @@ def query_text(
deserialized = self._deserialize("TextAnswers", pipeline_response)
if cls:
- return cls(PipelineResponse._convert(pipeline_response), deserialized, {})
+ return cls(pipeline_response, deserialized, {})
return deserialized
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/rest/__init__.py b/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/rest/__init__.py
index 4fdfbbf6f9e2..4edb3526786a 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/rest/__init__.py
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/rest/__init__.py
@@ -14,6 +14,6 @@
from ._request_builders import build_query_text_request # type: ignore
__all__ = [
- 'build_query_knowledgebase_request',
- 'build_query_text_request',
+ "build_query_knowledgebase_request",
+ "build_query_text_request",
]
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/rest/_request_builders.py b/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/rest/_request_builders.py
index 57a66d5c7dec..981287ff61f4 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/rest/_request_builders.py
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/rest/_request_builders.py
@@ -16,26 +16,27 @@
_SERIALIZER = Serializer()
+# fmt: off
def build_query_knowledgebase_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
- """Answers the specified question using your knowledgebase.
+ """Answers the specified question using your knowledge base.
- Answers the specified question using your knowledgebase.
+ Answers the specified question using your knowledge base.
- See https://aka.ms/azsdk/python/llcwiki for how to incorporate this request builder into your
- code flow.
+ See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder
+ into your code flow.
:keyword project_name: The name of the project to use.
:paramtype project_name: str
:keyword json: Pass in a JSON-serializable object (usually a dictionary). See the template in
our example to find the input shape. Post body of the request.
- :paramtype json: Any
+ :paramtype json: any
:keyword content: Pass in binary content you want in the body of the request (typically bytes,
a byte iterator, or stream input). Post body of the request.
- :paramtype content: Any
+ :paramtype content: any
:keyword deployment_name: The name of the specific deployment of the project to use.
:paramtype deployment_name: str
:return: Returns an :class:`~azure.core.rest.HttpRequest` that you will pass to the client's
@@ -46,7 +47,7 @@ def build_query_knowledgebase_request(
Example:
.. code-block:: python
- # JSON input template you can fill out and use as your `json` input.
+ # JSON input template you can fill out and use as your body input.
json = {
"answerSpanRequest": {
"confidenceScoreThreshold": "float (optional)",
@@ -113,31 +114,35 @@ def build_query_knowledgebase_request(
}
"""
- content_type = kwargs.pop("content_type", None) # type: Optional[str]
- project_name = kwargs.pop("project_name") # type: str
- json = kwargs.pop("json", None) # type: Any
- deployment_name = kwargs.pop("deployment_name", None) # type: Optional[str]
+ content_type = kwargs.pop('content_type', None) # type: Optional[str]
+ project_name = kwargs.pop('project_name') # type: str
+ deployment_name = kwargs.pop('deployment_name', None) # type: Optional[str]
api_version = "2021-05-01-preview"
accept = "application/json"
-
# Construct URL
- url = kwargs.pop("template_url", "/:query-knowledgebases")
+ url = kwargs.pop("template_url", '/:query-knowledgebases')
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
- query_parameters["projectName"] = _SERIALIZER.query("project_name", project_name, "str")
+ query_parameters['projectName'] = _SERIALIZER.query("project_name", project_name, 'str')
if deployment_name is not None:
- query_parameters["deploymentName"] = _SERIALIZER.query("deployment_name", deployment_name, "str")
- query_parameters["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+ query_parameters['deploymentName'] = _SERIALIZER.query("deployment_name", deployment_name, 'str')
+ query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
- header_parameters["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
- header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
+ header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
+ header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
- return HttpRequest(method="POST", url=url, params=query_parameters, headers=header_parameters, **kwargs)
+ return HttpRequest(
+ method="POST",
+ url=url,
+ params=query_parameters,
+ headers=header_parameters,
+ **kwargs
+ )
def build_query_text_request(
@@ -148,15 +153,15 @@ def build_query_text_request(
Answers the specified question using the provided text in the body.
- See https://aka.ms/azsdk/python/llcwiki for how to incorporate this request builder into your
- code flow.
+ See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder
+ into your code flow.
:keyword json: Pass in a JSON-serializable object (usually a dictionary). See the template in
our example to find the input shape. Post body of the request.
- :paramtype json: Any
+ :paramtype json: any
:keyword content: Pass in binary content you want in the body of the request (typically bytes,
a byte iterator, or stream input). Post body of the request.
- :paramtype content: Any
+ :paramtype content: any
:return: Returns an :class:`~azure.core.rest.HttpRequest` that you will pass to the client's
`send_request` method. See https://aka.ms/azsdk/python/protocol/quickstart for how to
incorporate this response into your code flow.
@@ -165,7 +170,7 @@ def build_query_text_request(
Example:
.. code-block:: python
- # JSON input template you can fill out and use as your `json` input.
+ # JSON input template you can fill out and use as your body input.
json = {
"language": "str (optional)",
"question": "str",
@@ -198,23 +203,27 @@ def build_query_text_request(
}
"""
- content_type = kwargs.pop("content_type", None) # type: Optional[str]
- json = kwargs.pop("json", None) # type: Any
+ content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2021-05-01-preview"
accept = "application/json"
-
# Construct URL
- url = kwargs.pop("template_url", "/:query-text")
+ url = kwargs.pop("template_url", '/:query-text')
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
- query_parameters["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+ query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
- header_parameters["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
- header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
-
- return HttpRequest(method="POST", url=url, params=query_parameters, headers=header_parameters, **kwargs)
+ header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
+ header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
+
+ return HttpRequest(
+ method="POST",
+ url=url,
+ params=query_parameters,
+ headers=header_parameters,
+ **kwargs
+ )
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/rest/_request_builders_py3.py b/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/rest/_request_builders_py3.py
index 2ea1239df3f6..283d2d36e45b 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/rest/_request_builders_py3.py
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/rest/_request_builders_py3.py
@@ -16,21 +16,21 @@
def build_query_knowledgebase_request(
*, project_name: str, json: Any = None, content: Any = None, deployment_name: Optional[str] = None, **kwargs: Any
) -> HttpRequest:
- """Answers the specified question using your knowledgebase.
+ """Answers the specified question using your knowledge base.
- Answers the specified question using your knowledgebase.
+ Answers the specified question using your knowledge base.
- See https://aka.ms/azsdk/python/llcwiki for how to incorporate this request builder into your
- code flow.
+ See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder
+ into your code flow.
:keyword project_name: The name of the project to use.
:paramtype project_name: str
:keyword json: Pass in a JSON-serializable object (usually a dictionary). See the template in
our example to find the input shape. Post body of the request.
- :paramtype json: Any
+ :paramtype json: any
:keyword content: Pass in binary content you want in the body of the request (typically bytes,
a byte iterator, or stream input). Post body of the request.
- :paramtype content: Any
+ :paramtype content: any
:keyword deployment_name: The name of the specific deployment of the project to use.
:paramtype deployment_name: str
:return: Returns an :class:`~azure.core.rest.HttpRequest` that you will pass to the client's
@@ -41,7 +41,7 @@ def build_query_knowledgebase_request(
Example:
.. code-block:: python
- # JSON input template you can fill out and use as your `json` input.
+ # JSON input template you can fill out and use as your body input.
json = {
"answerSpanRequest": {
"confidenceScoreThreshold": "float (optional)",
@@ -112,7 +112,6 @@ def build_query_knowledgebase_request(
api_version = "2021-05-01-preview"
accept = "application/json"
-
# Construct URL
url = kwargs.pop("template_url", "/:query-knowledgebases")
@@ -139,15 +138,15 @@ def build_query_text_request(*, json: Any = None, content: Any = None, **kwargs:
Answers the specified question using the provided text in the body.
- See https://aka.ms/azsdk/python/llcwiki for how to incorporate this request builder into your
- code flow.
+ See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder
+ into your code flow.
:keyword json: Pass in a JSON-serializable object (usually a dictionary). See the template in
our example to find the input shape. Post body of the request.
- :paramtype json: Any
+ :paramtype json: any
:keyword content: Pass in binary content you want in the body of the request (typically bytes,
a byte iterator, or stream input). Post body of the request.
- :paramtype content: Any
+ :paramtype content: any
:return: Returns an :class:`~azure.core.rest.HttpRequest` that you will pass to the client's
`send_request` method. See https://aka.ms/azsdk/python/protocol/quickstart for how to
incorporate this response into your code flow.
@@ -156,7 +155,7 @@ def build_query_text_request(*, json: Any = None, content: Any = None, **kwargs:
Example:
.. code-block:: python
- # JSON input template you can fill out and use as your `json` input.
+ # JSON input template you can fill out and use as your body input.
json = {
"language": "str (optional)",
"question": "str",
@@ -193,7 +192,6 @@ def build_query_text_request(*, json: Any = None, content: Any = None, **kwargs:
api_version = "2021-05-01-preview"
accept = "application/json"
-
# Construct URL
url = kwargs.pop("template_url", "/:query-text")
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/samples/README.md b/sdk/cognitivelanguage/azure-ai-language-questionanswering/samples/README.md
index ce13a0947d16..ae1e8d2cb2c1 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/samples/README.md
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/samples/README.md
@@ -21,7 +21,7 @@ These sample programs show common scenarios for the QuestionAnswering client's o
|**File Name**|**Description**|
|-------------|---------------|
-|[sample_query_knowledgebase.py][query_knowledgebase] and [sample_query_knowledgebase_async.py][query_knowledgebase_async]|Ask a question from a knowledgebase|
+|[sample_query_knowledgebase.py][query_knowledgebase] and [sample_query_knowledgebase_async.py][query_knowledgebase_async]|Ask a question from a knowledge base|
|[sample_chat.py][chat] and [sample_chat_async.py][chat_async]|Ask a follow-up question (chit-chat)|
|[sample_query_text.py][query_text] and [sample_query_text_async.py][query_text_async]|Ask a question from provided text data|
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/samples/async_samples/sample_chat_async.py b/sdk/cognitivelanguage/azure-ai-language-questionanswering/samples/async_samples/sample_chat_async.py
index 014873fcf692..63fa238f7eda 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/samples/async_samples/sample_chat_async.py
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/samples/async_samples/sample_chat_async.py
@@ -8,7 +8,7 @@
FILE: sample_chat_async.py
DESCRIPTION:
- This sample demonstrates how to ask a follow-up question (chit-chat) from a knowledgebase.
+ This sample demonstrates how to ask a follow-up question (chit-chat) from a knowledge base.
USAGE:
python sample_chat_async.py
@@ -16,7 +16,7 @@
Set the environment variables with your own values before running the sample:
1) AZURE_QUESTIONANSWERING_ENDPOINT - the endpoint to your QuestionAnswering resource.
2) AZURE_QUESTIONANSWERING_KEY - your QuestionAnswering API key.
- 3) AZURE_QUESTIONANSWERING_PROJECT - the name of a knowledgebase project.
+ 3) AZURE_QUESTIONANSWERING_PROJECT - the name of a knowledge base project.
"""
import asyncio
@@ -35,7 +35,7 @@ async def sample_chit_chat():
client = QuestionAnsweringClient(endpoint, AzureKeyCredential(key))
async with client:
- first_question = qna.KnowledgebaseQueryParameters(
+ first_question = qna.KnowledgeBaseQueryOptions(
question="How long should my Surface battery last?",
top=3,
confidence_score_threshold=0.2,
@@ -49,19 +49,20 @@ async def sample_chit_chat():
output = await client.query_knowledgebase(
project_name=knowledgebase_project,
- knowledgebase_query_parameters=first_question
+ knowledge_base_query_options=first_question,
+ deployment_name="test"
)
- best_answer = [a for a in output.answers if a.confidence_score > 0.9][0]
+ best_candidate = [a for a in output.answers if a.confidence_score > 0.9][0]
print("Q: {}".format(first_question.question))
- print("A: {}".format(best_answer.answer_span.text))
+ print("A: {}".format(best_candidate.answer))
- followup_question = qna.KnowledgebaseQueryParameters(
+ followup_question = qna.KnowledgeBaseQueryOptions(
question="How long it takes to charge Surface?",
top=3,
confidence_score_threshold=0.2,
- context=qna.KnowledgebaseAnswerRequestContext(
+ context=qna.KnowledgeBaseAnswerRequestContext(
previous_user_query="How long should my Surface battery last?",
- previous_qna_id=best_answer.id
+ previous_qna_id=best_candidate.id
),
answer_span_request=qna.AnswerSpanRequest(
enable=True,
@@ -73,11 +74,11 @@ async def sample_chit_chat():
output = await client.query_knowledgebase(
project_name=knowledgebase_project,
- knowledgebase_query_parameters=followup_question
+ knowledge_base_query_options=followup_question,
+ deployment_name="test"
)
- best_answer = [a for a in output.answers if a.confidence_score > 0.9][0]
print("Q: {}".format(followup_question.question))
- print("A: {}".format(best_answer.answer_span.text))
+ print("A: {}".format(output.answers[0].answer))
# [END chit_chat_async]
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/samples/async_samples/sample_query_knowledgebase_async.py b/sdk/cognitivelanguage/azure-ai-language-questionanswering/samples/async_samples/sample_query_knowledgebase_async.py
index 21f39fa023b8..5f983bef4ccb 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/samples/async_samples/sample_query_knowledgebase_async.py
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/samples/async_samples/sample_query_knowledgebase_async.py
@@ -8,7 +8,7 @@
FILE: sample_query_knowledgebase_async.py
DESCRIPTION:
- This sample demonstrates how to ask a question from a knowledgebase.
+ This sample demonstrates how to ask a question from a knowledge base.
USAGE:
python sample_query_knowledgebase_async.py
@@ -16,7 +16,7 @@
Set the environment variables with your own values before running the sample:
1) AZURE_QUESTIONANSWERING_ENDPOINT - the endpoint to your QuestionAnswering resource.
2) AZURE_QUESTIONANSWERING_KEY - your QuestionAnswering API key.
- 3) AZURE_QUESTIONANSWERING_PROJECT - the name of a knowledgebase project.
+ 3) AZURE_QUESTIONANSWERING_PROJECT - the name of a knowledge base project.
"""
import asyncio
@@ -35,7 +35,7 @@ async def sample_query_knowledgebase():
client = QuestionAnsweringClient(endpoint, AzureKeyCredential(key))
async with client:
- input = qna.KnowledgebaseQueryParameters(
+ input = qna.KnowledgeBaseQueryOptions(
question="How long should my Surface battery last?",
top=3,
confidence_score_threshold=0.2,
@@ -49,11 +49,12 @@ async def sample_query_knowledgebase():
output = await client.query_knowledgebase(
project_name=knowledgebase_project,
- knowledgebase_query_parameters=input
+ knowledge_base_query_options=input,
+ deployment_name="test"
)
- best_answer = [a for a in output.answers if a.confidence_score > 0.9][0]
+ best_candidate = [a for a in output.answers if a.confidence_score > 0.9][0]
print("Q: {}".format(input.question))
- print("A: {}".format(best_answer.answer_span.text))
+ print("A: {}".format(best_candidate.answer))
# [END query_knowledgebase_async]
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/samples/async_samples/sample_query_text_async.py b/sdk/cognitivelanguage/azure-ai-language-questionanswering/samples/async_samples/sample_query_text_async.py
index a88fda39ab92..a34195f7e320 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/samples/async_samples/sample_query_text_async.py
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/samples/async_samples/sample_query_text_async.py
@@ -17,6 +17,7 @@
1) AZURE_QUESTIONANSWERING_ENDPOINT - the endpoint to your QuestionAnswering resource.
2) AZURE_QUESTIONANSWERING_KEY - your QuestionAnswering API key.
"""
+import asyncio
async def sample_query_text():
@@ -31,15 +32,15 @@ async def sample_query_text():
client = QuestionAnsweringClient(endpoint, AzureKeyCredential(key))
async with client:
- input = qna.TextQueryParameters(
+ input = qna.TextQueryOptions(
question="How long it takes to charge surface?",
records=[
- qna.TextInput(
+ qna.TextRecord(
text="Power and charging. It takes two to four hours to charge the Surface Pro 4 battery fully from an empty state. " +
"It can take longer if you’re using your Surface for power-intensive activities like gaming or video streaming while you’re charging it.",
id="doc1"
),
- qna.TextInput(
+ qna.TextRecord(
text="You can use the USB port on your Surface Pro 4 power supply to charge other devices, like a phone, while your Surface charges. " +
"The USB port on the power supply is only for charging, not for data transfer. If you want to use a USB device, plug it into the USB port on your Surface.",
id="doc2"
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/samples/sample_chat.py b/sdk/cognitivelanguage/azure-ai-language-questionanswering/samples/sample_chat.py
index 8ccc882c353a..499b63d7fe50 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/samples/sample_chat.py
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/samples/sample_chat.py
@@ -8,7 +8,7 @@
FILE: sample_chat.py
DESCRIPTION:
- This sample demonstrates how to ask a follow-up question (chit-chat) from a knowledgebase.
+ This sample demonstrates how to ask a follow-up question (chit-chat) from a knowledge base.
USAGE:
python sample_chat.py
@@ -16,7 +16,7 @@
Set the environment variables with your own values before running the sample:
1) AZURE_QUESTIONANSWERING_ENDPOINT - the endpoint to your QuestionAnswering resource.
2) AZURE_QUESTIONANSWERING_KEY - your QuestionAnswering API key.
- 3) AZURE_QUESTIONANSWERING_PROJECT - the name of a knowledgebase project.
+ 3) AZURE_QUESTIONANSWERING_PROJECT - the name of a knowledge base project.
"""
@@ -33,7 +33,7 @@ def sample_chit_chat():
client = QuestionAnsweringClient(endpoint, AzureKeyCredential(key))
with client:
- first_question = qna.KnowledgebaseQueryParameters(
+ first_question = qna.KnowledgeBaseQueryOptions(
question="How long should my Surface battery last?",
top=3,
confidence_score_threshold=0.2,
@@ -47,19 +47,20 @@ def sample_chit_chat():
output = client.query_knowledgebase(
project_name=knowledgebase_project,
- knowledgebase_query_parameters=first_question
+ knowledge_base_query_options=first_question,
+ deployment_name="test"
)
- best_answer = [a for a in output.answers if a.confidence_score > 0.9][0]
+ best_candidate = [a for a in output.answers if a.confidence_score > 0.9][0]
print("Q: {}".format(first_question.question))
- print("A: {}".format(best_answer.answer_span.text))
+ print("A: {}".format(best_candidate.answer))
- followup_question = qna.KnowledgebaseQueryParameters(
+ followup_question = qna.KnowledgeBaseQueryOptions(
question="How long it takes to charge Surface?",
top=3,
confidence_score_threshold=0.2,
- context=qna.KnowledgebaseAnswerRequestContext(
+ context=qna.KnowledgeBaseAnswerRequestContext(
previous_user_query="How long should my Surface battery last?",
- previous_qna_id=best_answer.id
+ previous_qna_id=best_candidate.id
),
answer_span_request=qna.AnswerSpanRequest(
enable=True,
@@ -71,11 +72,11 @@ def sample_chit_chat():
output = client.query_knowledgebase(
project_name=knowledgebase_project,
- knowledgebase_query_parameters=followup_question
+ knowledge_base_query_options=followup_question,
+ deployment_name="test"
)
- best_answer = [a for a in output.answers if a.confidence_score > 0.9][0]
print("Q: {}".format(followup_question.question))
- print("A: {}".format(best_answer.answer_span.text))
+ print("A: {}".format(output.answers[0].answer))
# [END chit_chat]
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/samples/sample_query_knowledgebase.py b/sdk/cognitivelanguage/azure-ai-language-questionanswering/samples/sample_query_knowledgebase.py
index ef1f4969eb6b..b499197cba26 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/samples/sample_query_knowledgebase.py
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/samples/sample_query_knowledgebase.py
@@ -8,7 +8,7 @@
FILE: sample_query_knowledgebase.py
DESCRIPTION:
- This sample demonstrates how to ask a question from a knowledgebase.
+ This sample demonstrates how to ask a question from a knowledge base.
USAGE:
python sample_query_knowledgebase.py
@@ -16,7 +16,7 @@
Set the environment variables with your own values before running the sample:
1) AZURE_QUESTIONANSWERING_ENDPOINT - the endpoint to your QuestionAnswering resource.
2) AZURE_QUESTIONANSWERING_KEY - your QuestionAnswering API key.
- 3) AZURE_QUESTIONANSWERING_PROJECT - the name of a knowledgebase project.
+ 3) AZURE_QUESTIONANSWERING_PROJECT - the name of a knowledge base project.
"""
@@ -33,7 +33,7 @@ def sample_query_knowledgebase():
client = QuestionAnsweringClient(endpoint, AzureKeyCredential(key))
with client:
- input = qna.KnowledgebaseQueryParameters(
+ input = qna.KnowledgeBaseQueryOptions(
question="How long should my Surface battery last?",
top=3,
confidence_score_threshold=0.2,
@@ -47,11 +47,12 @@ def sample_query_knowledgebase():
output = client.query_knowledgebase(
project_name=knowledgebase_project,
- knowledgebase_query_parameters=input
+ knowledge_base_query_options=input,
+ deployment_name="test"
)
- best_answer = [a for a in output.answers if a.confidence_score > 0.9][0]
+ best_candidate = [a for a in output.answers if a.confidence_score > 0.9][0]
print("Q: {}".format(input.question))
- print("A: {}".format(best_answer.answer_span.text))
+ print("A: {}".format(best_candidate.answer))
# [END query_knowledgebase]
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/samples/sample_query_text.py b/sdk/cognitivelanguage/azure-ai-language-questionanswering/samples/sample_query_text.py
index d18ffa620f0a..9f784b5a5e4c 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/samples/sample_query_text.py
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/samples/sample_query_text.py
@@ -31,15 +31,15 @@ def sample_query_text():
client = QuestionAnsweringClient(endpoint, AzureKeyCredential(key))
with client:
- input = qna.TextQueryParameters(
+ input = qna.TextQueryOptions(
question="How long it takes to charge surface?",
records=[
- qna.TextInput(
+ qna.TextRecord(
text="Power and charging. It takes two to four hours to charge the Surface Pro 4 battery fully from an empty state. " +
"It can take longer if you’re using your Surface for power-intensive activities like gaming or video streaming while you’re charging it.",
id="doc1"
),
- qna.TextInput(
+ qna.TextRecord(
text="You can use the USB port on your Surface Pro 4 power supply to charge other devices, like a phone, while your Surface charges. " +
"The USB port on the power supply is only for charging, not for data transfer. If you want to use a USB device, plug it into the USB port on your Surface.",
id="doc2"
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase.test_query_knowledgebase_python_dict.yaml b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase.test_query_knowledgebase_python_dict.yaml
new file mode 100644
index 000000000000..b7d2902a0989
--- /dev/null
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase.test_query_knowledgebase_python_dict.yaml
@@ -0,0 +1,48 @@
+interactions:
+- request:
+ body: '{"qnaId": 19}'
+ headers:
+ Accept:
+ - application/json
+ Accept-Encoding:
+ - gzip, deflate
+ Connection:
+ - keep-alive
+ Content-Length:
+ - '13'
+ Content-Type:
+ - application/json
+ User-Agent:
+ - azsdk-python-ai-language-questionanswering/1.0.0b1 Python/3.7.4 (Windows-10-10.0.19041-SP0)
+ method: POST
+ uri: https://test-resource.api.cognitive.microsoft.com/language/:query-knowledgebases?projectName=test-project&deploymentName=test&api-version=2021-05-01-preview
+ response:
+ body:
+ string: "{\n \"answers\": [\n {\n \"questions\": [\n \"Charge
+ your Surface Pro 4\"\n ],\n \"answer\": \"**Charge your Surface
+ Pro 4**\\n\\n1. Connect the two parts of the power cord.\\n\\n2. Connect
+ the power cord securely to the charging port.\\n\\n3. Plug the power supply
+ into an electrical outlet.\",\n \"confidenceScore\": 1.0,\n \"id\":
+ 19,\n \"source\": \"surface-pro-4-user-guide-EN.pdf\",\n \"metadata\":
+ {\n \"explicitlytaggedheading\": \"charge your surface pro 4\"\n },\n
+ \ \"dialog\": {\n \"isContextOnly\": false,\n \"prompts\":
+ []\n }\n }\n ]\n}"
+ headers:
+ apim-request-id:
+ - 236db4d3-6b6a-4a54-b4a7-a85776b45d72
+ content-length:
+ - '583'
+ content-type:
+ - application/json; charset=utf-8
+ date:
+ - Thu, 15 Jul 2021 22:08:29 GMT
+ strict-transport-security:
+ - max-age=31536000; includeSubDomains; preload
+ x-content-type-options:
+ - nosniff
+ x-envoy-upstream-service-time:
+ - '1056'
+ status:
+ code: 200
+ message: OK
+version: 1
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase_async.test_query_knowledgebase_bad_request.yaml b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase_async.test_query_knowledgebase_bad_request.yaml
deleted file mode 100644
index af9de17280ea..000000000000
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase_async.test_query_knowledgebase_bad_request.yaml
+++ /dev/null
@@ -1,33 +0,0 @@
-interactions:
-- request:
- body: '{"qna_id": 19}'
- headers:
- Accept:
- - application/json
- Content-Length:
- - '14'
- Content-Type:
- - application/json
- User-Agent:
- - azsdk-python-ai-language-questionanswering/1.0.0b1 Python/3.7.4 (Windows-10-10.0.19041-SP0)
- method: POST
- uri: https://test-resource.api.cognitive.microsoft.com/language/:query-knowledgebases?projectName=test-project&deploymentName=test&api-version=2021-05-01-preview
- response:
- body:
- string: "{\n \"error\": {\n \"code\": \"BadArgument\",\n \"message\":
- \"Invalid input. See details.\",\n \"details\": [\n {\n \"code\":
- \"ValidationFailure\",\n \"message\": \"'Question' must not be empty.\",\n
- \ \"target\": \"Question\"\n }\n ]\n }\n}"
- headers:
- apim-request-id: 984a747d-795f-4205-bc2e-d2a799efb4ff
- content-length: '250'
- content-type: application/json; charset=utf-8
- date: Wed, 30 Jun 2021 18:12:33 GMT
- strict-transport-security: max-age=31536000; includeSubDomains; preload
- x-content-type-options: nosniff
- x-envoy-upstream-service-time: '12'
- status:
- code: 400
- message: Bad Request
- url: https://wuppe.api.cognitive.microsoft.com/language/:query-knowledgebases?projectName=190a9e13-8ede-4e4b-a8fd-c4d7f2aeab6c&deploymentName=test&api-version=2021-05-01-preview
-version: 1
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase_async.test_query_knowledgebase_python_dict.yaml b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase_async.test_query_knowledgebase_python_dict.yaml
new file mode 100644
index 000000000000..ecb0fec0b682
--- /dev/null
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/recordings/test_query_knowledgebase_async.test_query_knowledgebase_python_dict.yaml
@@ -0,0 +1,38 @@
+interactions:
+- request:
+ body: '{"qnaId": 19}'
+ headers:
+ Accept:
+ - application/json
+ Content-Length:
+ - '13'
+ Content-Type:
+ - application/json
+ User-Agent:
+ - azsdk-python-ai-language-questionanswering/1.0.0b1 Python/3.7.4 (Windows-10-10.0.19041-SP0)
+ method: POST
+ uri: https://test-resource.api.cognitive.microsoft.com/language/:query-knowledgebases?projectName=test-project&deploymentName=test&api-version=2021-05-01-preview
+ response:
+ body:
+ string: "{\n \"answers\": [\n {\n \"questions\": [\n \"Charge
+ your Surface Pro 4\"\n ],\n \"answer\": \"**Charge your Surface
+ Pro 4**\\n\\n1. Connect the two parts of the power cord.\\n\\n2. Connect
+ the power cord securely to the charging port.\\n\\n3. Plug the power supply
+ into an electrical outlet.\",\n \"confidenceScore\": 1.0,\n \"id\":
+ 19,\n \"source\": \"surface-pro-4-user-guide-EN.pdf\",\n \"metadata\":
+ {\n \"explicitlytaggedheading\": \"charge your surface pro 4\"\n },\n
+ \ \"dialog\": {\n \"isContextOnly\": false,\n \"prompts\":
+ []\n }\n }\n ]\n}"
+ headers:
+ apim-request-id: 527dd21e-90c2-4419-8e21-6704202b7098
+ content-length: '583'
+ content-type: application/json; charset=utf-8
+ date: Thu, 15 Jul 2021 22:08:30 GMT
+ strict-transport-security: max-age=31536000; includeSubDomains; preload
+ x-content-type-options: nosniff
+ x-envoy-upstream-service-time: '812'
+ status:
+ code: 200
+ message: OK
+ url: https://wuppe.api.cognitive.microsoft.com/language/:query-knowledgebases?projectName=190a9e13-8ede-4e4b-a8fd-c4d7f2aeab6c&deploymentName=test&api-version=2021-05-01-preview
+version: 1
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/test_query_knowledgebase.py b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/test_query_knowledgebase.py
index 470669ffa070..7fdd5e175663 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/test_query_knowledgebase.py
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/test_query_knowledgebase.py
@@ -16,13 +16,13 @@
from azure.ai.language.questionanswering import QuestionAnsweringClient
from azure.ai.language.questionanswering.rest import *
from azure.ai.language.questionanswering.models import (
- KnowledgebaseQueryParameters,
- KnowledgebaseAnswerRequestContext,
+ KnowledgeBaseQueryOptions,
+ KnowledgeBaseAnswerRequestContext,
AnswerSpanRequest,
)
-class QnAKnowledgebaseTests(QuestionAnsweringTest):
+class QnAKnowledgeBaseTests(QuestionAnsweringTest):
@GlobalQuestionAnsweringAccountPreparer()
def test_query_knowledgebase_llc(self, qna_account, qna_key, qna_project):
@@ -125,10 +125,10 @@ def test_query_knowledgebase_llc_with_answerspan(self, qna_account, qna_key, qna
@GlobalQuestionAnsweringAccountPreparer()
def test_query_knowledgebase(self, qna_account, qna_key, qna_project):
client = QuestionAnsweringClient(qna_account, AzureKeyCredential(qna_key))
- query_params = KnowledgebaseQueryParameters(
+ query_params = KnowledgeBaseQueryOptions(
question="Ports and connectors",
top=3,
- context=KnowledgebaseAnswerRequestContext(
+ context=KnowledgeBaseAnswerRequestContext(
previous_user_query="Meet Surface Pro 4",
previous_qna_id=4
)
@@ -138,7 +138,7 @@ def test_query_knowledgebase(self, qna_account, qna_key, qna_project):
output = client.query_knowledgebase(
project_name=qna_project,
deployment_name='test',
- knowledgebase_query_parameters=query_params
+ knowledge_base_query_options=query_params
)
assert output.answers
@@ -166,10 +166,10 @@ def test_query_knowledgebase(self, qna_account, qna_key, qna_project):
@GlobalQuestionAnsweringAccountPreparer()
def test_query_knowledgebase_with_answerspan(self, qna_account, qna_key, qna_project):
client = QuestionAnsweringClient(qna_account, AzureKeyCredential(qna_key))
- query_params = KnowledgebaseQueryParameters(
+ query_params = KnowledgeBaseQueryOptions(
question="Ports and connectors",
top=3,
- context=KnowledgebaseAnswerRequestContext(
+ context=KnowledgeBaseAnswerRequestContext(
previous_user_query="Meet Surface Pro 4",
previous_qna_id=4
),
@@ -184,7 +184,7 @@ def test_query_knowledgebase_with_answerspan(self, qna_account, qna_key, qna_pro
output = client.query_knowledgebase(
project_name=qna_project,
deployment_name='test',
- knowledgebase_query_parameters=query_params
+ knowledge_base_query_options=query_params
)
assert output.answers
@@ -235,7 +235,7 @@ def test_query_knowledgebase_with_dictparams(self, qna_account, qna_key, qna_pro
output = client.query_knowledgebase(
project_name=qna_project,
deployment_name='test',
- knowledgebase_query_parameters=query_params
+ knowledge_base_query_options=query_params
)
assert len(output.answers) == 3
@@ -247,7 +247,7 @@ def test_query_knowledgebase_with_dictparams(self, qna_account, qna_key, qna_pro
def test_query_knowledgebase_with_followup(self, qna_account, qna_key, qna_project):
client = QuestionAnsweringClient(qna_account, AzureKeyCredential(qna_key))
with client:
- query_params = KnowledgebaseQueryParameters(
+ query_params = KnowledgeBaseQueryOptions(
question="How long should my Surface battery last?",
top=3,
user_id="sd53lsY=",
@@ -263,18 +263,18 @@ def test_query_knowledgebase_with_followup(self, qna_account, qna_key, qna_proje
output = client.query_knowledgebase(
project_name=qna_project,
deployment_name='test',
- knowledgebase_query_parameters=query_params
+ knowledge_base_query_options=query_params
)
confident_answers = [a for a in output.answers if a.confidence_score > 0.9]
assert len(confident_answers) == 1
assert confident_answers[0].source == "surface-pro-4-user-guide-EN.pdf"
- query_params = KnowledgebaseQueryParameters(
+ query_params = KnowledgeBaseQueryOptions(
question="How long it takes to charge Surface?",
top=3,
user_id="sd53lsY=",
confidence_score_threshold=0.2,
- context=KnowledgebaseAnswerRequestContext(
+ context=KnowledgeBaseAnswerRequestContext(
previous_user_query="How long should my Surface battery last?",
previous_qna_id=confident_answers[0].id
),
@@ -288,7 +288,7 @@ def test_query_knowledgebase_with_followup(self, qna_account, qna_key, qna_proje
output = client.query_knowledgebase(
project_name=qna_project,
deployment_name='test',
- knowledgebase_query_parameters=query_params
+ knowledge_base_query_options=query_params
)
assert len(output.answers) == 2
@@ -301,14 +301,28 @@ def test_query_knowledgebase_with_followup(self, qna_account, qna_key, qna_proje
def test_query_knowledgebase_only_id(self, qna_account, qna_key, qna_project):
client = QuestionAnsweringClient(qna_account, AzureKeyCredential(qna_key))
with client:
- query_params = KnowledgebaseQueryParameters(
+ query_params = KnowledgeBaseQueryOptions(
qna_id=19
)
output = client.query_knowledgebase(
project_name=qna_project,
deployment_name='test',
- knowledgebase_query_parameters=query_params
+ knowledge_base_query_options=query_params
)
assert len(output.answers) == 1
+
+ @GlobalQuestionAnsweringAccountPreparer()
+ def test_query_knowledgebase_python_dict(self, qna_account, qna_key, qna_project):
+ client = QuestionAnsweringClient(qna_account, AzureKeyCredential(qna_key))
+ with client:
+ query_params = {"qna_id": 19}
+
+ output = client.query_knowledgebase(
+ project_name=qna_project,
+ deployment_name='test',
+ knowledge_base_query_options=query_params
+ )
+
+ assert len(output.answers) == 1
\ No newline at end of file
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/test_query_knowledgebase_async.py b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/test_query_knowledgebase_async.py
index 0bb798e0ca2f..fe671c39b49a 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/test_query_knowledgebase_async.py
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/test_query_knowledgebase_async.py
@@ -16,15 +16,15 @@
from asynctestcase import AsyncQuestionAnsweringTest
from azure.ai.language.questionanswering.models import (
- KnowledgebaseQueryParameters,
- KnowledgebaseAnswerRequestContext,
+ KnowledgeBaseQueryOptions,
+ KnowledgeBaseAnswerRequestContext,
AnswerSpanRequest,
)
from azure.ai.language.questionanswering.aio import QuestionAnsweringClient
from azure.ai.language.questionanswering.rest import *
-class QnAKnowledgebaseTestsAsync(AsyncQuestionAnsweringTest):
+class QnAKnowledgeBaseTestsAsync(AsyncQuestionAnsweringTest):
@GlobalQuestionAnsweringAccountPreparer()
async def test_query_knowledgebase_llc(self, qna_account, qna_key, qna_project):
@@ -127,10 +127,10 @@ async def test_query_knowledgebase_llc_with_answerspan(self, qna_account, qna_ke
@GlobalQuestionAnsweringAccountPreparer()
async def test_query_knowledgebase(self, qna_account, qna_key, qna_project):
client = QuestionAnsweringClient(qna_account, AzureKeyCredential(qna_key))
- query_params = KnowledgebaseQueryParameters(
+ query_params = KnowledgeBaseQueryOptions(
question="Ports and connectors",
top=3,
- context=KnowledgebaseAnswerRequestContext(
+ context=KnowledgeBaseAnswerRequestContext(
previous_user_query="Meet Surface Pro 4",
previous_qna_id=4
)
@@ -140,7 +140,7 @@ async def test_query_knowledgebase(self, qna_account, qna_key, qna_project):
output = await client.query_knowledgebase(
project_name=qna_project,
deployment_name='test',
- knowledgebase_query_parameters=query_params
+ knowledge_base_query_options=query_params
)
assert output.answers
@@ -168,10 +168,10 @@ async def test_query_knowledgebase(self, qna_account, qna_key, qna_project):
@GlobalQuestionAnsweringAccountPreparer()
async def test_query_knowledgebase_with_answerspan(self, qna_account, qna_key, qna_project):
client = QuestionAnsweringClient(qna_account, AzureKeyCredential(qna_key))
- query_params = KnowledgebaseQueryParameters(
+ query_params = KnowledgeBaseQueryOptions(
question="Ports and connectors",
top=3,
- context=KnowledgebaseAnswerRequestContext(
+ context=KnowledgeBaseAnswerRequestContext(
previous_user_query="Meet Surface Pro 4",
previous_qna_id=4
),
@@ -186,7 +186,7 @@ async def test_query_knowledgebase_with_answerspan(self, qna_account, qna_key, q
output = await client.query_knowledgebase(
project_name=qna_project,
deployment_name='test',
- knowledgebase_query_parameters=query_params
+ knowledge_base_query_options=query_params
)
assert output.answers
@@ -236,7 +236,7 @@ async def test_query_knowledgebase_with_dictparams(self, qna_account, qna_key, q
output = await client.query_knowledgebase(
project_name=qna_project,
deployment_name='test',
- knowledgebase_query_parameters=query_params
+ knowledge_base_query_options=query_params
)
assert len(output.answers) == 3
@@ -248,7 +248,7 @@ async def test_query_knowledgebase_with_dictparams(self, qna_account, qna_key, q
async def test_query_knowledgebase_with_followup(self, qna_account, qna_key, qna_project):
client = QuestionAnsweringClient(qna_account, AzureKeyCredential(qna_key))
async with client:
- query_params = KnowledgebaseQueryParameters(
+ query_params = KnowledgeBaseQueryOptions(
question="How long should my Surface battery last?",
top=3,
user_id="sd53lsY=",
@@ -264,18 +264,18 @@ async def test_query_knowledgebase_with_followup(self, qna_account, qna_key, qna
output = await client.query_knowledgebase(
project_name=qna_project,
deployment_name='test',
- knowledgebase_query_parameters=query_params
+ knowledge_base_query_options=query_params
)
confident_answers = [a for a in output.answers if a.confidence_score > 0.9]
assert len(confident_answers) == 1
assert confident_answers[0].source == "surface-pro-4-user-guide-EN.pdf"
- query_params = KnowledgebaseQueryParameters(
+ query_params = KnowledgeBaseQueryOptions(
question="How long it takes to charge Surface?",
top=3,
user_id="sd53lsY=",
confidence_score_threshold=0.2,
- context=KnowledgebaseAnswerRequestContext(
+ context=KnowledgeBaseAnswerRequestContext(
previous_user_query="How long should my Surface battery last?",
previous_qna_id=confident_answers[0].id
),
@@ -289,7 +289,7 @@ async def test_query_knowledgebase_with_followup(self, qna_account, qna_key, qna
output = await client.query_knowledgebase(
project_name=qna_project,
deployment_name='test',
- knowledgebase_query_parameters=query_params
+ knowledge_base_query_options=query_params
)
assert len(output.answers) == 2
@@ -306,21 +306,21 @@ async def test_query_knowledgebase_only_id(self, qna_account, qna_key, qna_proje
output = await client.query_knowledgebase(
project_name=qna_project,
deployment_name='test',
- knowledgebase_query_parameters=query_params
+ knowledge_base_query_options=query_params
)
assert len(output.answers) == 1
@GlobalQuestionAnsweringAccountPreparer()
- async def test_query_knowledgebase_bad_request(self, qna_account, qna_key, qna_project):
+ async def test_query_knowledgebase_python_dict(self, qna_account, qna_key, qna_project):
client = QuestionAnsweringClient(qna_account, AzureKeyCredential(qna_key))
async with client:
query_params = {"qna_id": 19}
- with pytest.raises(HttpResponseError):
- await client.query_knowledgebase(
- project_name=qna_project,
- deployment_name='test',
- knowledgebase_query_parameters=query_params
- )
+ output = await client.query_knowledgebase(
+ project_name=qna_project,
+ deployment_name='test',
+ knowledge_base_query_options=query_params
+ )
+ assert len(output.answers) == 1
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/test_query_text.py b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/test_query_text.py
index 9993ae313f6f..d1c73d036cac 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/test_query_text.py
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/test_query_text.py
@@ -15,8 +15,8 @@
from azure.ai.language.questionanswering import QuestionAnsweringClient
from azure.ai.language.questionanswering.rest import *
from azure.ai.language.questionanswering.models import (
- TextQueryParameters,
- TextInput
+ TextQueryOptions,
+ TextRecord
)
class QnATests(QuestionAnsweringTest):
@@ -65,18 +65,18 @@ def test_query_text_llc(self, qna_account, qna_key):
@GlobalQuestionAnsweringAccountPreparer()
def test_query_text(self, qna_account, qna_key):
client = QuestionAnsweringClient(qna_account, AzureKeyCredential(qna_key))
- params = TextQueryParameters(
+ params = TextQueryOptions(
question="What is the meaning of life?",
records=[
- TextInput(
+ TextRecord(
text="abc Graphics Surprise, surprise -- our 4K ",
id="doc1"
),
- TextInput(
+ TextRecord(
text="e graphics card. While the Nvidia GeForce MX250 GPU isn't meant for demanding gaming, it is a step up from integrated graphics as proven by comparing it to the UHD 620 GPU in the FHD model. The MX250-equipped Envy 13 scored a 116,575 on the Ice Storm Unlimited benchmark while the base model scored a 82,270. Upgrading to the discrete graphics gives the Envy 13 better performance than the Notebook 9 Pro (61,662; UHD 620), Surface Laptop 2 (71,647; UHD 620) and the premium laptop average (86,937). While the Nvidia GeForce MX250 GPU isn't meant for demanding gaming, it is a step up from integrated graphics as proven by comparing it to the UHD 620 GPU in the FHD model. We played the racing game Dirt 3 at 92 frames per second on ",
id="doc2"
),
- TextInput(
+ TextRecord(
text="Graphics Surprise, surprise -- our 4K Envy 13 came with a discrete graphics card. While the Nvidia GeForce MX250 GPU isn't meant for demanding gaming, it is a step up from integrated graphics as proven by comparing it to the UHD 620 GPU in the FHD model. The MX250-equipped Envy 13 scored a 116,575 on the Ice Storm Unlimited benchmark while the base model scored a 82,270. Upgrading to the discrete graphics gives the Envy 13 better performance than the Notebook 9 Pro (61,662; UHD 620), Surface Laptop 2 (71,647; UHD 620) and the premium laptop average (86,937). While the Nvidia GeForce MX250 GPU isn't meant for demanding gaming, it is a step up from integrated graphics as proven by comparing it to the UHD 620 GPU in the FHD model. We played the racing game Dirt 3 at 92 frames per second on the MX250 model, which is well above our 30-fps playability, the category average (69 fps) and what the Surface Laptop 2 (82 fps) achieved. The ZenBook S UX391UA (45 fps) fell flat on this real-world test but ran better than the base model Envy 13 (31 fps). Audio I had a good ol' time groovin' to the sound of the Envy 13's crisp speakers. HP went all out with the Envy, placing dual speakers on the underside of the chassis along with a third, top-firing driver above the keyboard. Devon Gilfillian's funky jam \"Here and Now\" boomed smooth, soulful tunes throughout my small apartment. The twang of the electric guitar played nicely with the thudding percussion but never overshadowed Gilfillian or the female backup vocals. Bang & Olufsen software comes preinstalled on the Envy 13, with equalizer controls so you can adjust the bass, midrange and treble to your liking. But even out of the box, you'll enjoy great sound without having to bust out your headphones. Battery Life Get an Envy 13 with the 1080p non-touch display if battery life is important to you. 
The FHD model endured for 11 hours and 11 minutes whereas the 4K model lasted only 4 hours and 36 minutes on our battery test, which involves continuous web browsing over Wi-Fi at 150 nits of brightness. MORE: Laptops with Best Battery Life - Longest Lasting Laptop Batteries Competing laptops like the ZenBook S UX391UA (7:05), Surface Laptop 2 (9:22) and Notebook 9 Pro (8:53) outstayed the 4K Envy 13 but powered down long before the 1080p version. Webcam The 720p webcam on the Envy 13 is nothing to write home about. A selfie I snapped in my dimly lit room was covered in a haze of visual noise. My beard and hair were unkempt blobs, while my eyes looked like they were drawn on by a pointillist painter. If there's one positive, it's that the lens captures natural colors and even extracted the different shades of gray in my T-shirt. On the right edge of the Envy 13 is a physical kill switch that cuts the power to the webcam so you can feel reassured that nobody is snooping on you. Heat Leave the lapdesk at home - you don't have to worry about the Envy 13 overheating. After I played a 15-minute, full-HD video in full screen, the touchpad on the HP Envy 13 with a Core i7 CPU rose to only 83 degrees Fahrenheit while the keyboard (87 degrees) and underside (90 degrees) also remained well below our 95-degree comfort threshold. Even the toastiest part of the machine, the lower-left edge on the underside, topped out at 94 degrees. Software and Warranty It's a shame that a laptop with such beautiful hardware ships with such ugly software. Pre-installed on this machine are entirely too many programs that could either be packaged together or omitted altogether. HP provides an app called Audio Switch, which simply lets you switch your audio input/output between the internal speakers and headphones. As the same implies, HP's Command Center is where you can get information about your Envy 13 but also switch the thermal profiles between comfort and performance. 
Along with support documentation, HP also bundles in a setup program called JumpStart, a program for connecting printers and a redundant system-info app called Event Utility. Also installed on the Envy 13's Windows 10 Home OS are several Microsoft apps, including Simple Solitaire, Candy Crush Friends and Your Phone. Other third-party apps include Booking.com, Netflix and McAfee Security. HP ships the Envy 13 with a one-year warranty. See how HP did on our Tech Support Showdown and Best and Worst Brands ranking. Bottom Line The Envy 13 has cemented its standing as the ultimate laptop for college students or travelers. Along with 11-plus hours of battery life (on the FHD model), the Envy 13 has a sleek, ultraportable chassis, fast performance, and powerful speakers. Best of all, the Envy 13 starts at a reasonable $799, which is hundreds less than the competition. In many ways, the Envy 13 is what we wanted the new MacBook Air to be. The new HP Envy 13 is everything I was hoping the new MacBook Air would be: fast, attractive and affordable. Just be sure to buy the right model. We strongly recommend the 1080p version over the 4K model because it lasts several hours longer on a charge and costs less. In fact, if we were reviewing the 4K model separately, we'd only give it a 3.5 rating. You should also consider the Envy 13 with a 10th Gen CPU, although we haven't gotten the chance to review it yet. If you absolutely need a high-res display, the 4K Envy 13 is one of many good options. We also recommend the Samsung Notebook 9 Pro, which has a similarly premium design but much better battery life than the 4K Envy. The Microsoft Surface Laptop 2 is another recommended alternative, though you might want to wait a few months for the rumored Surface Laptop 3. Overall, the HP Envy 13 is a fantastic laptop that checks all the right boxes --- as long as you buy the 1080p model. 
Credit: Laptop Mag HP Envy 13 (2019) Specs BluetoothBluetooth 5.0 BrandHP CPUIntel Core i7-8565U Card SlotsmicroSD Company Websitehttps://www8.hp.com/us/en/home.html Display Size13.3 Graphics CardNvidia GeForce MX250 Hard Drive Size512GB Hard Drive TypePCIe NVMe M.2 Highest Available Resolution3840 x 2160 Native Resolution3840 x 2160 Operating SystemWindows 10 Home Ports (excluding USB)USB 3.1 with Type-C, USB 3.1 Always-On, USB 3.1, Headphone/Mic, microSD RAM16GB RAM Upgradable to16GB Size12.1 x 8.3 x .57 inches Touchpad Size4.3 x 2.2 inches USB Ports3 Video Memory2GB Warranty/Supportone-year warranty. Weight2.8 pounds Wi-Fi802.11ac Wi-Fi ModelIntel Wireless-AC 9560 ",
id="doc3"
)
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/test_query_text_async.py b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/test_query_text_async.py
index 4a04f2ac5613..667a8fbd5c36 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/test_query_text_async.py
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/test_query_text_async.py
@@ -15,8 +15,8 @@
from azure.ai.language.questionanswering.aio import QuestionAnsweringClient
from azure.ai.language.questionanswering.rest import *
from azure.ai.language.questionanswering.models import (
- TextQueryParameters,
- TextInput
+ TextQueryOptions,
+ TextRecord
)
class QnATests(QuestionAnsweringTest):
@@ -67,18 +67,18 @@ async def test_query_text_llc(self, qna_account, qna_key):
@GlobalQuestionAnsweringAccountPreparer()
async def test_query_text(self, qna_account, qna_key):
client = QuestionAnsweringClient(qna_account, AzureKeyCredential(qna_key))
- params = TextQueryParameters(
+ params = TextQueryOptions(
question="What is the meaning of life?",
records=[
- TextInput(
+ TextRecord(
text="abc Graphics Surprise, surprise -- our 4K ",
id="doc1"
),
- TextInput(
+ TextRecord(
text="e graphics card. While the Nvidia GeForce MX250 GPU isn't meant for demanding gaming, it is a step up from integrated graphics as proven by comparing it to the UHD 620 GPU in the FHD model. The MX250-equipped Envy 13 scored a 116,575 on the Ice Storm Unlimited benchmark while the base model scored a 82,270. Upgrading to the discrete graphics gives the Envy 13 better performance than the Notebook 9 Pro (61,662; UHD 620), Surface Laptop 2 (71,647; UHD 620) and the premium laptop average (86,937). While the Nvidia GeForce MX250 GPU isn't meant for demanding gaming, it is a step up from integrated graphics as proven by comparing it to the UHD 620 GPU in the FHD model. We played the racing game Dirt 3 at 92 frames per second on ",
id="doc2"
),
- TextInput(
+ TextRecord(
text="Graphics Surprise, surprise -- our 4K Envy 13 came with a discrete graphics card. While the Nvidia GeForce MX250 GPU isn't meant for demanding gaming, it is a step up from integrated graphics as proven by comparing it to the UHD 620 GPU in the FHD model. The MX250-equipped Envy 13 scored a 116,575 on the Ice Storm Unlimited benchmark while the base model scored a 82,270. Upgrading to the discrete graphics gives the Envy 13 better performance than the Notebook 9 Pro (61,662; UHD 620), Surface Laptop 2 (71,647; UHD 620) and the premium laptop average (86,937). While the Nvidia GeForce MX250 GPU isn't meant for demanding gaming, it is a step up from integrated graphics as proven by comparing it to the UHD 620 GPU in the FHD model. We played the racing game Dirt 3 at 92 frames per second on the MX250 model, which is well above our 30-fps playability, the category average (69 fps) and what the Surface Laptop 2 (82 fps) achieved. The ZenBook S UX391UA (45 fps) fell flat on this real-world test but ran better than the base model Envy 13 (31 fps). Audio I had a good ol' time groovin' to the sound of the Envy 13's crisp speakers. HP went all out with the Envy, placing dual speakers on the underside of the chassis along with a third, top-firing driver above the keyboard. Devon Gilfillian's funky jam \"Here and Now\" boomed smooth, soulful tunes throughout my small apartment. The twang of the electric guitar played nicely with the thudding percussion but never overshadowed Gilfillian or the female backup vocals. Bang & Olufsen software comes preinstalled on the Envy 13, with equalizer controls so you can adjust the bass, midrange and treble to your liking. But even out of the box, you'll enjoy great sound without having to bust out your headphones. Battery Life Get an Envy 13 with the 1080p non-touch display if battery life is important to you. 
The FHD model endured for 11 hours and 11 minutes whereas the 4K model lasted only 4 hours and 36 minutes on our battery test, which involves continuous web browsing over Wi-Fi at 150 nits of brightness. MORE: Laptops with Best Battery Life - Longest Lasting Laptop Batteries Competing laptops like the ZenBook S UX391UA (7:05), Surface Laptop 2 (9:22) and Notebook 9 Pro (8:53) outstayed the 4K Envy 13 but powered down long before the 1080p version. Webcam The 720p webcam on the Envy 13 is nothing to write home about. A selfie I snapped in my dimly lit room was covered in a haze of visual noise. My beard and hair were unkempt blobs, while my eyes looked like they were drawn on by a pointillist painter. If there's one positive, it's that the lens captures natural colors and even extracted the different shades of gray in my T-shirt. On the right edge of the Envy 13 is a physical kill switch that cuts the power to the webcam so you can feel reassured that nobody is snooping on you. Heat Leave the lapdesk at home - you don't have to worry about the Envy 13 overheating. After I played a 15-minute, full-HD video in full screen, the touchpad on the HP Envy 13 with a Core i7 CPU rose to only 83 degrees Fahrenheit while the keyboard (87 degrees) and underside (90 degrees) also remained well below our 95-degree comfort threshold. Even the toastiest part of the machine, the lower-left edge on the underside, topped out at 94 degrees. Software and Warranty It's a shame that a laptop with such beautiful hardware ships with such ugly software. Pre-installed on this machine are entirely too many programs that could either be packaged together or omitted altogether. HP provides an app called Audio Switch, which simply lets you switch your audio input/output between the internal speakers and headphones. As the same implies, HP's Command Center is where you can get information about your Envy 13 but also switch the thermal profiles between comfort and performance. 
Along with support documentation, HP also bundles in a setup program called JumpStart, a program for connecting printers and a redundant system-info app called Event Utility. Also installed on the Envy 13's Windows 10 Home OS are several Microsoft apps, including Simple Solitaire, Candy Crush Friends and Your Phone. Other third-party apps include Booking.com, Netflix and McAfee Security. HP ships the Envy 13 with a one-year warranty. See how HP did on our Tech Support Showdown and Best and Worst Brands ranking. Bottom Line The Envy 13 has cemented its standing as the ultimate laptop for college students or travelers. Along with 11-plus hours of battery life (on the FHD model), the Envy 13 has a sleek, ultraportable chassis, fast performance, and powerful speakers. Best of all, the Envy 13 starts at a reasonable $799, which is hundreds less than the competition. In many ways, the Envy 13 is what we wanted the new MacBook Air to be. The new HP Envy 13 is everything I was hoping the new MacBook Air would be: fast, attractive and affordable. Just be sure to buy the right model. We strongly recommend the 1080p version over the 4K model because it lasts several hours longer on a charge and costs less. In fact, if we were reviewing the 4K model separately, we'd only give it a 3.5 rating. You should also consider the Envy 13 with a 10th Gen CPU, although we haven't gotten the chance to review it yet. If you absolutely need a high-res display, the 4K Envy 13 is one of many good options. We also recommend the Samsung Notebook 9 Pro, which has a similarly premium design but much better battery life than the 4K Envy. The Microsoft Surface Laptop 2 is another recommended alternative, though you might want to wait a few months for the rumored Surface Laptop 3. Overall, the HP Envy 13 is a fantastic laptop that checks all the right boxes --- as long as you buy the 1080p model. 
Credit: Laptop Mag HP Envy 13 (2019) Specs BluetoothBluetooth 5.0 BrandHP CPUIntel Core i7-8565U Card SlotsmicroSD Company Websitehttps://www8.hp.com/us/en/home.html Display Size13.3 Graphics CardNvidia GeForce MX250 Hard Drive Size512GB Hard Drive TypePCIe NVMe M.2 Highest Available Resolution3840 x 2160 Native Resolution3840 x 2160 Operating SystemWindows 10 Home Ports (excluding USB)USB 3.1 with Type-C, USB 3.1 Always-On, USB 3.1, Headphone/Mic, microSD RAM16GB RAM Upgradable to16GB Size12.1 x 8.3 x .57 inches Touchpad Size4.3 x 2.2 inches USB Ports3 Video Memory2GB Warranty/Supportone-year warranty. Weight2.8 pounds Wi-Fi802.11ac Wi-Fi ModelIntel Wireless-AC 9560 ",
id="doc3"
)
diff --git a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/testcase.py b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/testcase.py
index 56c4be587ee2..483db8723483 100644
--- a/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/testcase.py
+++ b/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/testcase.py
@@ -44,9 +44,9 @@ class QuestionAnsweringTest(AzureTestCase):
def __init__(self, method_name):
super(QuestionAnsweringTest, self).__init__(method_name)
- self.scrubber.register_name_pair(os.environ.get("QNA_ACCOUNT"), TEST_ENDPOINT)
- self.scrubber.register_name_pair(os.environ.get("QNA_KEY"), TEST_KEY)
- self.scrubber.register_name_pair(os.environ.get("QNA_PROJECT"), TEST_PROJECT)
+ self.scrubber.register_name_pair(os.environ.get("AZURE_QUESTIONANSWERING_ENDPOINT"), TEST_ENDPOINT)
+ self.scrubber.register_name_pair(os.environ.get("AZURE_QUESTIONANSWERING_KEY"), TEST_KEY)
+ self.scrubber.register_name_pair(os.environ.get("AZURE_QUESTIONANSWERING_PROJECT"), TEST_PROJECT)
def get_oauth_endpoint(self):
raise NotImplementedError()
@@ -96,9 +96,9 @@ def create_resource(self, name, **kwargs):
return {
'location': REGION,
'resource_group': "rgname",
- 'qna_account': os.environ.get("QNA_ACCOUNT"),
- 'qna_key': os.environ.get("QNA_KEY"),
- 'qna_project': os.environ.get("QNA_PROJECT")
+ 'qna_account': os.environ.get("AZURE_QUESTIONANSWERING_ENDPOINT"),
+ 'qna_key': os.environ.get("AZURE_QUESTIONANSWERING_KEY"),
+ 'qna_project': os.environ.get("AZURE_QUESTIONANSWERING_PROJECT")
}
return {
'location': REGION,
diff --git a/sdk/cognitivelanguage/tests.yml b/sdk/cognitivelanguage/tests.yml
index b2b663ad9323..77ea573acbbc 100644
--- a/sdk/cognitivelanguage/tests.yml
+++ b/sdk/cognitivelanguage/tests.yml
@@ -1,4 +1,28 @@
-trigger: none
+trigger:
+ branches:
+ include:
+ - master
+ - main
+ - hotfix/*
+ - release/*
+ - restapi*
+ paths:
+ include:
+ - sdk/cognitivelanguage/
+ - scripts/
+
+pr:
+ branches:
+ include:
+ - master
+ - main
+ - feature/*
+ - hotfix/*
+ - release/*
+ - restapi*
+ paths:
+ include:
+ - sdk/cognitivelanguage/
stages:
- template: ../../eng/pipelines/templates/stages/archetype-sdk-tests.yml
@@ -8,9 +32,9 @@ stages:
MatrixReplace:
- TestSamples=.*/true
EnvVars:
- QNA_KEY: $(qna-key)
- QNA_PROJECT: 190a9e13-8ede-4e4b-a8fd-c4d7f2aeab6c
- QNA_ACCOUNT: $(qna-uri)
+ AZURE_QUESTIONANSWERING_KEY: $(qna-key)
+ AZURE_QUESTIONANSWERING_PROJECT: 190a9e13-8ede-4e4b-a8fd-c4d7f2aeab6c
+ AZURE_QUESTIONANSWERING_ENDPOINT: $(qna-uri)
AZURE_CLIENT_ID: $(aad-azure-sdk-test-client-id)
AZURE_CLIENT_SECRET: $(aad-azure-sdk-test-client-secret)
AZURE_SUBSCRIPTION_ID: $(azure-subscription-id)