diff --git a/sdk/monitor/azure-monitor-query/CHANGELOG.md b/sdk/monitor/azure-monitor-query/CHANGELOG.md
index 90c1edbeb985..ddd660be7f11 100644
--- a/sdk/monitor/azure-monitor-query/CHANGELOG.md
+++ b/sdk/monitor/azure-monitor-query/CHANGELOG.md
@@ -10,6 +10,7 @@
- Rename `LogsBatchQueryRequest` to `LogsBatchQuery`.
- `include_render` is now renamed to `include_visualization` in the query API.
- `LogsQueryResult` and `LogsBatchQueryResult` now return `visualization` instead of `render`.
+- `start_time`, `duration` and `end_time` are now replaced with a single param called `timespan`.
### Bugs Fixed
diff --git a/sdk/monitor/azure-monitor-query/README.md b/sdk/monitor/azure-monitor-query/README.md
index 6ba734f6bd96..4805006ccdb1 100644
--- a/sdk/monitor/azure-monitor-query/README.md
+++ b/sdk/monitor/azure-monitor-query/README.md
@@ -100,7 +100,7 @@ Each set of metric values is a time series with the following characteristics:
## Examples
- [Single logs query](#single-logs-query)
- - [Specify duration](#specify-duration)
+ - [Specify timespan](#specify-timespan)
- [Set logs query timeout](#set-logs-query-timeout)
- [Batch logs query](#batch-logs-query)
- [Query metrics](#query-metrics)
@@ -113,9 +113,9 @@ Each set of metric values is a time series with the following characteristics:
This example shows getting a log query. To handle the response and view it in a tabular form, the [pandas](https://pypi.org/project/pandas/) library is used. See the [samples][python-query-samples] if you choose not to use pandas.
-#### Specify duration
+#### Specify timespan
-The `duration` parameter specifies the time duration for which to query the data. This argument can also be accompanied with either `start_time` or `end_time`. If either `start_time` or `end_time` aren't provided, the current time is used as the end time. As an alternative, the `start_time` and `end_time` arguments can be provided together instead of the `duration` argument. For example:
+The `timespan` parameter specifies the time duration for which to query the data. This can be a timedelta, a tuple of a start datetime and a timedelta, or a tuple of a start datetime and an end datetime. For example:
```python
import os
@@ -132,12 +132,14 @@ client = LogsQueryClient(credential)
query = """AppRequests |
summarize avgRequestDuration=avg(DurationMs) by bin(TimeGenerated, 10m), _ResourceId"""
+start_time=datetime(2021, 7, 2)
+end_time=datetime.now()
+
# returns LogsQueryResult
response = client.query(
os.environ['LOG_WORKSPACE_ID'],
query,
- start_time=datetime(2021, 6, 2),
- end_time=datetime.now()
+ timespan=(start_time, end_time)
)
if not response.tables:
@@ -185,14 +187,13 @@ client = LogsQueryClient(credential)
requests = [
LogsBatchQuery(
query="AzureActivity | summarize count()",
- duration=timedelta(hours=1),
+ timespan=timedelta(hours=1),
workspace_id=os.environ['LOG_WORKSPACE_ID']
),
LogsBatchQuery(
query= """AppRequests | take 10 |
summarize avgRequestDuration=avg(DurationMs) by bin(TimeGenerated, 10m), _ResourceId""",
- duration=timedelta(hours=1),
- start_time=datetime(2021, 6, 2),
+ timespan=(datetime(2021, 6, 2), timedelta(hours=1)),
workspace_id=os.environ['LOG_WORKSPACE_ID']
),
LogsBatchQueryRequest(
@@ -270,13 +271,13 @@ from azure.identity import DefaultAzureCredential
credential = DefaultAzureCredential()
client = MetricsQueryClient(credential)
-
+start_time = datetime(2021, 5, 25)
+duration = timedelta(days=1)
metrics_uri = os.environ['METRICS_RESOURCE_URI']
response = client.query(
metrics_uri,
metric_names=["PublishSuccessCount"],
- start_time=datetime(2021, 5, 25),
- duration=timedelta(days=1),
+ timespan=(start_time, duration)
)
for metric in response.metrics:
@@ -322,8 +323,6 @@ metrics_uri = os.environ['METRICS_RESOURCE_URI']
response = client.query(
metrics_uri,
metric_names=["MatchedEventCount"],
- start_time=datetime(2021, 6, 21),
- duration=timedelta(days=1),
aggregations=[AggregationType.COUNT]
)
diff --git a/sdk/monitor/azure-monitor-query/azure/monitor/query/_helpers.py b/sdk/monitor/azure-monitor-query/azure/monitor/query/_helpers.py
index 727906c27494..d1333a4f362c 100644
--- a/sdk/monitor/azure-monitor-query/azure/monitor/query/_helpers.py
+++ b/sdk/monitor/azure-monitor-query/azure/monitor/query/_helpers.py
@@ -4,6 +4,7 @@
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
+from datetime import datetime, timedelta
from typing import TYPE_CHECKING
from msrest import Serializer
from azure.core.exceptions import HttpResponseError
@@ -49,26 +50,34 @@ def order_results(request_order, responses):
ordered = [mapping[id] for id in request_order]
return ordered
-def construct_iso8601(start=None, end=None, duration=None):
- if duration is not None:
- duration = 'PT{}S'.format(duration.total_seconds())
+def construct_iso8601(timespan=None):
+ if not timespan:
+ return None
+ try:
+ start, end, duration = None, None, None
+        if isinstance(timespan[1], datetime): # we treat this as start_time, end_time
+ start, end = timespan[0], timespan[1]
+ elif isinstance(timespan[1], timedelta): # we treat this as start_time, duration
+ start, duration = timespan[0], timespan[1]
+ else:
+ raise ValueError('Tuple must be a start datetime with a timedelta or an end datetime.')
+ except TypeError:
+        duration = timespan # only a duration (timedelta) was provided
+ if duration:
+ try:
+ duration = 'PT{}S'.format(duration.total_seconds())
+ except AttributeError:
+ raise ValueError('timespan must be a timedelta or a tuple.')
iso_str = None
if start is not None:
start = Serializer.serialize_iso(start)
- if end and duration:
- raise ValueError("start_time can only be provided with duration or end_time, but not both.")
if end is not None:
end = Serializer.serialize_iso(end)
iso_str = start + '/' + end
elif duration is not None:
iso_str = start + '/' + duration
- else:
- raise ValueError("Start time must be provided along with duration or end time.")
- elif end is not None:
- if not duration:
- raise ValueError("End time must be provided along with duration or start time.")
- end = Serializer.serialize_iso(end)
- iso_str = duration + '/' + end
+    else: # an invalid None value was provided along with start_time
+ raise ValueError("Duration or end_time cannot be None when provided with start_time.")
else:
iso_str = duration
return iso_str
diff --git a/sdk/monitor/azure-monitor-query/azure/monitor/query/_logs_query_client.py b/sdk/monitor/azure-monitor-query/azure/monitor/query/_logs_query_client.py
index f23157cfca03..30ae9238e579 100644
--- a/sdk/monitor/azure-monitor-query/azure/monitor/query/_logs_query_client.py
+++ b/sdk/monitor/azure-monitor-query/azure/monitor/query/_logs_query_client.py
@@ -49,28 +49,22 @@ def __init__(self, credential, **kwargs):
)
self._query_op = self._client.query
- def query(self, workspace_id, query, duration=None, **kwargs):
+ def query(self, workspace_id, query, timespan=None, **kwargs):
# type: (str, str, Optional[timedelta], Any) -> LogsQueryResult
"""Execute an Analytics query.
Executes an Analytics query for data.
- **Note**: Although the start_time, end_time, duration are optional parameters, it is highly
- recommended to specify the timespan. If not, the entire dataset is queried.
-
:param workspace_id: ID of the workspace. This is Workspace ID from the Properties blade in the
Azure portal.
:type workspace_id: str
:param query: The Analytics query. Learn more about the `Analytics query syntax
`_.
:type query: str
- :param ~datetime.timedelta duration: The duration for which to query the data. This can also be accompanied
- with either start_time or end_time. If start_time or end_time is not provided, the current time is
- taken as the end time.
- :keyword datetime start_time: The start time from which to query the data. This should be accompanied
- with either end_time or duration.
- :keyword datetime end_time: The end time till which to query the data. This should be accompanied
- with either start_time or duration.
+ :param timespan: The timespan for which to query the data. This can be a timedelta,
+ a timedelta and a start datetime, or a start datetime/end datetime.
+ :type timespan: ~datetime.timedelta or tuple[~datetime.datetime, ~datetime.timedelta]
+ or tuple[~datetime.datetime, ~datetime.datetime]
:keyword int server_timeout: the server timeout in seconds. The default timeout is 3 minutes,
and the maximum timeout is 10 minutes.
:keyword bool include_statistics: To get information about query statistics.
@@ -93,9 +87,7 @@ def query(self, workspace_id, query, duration=None, **kwargs):
:dedent: 0
:caption: Get a response for a single Log Query
"""
- start = kwargs.pop('start_time', None)
- end = kwargs.pop('end_time', None)
- timespan = construct_iso8601(start, end, duration)
+ timespan = construct_iso8601(timespan)
include_statistics = kwargs.pop("include_statistics", False)
include_visualization = kwargs.pop("include_visualization", False)
server_timeout = kwargs.pop("server_timeout", None)
diff --git a/sdk/monitor/azure-monitor-query/azure/monitor/query/_metrics_query_client.py b/sdk/monitor/azure-monitor-query/azure/monitor/query/_metrics_query_client.py
index 2467b92216f8..4f0e4a5fe53b 100644
--- a/sdk/monitor/azure-monitor-query/azure/monitor/query/_metrics_query_client.py
+++ b/sdk/monitor/azure-monitor-query/azure/monitor/query/_metrics_query_client.py
@@ -53,7 +53,7 @@ def __init__(self, credential, **kwargs):
self._namespace_op = self._client.metric_namespaces
self._definitions_op = self._client.metric_definitions
- def query(self, resource_uri, metric_names, duration=None, **kwargs):
+ def query(self, resource_uri, metric_names, **kwargs):
# type: (str, list, Optional[timedelta], Any) -> MetricsResult
"""Lists the metric values for a resource.
@@ -64,13 +64,10 @@ def query(self, resource_uri, metric_names, duration=None, **kwargs):
:type resource_uri: str
:param metric_names: The names of the metrics to retrieve.
:type metric_names: list[str]
- :param ~datetime.timedelta duration: The duration for which to query the data. This can also be accompanied
- with either start_time or end_time. If start_time or end_time is not provided, the current time is
- taken as the end time.
- :keyword datetime start_time: The start time from which to query the data. This should be accompanied
- with either end_time or duration.
- :keyword datetime end_time: The end time till which to query the data. This should be accompanied
- with either start_time or duration.
+ :keyword timespan: The timespan for which to query the data. This can be a timedelta,
+ a timedelta and a start datetime, or a start datetime/end datetime.
+ :paramtype timespan: ~datetime.timedelta or tuple[~datetime.datetime, ~datetime.timedelta]
+ or tuple[~datetime.datetime, ~datetime.datetime]
:keyword interval: The interval (i.e. timegrain) of the query.
:paramtype interval: ~datetime.timedelta
:keyword aggregations: The list of aggregation types to retrieve. Use `azure.monitor.query.AggregationType`
@@ -112,12 +109,11 @@ def query(self, resource_uri, metric_names, duration=None, **kwargs):
:dedent: 0
:caption: Get a response for a single Metrics Query
"""
- start = kwargs.pop('start_time', None)
- end = kwargs.pop('end_time', None)
+
aggregations = kwargs.pop("aggregations", None)
if aggregations:
kwargs.setdefault("aggregation", ",".join(aggregations))
- timespan = construct_iso8601(start, end, duration)
+ timespan = construct_iso8601(kwargs.pop("timespan", None))
kwargs.setdefault("metricnames", ",".join(metric_names))
kwargs.setdefault("timespan", timespan)
generated = self._metrics_op.list(resource_uri, connection_verify=False, **kwargs)
diff --git a/sdk/monitor/azure-monitor-query/azure/monitor/query/_models.py b/sdk/monitor/azure-monitor-query/azure/monitor/query/_models.py
index 6f3372a2f53d..79e36c32f3cf 100644
--- a/sdk/monitor/azure-monitor-query/azure/monitor/query/_models.py
+++ b/sdk/monitor/azure-monitor-query/azure/monitor/query/_models.py
@@ -158,13 +158,10 @@ class LogsBatchQuery(object):
:param query: The Analytics query. Learn more about the `Analytics query syntax
`_.
:type query: str
- :param ~datetime.timedelta duration: The duration for which to query the data. This can also be accompanied
- with either start_time or end_time. If start_time or end_time is not provided, the current time is
- taken as the end time.
- :keyword datetime start_time: The start time from which to query the data. This should be accompanied
- with either end_time or duration.
- :keyword datetime end_time: The end time till which to query the data. This should be accompanied
- with either start_time or duration.
+ :param timespan: The timespan for which to query the data. This can be a timedelta,
+ a timedelta and a start datetime, or a start datetime/end datetime.
+ :type timespan: ~datetime.timedelta or tuple[~datetime.datetime, ~datetime.timedelta]
+ or tuple[~datetime.datetime, ~datetime.datetime]
:keyword additional_workspaces: A list of workspaces that are included in the query.
These can be qualified workspace names, workspace Ids, or Azure resource Ids.
:paramtype additional_workspaces: list[str]
@@ -180,7 +177,7 @@ class LogsBatchQuery(object):
:paramtype headers: dict[str, str]
"""
- def __init__(self, query, workspace_id, duration=None, **kwargs): #pylint: disable=super-init-not-called
+ def __init__(self, query, workspace_id, timespan, **kwargs): #pylint: disable=super-init-not-called
# type: (str, str, Optional[str], Any) -> None
include_statistics = kwargs.pop("include_statistics", False)
include_visualization = kwargs.pop("include_visualization", False)
@@ -202,9 +199,7 @@ def __init__(self, query, workspace_id, duration=None, **kwargs): #pylint: disab
headers['Prefer'] = prefer
except TypeError:
headers = {'Prefer': prefer}
- start = kwargs.pop('start_time', None)
- end = kwargs.pop('end_time', None)
- timespan = construct_iso8601(start, end, duration)
+ timespan = construct_iso8601(timespan)
additional_workspaces = kwargs.pop("additional_workspaces", None)
self.id = kwargs.get("request_id", str(uuid.uuid4()))
self.body = {
diff --git a/sdk/monitor/azure-monitor-query/azure/monitor/query/aio/_logs_query_client_async.py b/sdk/monitor/azure-monitor-query/azure/monitor/query/aio/_logs_query_client_async.py
index e68cf4f3983b..06718475a095 100644
--- a/sdk/monitor/azure-monitor-query/azure/monitor/query/aio/_logs_query_client_async.py
+++ b/sdk/monitor/azure-monitor-query/azure/monitor/query/aio/_logs_query_client_async.py
@@ -5,8 +5,8 @@
# license information.
# --------------------------------------------------------------------------
-from datetime import timedelta
-from typing import Any, Union, Sequence, Dict, Optional, TYPE_CHECKING
+from datetime import datetime, timedelta
+from typing import Any, Tuple, Union, Sequence, Dict, Optional, TYPE_CHECKING
from azure.core.exceptions import HttpResponseError
from .._generated.aio._monitor_query_client import MonitorQueryClient
@@ -42,28 +42,22 @@ async def query(
self,
workspace_id: str,
query: str,
- duration: Optional[timedelta] = None,
+ timespan: Optional[Union[timedelta, Tuple[datetime, timedelta], Tuple[datetime, datetime]]] = None,
**kwargs: Any) -> LogsQueryResult:
"""Execute an Analytics query.
Executes an Analytics query for data.
- **Note**: Although the start_time, end_time, duration are optional parameters, it is highly
- recommended to specify the timespan. If not, the entire dataset is queried.
-
:param workspace_id: ID of the workspace. This is Workspace ID from the Properties blade in the
Azure portal.
:type workspace_id: str
:param query: The Analytics query. Learn more about the `Analytics query syntax
`_.
:type query: str
- :param ~datetime.timedelta duration: The duration for which to query the data. This can also be accompanied
- with either start_time or end_time. If start_time or end_time is not provided, the current time is
- taken as the end time.
- :keyword datetime start_time: The start time from which to query the data. This should be accompanied
- with either end_time or duration.
- :keyword datetime end_time: The end time till which to query the data. This should be accompanied
- with either start_time or duration.
+ :param timespan: The timespan for which to query the data. This can be a timedelta,
+ a timedelta and a start datetime, or a start datetime/end datetime.
+ :type timespan: ~datetime.timedelta or tuple[~datetime.datetime, ~datetime.timedelta]
+ or tuple[~datetime.datetime, ~datetime.datetime]
:keyword int server_timeout: the server timeout. The default timeout is 3 minutes,
and the maximum timeout is 10 minutes.
:keyword bool include_statistics: To get information about query statistics.
@@ -77,9 +71,7 @@ async def query(
:rtype: ~azure.monitor.query.LogsQueryResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
- start = kwargs.pop('start_time', None)
- end = kwargs.pop('end_time', None)
- timespan = construct_iso8601(start, end, duration)
+ timespan = construct_iso8601(timespan)
include_statistics = kwargs.pop("include_statistics", False)
include_visualization = kwargs.pop("include_visualization", False)
server_timeout = kwargs.pop("server_timeout", None)
diff --git a/sdk/monitor/azure-monitor-query/azure/monitor/query/aio/_metrics_query_client_async.py b/sdk/monitor/azure-monitor-query/azure/monitor/query/aio/_metrics_query_client_async.py
index 4924611a393a..a7095ef6f081 100644
--- a/sdk/monitor/azure-monitor-query/azure/monitor/query/aio/_metrics_query_client_async.py
+++ b/sdk/monitor/azure-monitor-query/azure/monitor/query/aio/_metrics_query_client_async.py
@@ -47,7 +47,6 @@ async def query(
self,
resource_uri: str,
metric_names: List,
- duration: Optional[timedelta] = None,
**kwargs: Any
) -> MetricsResult:
"""Lists the metric values for a resource.
@@ -59,13 +58,10 @@ async def query(
:type resource_uri: str
:param metric_names: The names of the metrics to retrieve.
:type metric_names: list
- :param ~datetime.timedelta duration: The duration for which to query the data. This can also be accompanied
- with either start_time or end_time. If start_time or end_time is not provided, the current time is
- taken as the end time.
- :keyword datetime start_time: The start time from which to query the data. This should be accompanied
- with either end_time or duration.
- :keyword datetime end_time: The end time till which to query the data. This should be accompanied
- with either start_time or duration.
+ :keyword timespan: The timespan for which to query the data. This can be a timedelta,
+ a timedelta and a start datetime, or a start datetime/end datetime.
+ :paramtype timespan: ~datetime.timedelta or tuple[~datetime.datetime, ~datetime.timedelta]
+ or tuple[~datetime.datetime, ~datetime.datetime]
:keyword interval: The interval (i.e. timegrain) of the query.
:paramtype interval: ~datetime.timedelta
:keyword aggregations: The list of aggregation types to retrieve. Use `azure.monitor.query.AggregationType`
@@ -98,9 +94,7 @@ async def query(
:rtype: ~azure.monitor.query.MetricsResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
- start = kwargs.pop('start_time', None)
- end = kwargs.pop('end_time', None)
- timespan = construct_iso8601(start, end, duration)
+ timespan = construct_iso8601(kwargs.pop('timespan', None))
kwargs.setdefault("metricnames", ",".join(metric_names))
kwargs.setdefault("timespan", timespan)
aggregations = kwargs.pop("aggregations", None)
diff --git a/sdk/monitor/azure-monitor-query/samples/async_samples/sample_log_query_client_async.py b/sdk/monitor/azure-monitor-query/samples/async_samples/sample_log_query_client_async.py
index 51d16b614467..4aebbde5e27f 100644
--- a/sdk/monitor/azure-monitor-query/samples/async_samples/sample_log_query_client_async.py
+++ b/sdk/monitor/azure-monitor-query/samples/async_samples/sample_log_query_client_async.py
@@ -24,7 +24,7 @@ async def logs_query():
# returns LogsQueryResult
async with client:
- response = await client.query(os.environ['LOG_WORKSPACE_ID'], query)
+ response = await client.query(os.environ['LOG_WORKSPACE_ID'], query, timespan=None)
if not response.tables:
print("No results for the query")
diff --git a/sdk/monitor/azure-monitor-query/samples/sample_log_query_client.py b/sdk/monitor/azure-monitor-query/samples/sample_log_query_client.py
index 7f95875af8f3..d9457b6d4b66 100644
--- a/sdk/monitor/azure-monitor-query/samples/sample_log_query_client.py
+++ b/sdk/monitor/azure-monitor-query/samples/sample_log_query_client.py
@@ -20,10 +20,8 @@
query = """AppRequests |
summarize avgRequestDuration=avg(DurationMs) by bin(TimeGenerated, 10m), _ResourceId"""
-end_time = datetime.now(UTC())
-
# returns LogsQueryResult
-response = client.query(os.environ['LOG_WORKSPACE_ID'], query, duration=timedelta(days=1), end_time=end_time)
+response = client.query(os.environ['LOG_WORKSPACE_ID'], query, timespan=timedelta(days=1))
if not response.tables:
print("No results for the query")
diff --git a/sdk/monitor/azure-monitor-query/samples/sample_log_query_client_without_pandas.py b/sdk/monitor/azure-monitor-query/samples/sample_log_query_client_without_pandas.py
index 89a55899e5d6..f6a1bff2517d 100644
--- a/sdk/monitor/azure-monitor-query/samples/sample_log_query_client_without_pandas.py
+++ b/sdk/monitor/azure-monitor-query/samples/sample_log_query_client_without_pandas.py
@@ -16,10 +16,8 @@
query = """AppRequests |
summarize avgRequestDuration=avg(DurationMs) by bin(TimeGenerated, 10m), _ResourceId"""
-end_time = datetime.now(UTC())
-
# returns LogsQueryResult
-response = client.query(os.environ['LOG_WORKSPACE_ID'], query, duration=timedelta(hours=1), end_time=end_time)
+response = client.query(os.environ['LOG_WORKSPACE_ID'], query, timespan=timedelta(hours=1))
if not response.tables:
print("No results for the query")
diff --git a/sdk/monitor/azure-monitor-query/samples/sample_logs_query_key_value_form.py b/sdk/monitor/azure-monitor-query/samples/sample_logs_query_key_value_form.py
index 6e018588b99d..29feebc087ce 100644
--- a/sdk/monitor/azure-monitor-query/samples/sample_logs_query_key_value_form.py
+++ b/sdk/monitor/azure-monitor-query/samples/sample_logs_query_key_value_form.py
@@ -16,10 +16,8 @@
query = """AppRequests |
summarize avgRequestDuration=avg(DurationMs) by bin(TimeGenerated, 10m), _ResourceId"""
-end_time = datetime.now(UTC())
-
# returns LogsQueryResult
-response = client.query(os.environ['LOG_WORKSPACE_ID'], query, duration=timedelta(days=1), end_time=end_time)
+response = client.query(os.environ['LOG_WORKSPACE_ID'], query, timespan=timedelta(days=1))
try:
table = response.tables[0]
diff --git a/sdk/monitor/azure-monitor-query/tests/async/test_logs_client_async.py b/sdk/monitor/azure-monitor-query/tests/async/test_logs_client_async.py
index 4568d2caf48c..6c9ea9f6fd13 100644
--- a/sdk/monitor/azure-monitor-query/tests/async/test_logs_client_async.py
+++ b/sdk/monitor/azure-monitor-query/tests/async/test_logs_client_async.py
@@ -1,3 +1,5 @@
+from datetime import timedelta
+from time import time
import pytest
import os
from azure.identity.aio import ClientSecretCredential
@@ -22,7 +24,7 @@ async def test_logs_auth():
summarize avgRequestDuration=avg(DurationMs) by bin(TimeGenerated, 10m), _ResourceId"""
# returns LogsQueryResult
- response = await client.query(os.environ['LOG_WORKSPACE_ID'], query)
+ response = await client.query(os.environ['LOG_WORKSPACE_ID'], query, timespan=None)
assert response is not None
assert response.tables is not None
@@ -35,6 +37,7 @@ async def test_logs_server_timeout():
response = await client.query(
os.environ['LOG_WORKSPACE_ID'],
"range x from 1 to 10000000000 step 1 | count",
+ timespan=None,
server_timeout=1,
)
assert e.message.contains('Gateway timeout')
@@ -46,18 +49,19 @@ async def test_logs_query_batch():
requests = [
LogsBatchQuery(
query="AzureActivity | summarize count()",
- timespan="PT1H",
+ timespan=timedelta(hours=1),
workspace_id= os.environ['LOG_WORKSPACE_ID']
),
LogsBatchQuery(
query= """AppRequests | take 10 |
summarize avgRequestDuration=avg(DurationMs) by bin(TimeGenerated, 10m), _ResourceId""",
- timespan="PT1H",
+ timespan=timedelta(hours=1),
workspace_id= os.environ['LOG_WORKSPACE_ID']
),
LogsBatchQuery(
query= "AppRequests | take 2",
- workspace_id= os.environ['LOG_WORKSPACE_ID']
+ workspace_id= os.environ['LOG_WORKSPACE_ID'],
+ timespan=None
),
]
response = await client.query_batch(requests)
@@ -76,6 +80,7 @@ async def test_logs_single_query_additional_workspaces_async():
response = await client.query(
os.environ['LOG_WORKSPACE_ID'],
query,
+ timespan=None,
additional_workspaces=[os.environ["SECONDARY_WORKSPACE_ID"]],
)
@@ -92,13 +97,13 @@ async def test_logs_query_batch_additional_workspaces():
requests = [
LogsBatchQuery(
query,
- timespan="PT1H",
+ timespan=timedelta(hours=1),
workspace_id= os.environ['LOG_WORKSPACE_ID'],
additional_workspaces=[os.environ['SECONDARY_WORKSPACE_ID']]
),
LogsBatchQuery(
query,
- timespan="PT1H",
+ timespan=timedelta(hours=1),
workspace_id= os.environ['LOG_WORKSPACE_ID'],
additional_workspaces=[os.environ['SECONDARY_WORKSPACE_ID']]
),
@@ -123,7 +128,7 @@ async def test_logs_single_query_with_render():
query = """AppRequests"""
# returns LogsQueryResult
- response = await client.query(os.environ['LOG_WORKSPACE_ID'], query, include_visualization=True)
+ response = await client.query(os.environ['LOG_WORKSPACE_ID'], query, timespan=None, include_visualization=True)
assert response.visualization is not None
@@ -135,7 +140,7 @@ async def test_logs_single_query_with_render_and_stats():
query = """AppRequests"""
# returns LogsQueryResult
- response = await client.query(os.environ['LOG_WORKSPACE_ID'], query, include_visualization=True, include_statistics=True)
+ response = await client.query(os.environ['LOG_WORKSPACE_ID'], query, timespan=None, include_visualization=True, include_statistics=True)
assert response.visualization is not None
assert response.statistics is not None
diff --git a/sdk/monitor/azure-monitor-query/tests/async/test_metrics_client_async.py b/sdk/monitor/azure-monitor-query/tests/async/test_metrics_client_async.py
index 397178256497..fb76a4f4d7ea 100644
--- a/sdk/monitor/azure-monitor-query/tests/async/test_metrics_client_async.py
+++ b/sdk/monitor/azure-monitor-query/tests/async/test_metrics_client_async.py
@@ -20,8 +20,7 @@ async def test_metrics_auth():
response = await client.query(
os.environ['METRICS_RESOURCE_URI'],
metric_names=["MatchedEventCount"],
- start_time=datetime(2021, 6, 21),
- duration=timedelta(days=1),
+ timespan=timedelta(days=1),
aggregations=[AggregationType.COUNT]
)
assert response
diff --git a/sdk/monitor/azure-monitor-query/tests/perfstress_tests/metric_query.py b/sdk/monitor/azure-monitor-query/tests/perfstress_tests/metric_query.py
index 39512ae97e29..11995ed8812c 100644
--- a/sdk/monitor/azure-monitor-query/tests/perfstress_tests/metric_query.py
+++ b/sdk/monitor/azure-monitor-query/tests/perfstress_tests/metric_query.py
@@ -49,8 +49,6 @@ def run_sync(self):
self.metrics_client.query(
self.metrics_uri,
self.names,
- start_time=datetime(2021, 7, 25, 0, 0, 0, tzinfo=timezone.utc),
- end_time=datetime(2021, 7, 26, 0, 0, 0, tzinfo=timezone.utc),
aggregations=self.aggregations
)
@@ -64,7 +62,5 @@ async def run_async(self):
await self.async_metrics_client.query(
self.metrics_uri,
self.names,
- start_time=datetime(2021, 7, 25, 0, 0, 0, tzinfo=timezone.utc),
- end_time=datetime(2021, 7, 26, 0, 0, 0, tzinfo=timezone.utc),
aggregations=self.aggregations
)
diff --git a/sdk/monitor/azure-monitor-query/tests/perfstress_tests/single_query.py b/sdk/monitor/azure-monitor-query/tests/perfstress_tests/single_query.py
index a8530961df54..724c5dec3d95 100644
--- a/sdk/monitor/azure-monitor-query/tests/perfstress_tests/single_query.py
+++ b/sdk/monitor/azure-monitor-query/tests/perfstress_tests/single_query.py
@@ -46,11 +46,12 @@ def run_sync(self):
Avoid putting any ancilliary logic (e.g. generating UUIDs), and put this in the setup/init instead
so that we're only measuring the client API call.
"""
+ start_time=datetime(2021, 7, 25, 0, 0, 0, tzinfo=timezone.utc)
+ end_time=datetime(2021, 7, 26, 0, 0, 0, tzinfo=timezone.utc)
self.logs_client.query(
self.workspace_id,
self.query,
- start_time=datetime(2021, 7, 25, 0, 0, 0, tzinfo=timezone.utc),
- end_time=datetime(2021, 7, 26, 0, 0, 0, tzinfo=timezone.utc),
+ timespan=(start_time, end_time)
)
async def run_async(self):
@@ -60,9 +61,10 @@ async def run_async(self):
Avoid putting any ancilliary logic (e.g. generating UUIDs), and put this in the setup/init instead
so that we're only measuring the client API call.
"""
+ start_time=datetime(2021, 7, 25, 0, 0, 0, tzinfo=timezone.utc)
+ end_time=datetime(2021, 7, 26, 0, 0, 0, tzinfo=timezone.utc)
await self.async_logs_client.query(
self.workspace_id,
self.query,
- start_time=datetime(2021, 7, 25, 0, 0, 0, tzinfo=timezone.utc),
- end_time=datetime(2021, 7, 26, 0, 0, 0, tzinfo=timezone.utc),
+ timespan=(start_time, end_time)
)
diff --git a/sdk/monitor/azure-monitor-query/tests/test_logs_client.py b/sdk/monitor/azure-monitor-query/tests/test_logs_client.py
index 2f46e16b74d8..e9e91b316dcc 100644
--- a/sdk/monitor/azure-monitor-query/tests/test_logs_client.py
+++ b/sdk/monitor/azure-monitor-query/tests/test_logs_client.py
@@ -1,3 +1,4 @@
+from datetime import timedelta
import pytest
import os
from azure.identity import ClientSecretCredential
@@ -34,7 +35,7 @@ def test_logs_single_query_with_non_200():
where TimeGenerated > ago(12h)"""
with pytest.raises(HttpResponseError) as e:
- client.query(os.environ['LOG_WORKSPACE_ID'], query)
+ client.query(os.environ['LOG_WORKSPACE_ID'], query, timespan=None)
assert "SemanticError" in e.value.message
@@ -44,7 +45,7 @@ def test_logs_single_query_with_partial_success():
client = LogsQueryClient(credential)
query = "set truncationmaxrecords=1; union * | project TimeGenerated | take 10"
- response = client.query(os.environ['LOG_WORKSPACE_ID'], query)
+ response = client.query(os.environ['LOG_WORKSPACE_ID'], query, timespan=None)
assert response is not None
@@ -57,6 +58,7 @@ def test_logs_server_timeout():
response = client.query(
os.environ['LOG_WORKSPACE_ID'],
"range x from 1 to 1000000000000000 step 1 | count",
+ timespan=None,
server_timeout=1,
)
assert 'Gateway timeout' in e.value.message
@@ -68,18 +70,19 @@ def test_logs_query_batch():
requests = [
LogsBatchQuery(
query="AzureActivity | summarize count()",
- timespan="PT1H",
+ timespan=timedelta(hours=1),
workspace_id= os.environ['LOG_WORKSPACE_ID']
),
LogsBatchQuery(
query= """AppRequests | take 10 |
summarize avgRequestDuration=avg(DurationMs) by bin(TimeGenerated, 10m), _ResourceId""",
- timespan="PT1H",
+ timespan=timedelta(hours=1),
workspace_id= os.environ['LOG_WORKSPACE_ID']
),
LogsBatchQuery(
query= "AppRequests | take 2",
- workspace_id= os.environ['LOG_WORKSPACE_ID']
+ workspace_id= os.environ['LOG_WORKSPACE_ID'],
+ timespan=None
),
]
response = client.query_batch(requests)
@@ -93,7 +96,7 @@ def test_logs_single_query_with_statistics():
query = """AppRequests"""
# returns LogsQueryResult
- response = client.query(os.environ['LOG_WORKSPACE_ID'], query, include_statistics=True)
+ response = client.query(os.environ['LOG_WORKSPACE_ID'], query, timespan=None, include_statistics=True)
assert response.statistics is not None
@@ -115,7 +118,7 @@ def test_logs_single_query_with_render_and_stats():
query = """AppRequests"""
# returns LogsQueryResult
- response = client.query(os.environ['LOG_WORKSPACE_ID'], query, include_visualization=True, include_statistics=True)
+ response = client.query(os.environ['LOG_WORKSPACE_ID'], query, timespan=None, include_visualization=True, include_statistics=True)
assert response.visualization is not None
assert response.statistics is not None
@@ -127,19 +130,20 @@ def test_logs_query_batch_with_statistics_in_some():
requests = [
LogsBatchQuery(
query="AzureActivity | summarize count()",
- timespan="PT1H",
+ timespan=timedelta(hours=1),
workspace_id= os.environ['LOG_WORKSPACE_ID']
),
LogsBatchQuery(
query= """AppRequests|
summarize avgRequestDuration=avg(DurationMs) by bin(TimeGenerated, 10m), _ResourceId""",
- timespan="PT1H",
+ timespan=timedelta(hours=1),
workspace_id= os.environ['LOG_WORKSPACE_ID'],
include_statistics=True
),
LogsBatchQuery(
query= "AppRequests",
workspace_id= os.environ['LOG_WORKSPACE_ID'],
+ timespan=None,
include_statistics=True
),
]
@@ -160,6 +164,7 @@ def test_logs_single_query_additional_workspaces():
response = client.query(
os.environ['LOG_WORKSPACE_ID'],
query,
+ timespan=None,
additional_workspaces=[os.environ["SECONDARY_WORKSPACE_ID"]],
)
@@ -175,13 +180,13 @@ def test_logs_query_batch_additional_workspaces():
requests = [
LogsBatchQuery(
query,
- timespan="PT1H",
+ timespan=timedelta(hours=1),
workspace_id= os.environ['LOG_WORKSPACE_ID'],
additional_workspaces=[os.environ['SECONDARY_WORKSPACE_ID']]
),
LogsBatchQuery(
query,
- timespan="PT1H",
+ timespan=timedelta(hours=1),
workspace_id= os.environ['LOG_WORKSPACE_ID'],
additional_workspaces=[os.environ['SECONDARY_WORKSPACE_ID']]
),
diff --git a/sdk/monitor/azure-monitor-query/tests/test_logs_timespans.py b/sdk/monitor/azure-monitor-query/tests/test_logs_timespans.py
index 5045ebe78125..b92ae331933b 100644
--- a/sdk/monitor/azure-monitor-query/tests/test_logs_timespans.py
+++ b/sdk/monitor/azure-monitor-query/tests/test_logs_timespans.py
@@ -30,7 +30,7 @@ def callback(request):
dic = json.loads(request.http_request.body)
assert dic.get('timespan') is None
# returns LogsQueryResult
- client.query(os.environ['LOG_WORKSPACE_ID'], query)
+ client.query(os.environ['LOG_WORKSPACE_ID'], query, timespan=None)
@pytest.mark.live_test_only
def test_query_start_and_end_time():
@@ -45,22 +45,7 @@ def callback(request):
dic = json.loads(request.http_request.body)
assert dic.get('timespan') is not None
- client.query(os.environ['LOG_WORKSPACE_ID'], query, start_time=start_time, end_time=end_time, raw_request_hook=callback)
-
-@pytest.mark.live_test_only
-def test_query_duration_and_end_time():
- credential = _credential()
- client = LogsQueryClient(credential)
- query = "AppRequests | take 5"
-
- end_time = datetime.now(UTC())
- duration = timedelta(days=3)
-
- def callback(request):
- dic = json.loads(request.http_request.body)
- assert 'PT259200.0S/' in dic.get('timespan')
-
- client.query(os.environ['LOG_WORKSPACE_ID'], query, duration=duration, end_time=end_time, raw_request_hook=callback)
+ client.query(os.environ['LOG_WORKSPACE_ID'], query, timespan=(start_time, end_time), raw_request_hook=callback)
@pytest.mark.live_test_only
def test_query_duration_and_start_time():
@@ -76,7 +61,7 @@ def callback(request):
dic = json.loads(request.http_request.body)
assert '/PT259200.0S' in dic.get('timespan')
- client.query(os.environ['LOG_WORKSPACE_ID'], query, duration=duration, start_time=start_time, raw_request_hook=callback)
+ client.query(os.environ['LOG_WORKSPACE_ID'], query, timespan=(start_time,duration), raw_request_hook=callback)
@pytest.mark.live_test_only
@@ -91,7 +76,7 @@ def callback(request):
dic = json.loads(request.http_request.body)
assert 'PT259200.0S' in dic.get('timespan')
- client.query(os.environ['LOG_WORKSPACE_ID'], query, duration=duration, raw_request_hook=callback)
+ client.query(os.environ['LOG_WORKSPACE_ID'], query, timespan=duration, raw_request_hook=callback)
def test_duration_to_iso8601():
d1 = timedelta(days=1)
@@ -102,19 +87,13 @@ def test_duration_to_iso8601():
d6 = timedelta(milliseconds=100000)
d7 = timedelta(hours=24, days=1)
- assert construct_iso8601(duration=d1) == 'PT86400.0S'
- assert construct_iso8601(duration=d2) == 'PT604800.0S'
- assert construct_iso8601(duration=d3) == 'PT2160000.0S'
- assert construct_iso8601(duration=d4) == 'PT10.0S'
- assert construct_iso8601(duration=d5) == 'PT0.001S'
- assert construct_iso8601(duration=d5) == 'PT0.001S'
- assert construct_iso8601(duration=d7) == 'PT172800.0S'
-
- with pytest.raises(ValueError, match="End time must be provided along with duration or start time."):
- construct_iso8601(end=datetime.now(UTC()))
-
- with pytest.raises(ValueError, match="Start time must be provided along with duration or end time."):
- construct_iso8601(start=datetime.now(UTC()))
+ assert construct_iso8601(timespan=d1) == 'PT86400.0S'
+ assert construct_iso8601(timespan=d2) == 'PT604800.0S'
+ assert construct_iso8601(timespan=d3) == 'PT2160000.0S'
+ assert construct_iso8601(timespan=d4) == 'PT10.0S'
+ assert construct_iso8601(timespan=d5) == 'PT0.001S'
+ assert construct_iso8601(timespan=d6) == 'PT100.0S'
+ assert construct_iso8601(timespan=d7) == 'PT172800.0S'
- with pytest.raises(ValueError, match="start_time can only be provided with duration or end_time, but not both."):
- construct_iso8601(end=datetime.now(UTC()), start=datetime(2020, 10, 10), duration=d3)
+ with pytest.raises(ValueError, match="timespan must be a timedelta or a tuple."):
+ construct_iso8601(timespan=(datetime.now(UTC())))
diff --git a/sdk/monitor/azure-monitor-query/tests/test_metrics_client.py b/sdk/monitor/azure-monitor-query/tests/test_metrics_client.py
index b1f6df6a9d7e..95ee209b6775 100644
--- a/sdk/monitor/azure-monitor-query/tests/test_metrics_client.py
+++ b/sdk/monitor/azure-monitor-query/tests/test_metrics_client.py
@@ -19,8 +19,7 @@ def test_metrics_auth():
response = client.query(
os.environ['METRICS_RESOURCE_URI'],
metric_names=["MatchedEventCount"],
- start_time=datetime(2021, 6, 21),
- duration=timedelta(days=1),
+ timespan=timedelta(days=1),
aggregations=[AggregationType.COUNT]
)
assert response