@@ -6,6 +6,7 @@
  */
 
 import { mapValues, first, last, isNaN } from 'lodash';
+import moment from 'moment';
 import { ElasticsearchClient } from 'kibana/server';
 import {
   isTooManyBucketsPreviewException,
@@ -121,9 +122,14 @@ const getMetric: (
   const intervalAsSeconds = getIntervalInSeconds(interval);
   const intervalAsMS = intervalAsSeconds * 1000;
 
-  const to = roundTimestamp(timeframe ? timeframe.end : Date.now(), timeUnit);
+  const to = moment(timeframe ? timeframe.end : Date.now())
+    .add(1, timeUnit)
+    .startOf(timeUnit)
+    .valueOf();
 
   // We need enough data for 5 buckets worth of data. We also need
   // to convert the intervalAsSeconds to milliseconds.
+  // TODO: We only need to get 5 buckets for the rate query, so this logic should move there.
   const minimumFrom = to - intervalAsMS * MINIMUM_BUCKETS;
 
   const from = roundTimestamp(
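A quick note on the new `to` computation. The `add(1, timeUnit).startOf(timeUnit)` chain rounds the end of the window *up* to the next whole-unit boundary, so the window always ends on a clean bucket edge just past the requested end. A minimal sketch of the behavior (the timestamp and `timeUnit` values here are made up for illustration; only the moment calls come from the diff):

```ts
import moment from 'moment';

// Illustrative values only.
const timeUnit = 'm'; // minutes
const end = Date.parse('2021-08-01T12:34:56.789Z');

// add(1, unit) then startOf(unit) == "round up to the next whole unit":
// 12:34:56.789 -> (add) 12:35:56.789 -> (startOf) 12:35:00.000
const to = moment(end).add(1, timeUnit).startOf(timeUnit).valueOf();
// to === Date.parse('2021-08-01T12:35:00.000Z')
```

Note that an `end` already sitting exactly on a boundary still gets pushed to the *next* boundary, since the add happens before the truncation.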
@@ -225,29 +231,40 @@ const getValuesFromAggregations = (
   if (!buckets.length) return null; // No Data state
 
   if (aggType === Aggregators.COUNT) {
-    return buckets
-      .map((bucket) => ({
-        key: bucket.from_as_string,
-        value: bucket.doc_count,
-      }))
-      .filter(dropPartialBuckets(dropPartialBucketsOptions));
+    return buckets.map((bucket) => ({
+      key: bucket.from_as_string,
+      value: bucket.doc_count,
+    }));
   }
   if (aggType === Aggregators.P95 || aggType === Aggregators.P99) {
-    return buckets
-      .map((bucket) => {
-        const values = bucket.aggregatedValue?.values || [];
-        const firstValue = first(values);
-        if (!firstValue) return null;
-        return { key: bucket.from_as_string, value: firstValue.value };
-      })
-      .filter(dropPartialBuckets(dropPartialBucketsOptions));
+    return buckets.map((bucket) => {
+      const values = bucket.aggregatedValue?.values || [];
+      const firstValue = first(values);
+      if (!firstValue) return null;
+      return { key: bucket.from_as_string, value: firstValue.value };
+    });
   }
-  return buckets
-    .map((bucket) => ({
+
+  if (aggType === Aggregators.AVERAGE) {
+    return buckets.map((bucket) => ({
       key: bucket.key_as_string ?? bucket.from_as_string,
       value: bucket.aggregatedValue?.value ?? null,
-    }))
-    .filter(dropPartialBuckets(dropPartialBucketsOptions));
+    }));
+  }
+
+  if (aggType === Aggregators.RATE) {
+    return buckets
+      .map((bucket) => ({
+        key: bucket.key_as_string ?? bucket.from_as_string,
+        value: bucket.aggregatedValue?.value ?? null,
+      }))
+      .filter(dropPartialBuckets(dropPartialBucketsOptions));
+  }
+
+  return buckets.map((bucket) => ({
+    key: bucket.key_as_string ?? bucket.from_as_string,
+    value: bucket.aggregatedValue?.value ?? null,
+  }));
 } catch (e) {
   return NaN; // Error state
 }
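The upshot of this hunk: `dropPartialBuckets` now runs only for `Aggregators.RATE`, while the COUNT, P95/P99, AVERAGE, and fallback paths keep every bucket, including the trailing partial one. For orientation, here is a sketch of the *kind* of predicate being applied — this is an illustrative stand-in, not the plugin's actual `dropPartialBuckets` implementation, and the option names are assumed from the call site:

```ts
type Point = { key: string | undefined; value: number | null };

// Illustrative stand-in: keep a bucket only if it fits entirely inside
// [from, to], dropping the partial buckets at either edge of the window.
const dropPartialBuckets =
  (options: { from: number; to: number; bucketSizeInMillis: number }) =>
  (point: Point | null): boolean => {
    if (!point || !point.key) return false; // guard against null rows
    const bucketStart = new Date(point.key).valueOf();
    return (
      bucketStart >= options.from &&
      bucketStart + options.bucketSizeInMillis <= options.to
    );
  };
```

The diff that follows is the second file in the PR: the Elasticsearch query builder that the code above calls into.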
@@ -4,15 +4,13 @@
  * 2.0; you may not use this file except in compliance with the Elastic License
  * 2.0.
  */
-import moment from 'moment';
 
 import { networkTraffic } from '../../../../../common/inventory_models/shared/metrics/snapshot/network_traffic';
 import { MetricExpressionParams, Aggregators } from '../types';
 import { getIntervalInSeconds } from '../../../../utils/get_interval_in_seconds';
 import { roundTimestamp } from '../../../../utils/round_timestamp';
 import { createPercentileAggregation } from './create_percentile_aggregation';
 import { calculateDateHistogramOffset } from '../../../metrics/lib/calculate_date_histogram_offset';
 
-const MINIMUM_BUCKETS = 5;
 const COMPOSITE_RESULTS_PER_PAGE = 100;
 
 const getParsedFilterQuery: (filterQuery: string | undefined) => Record<string, any> | null = (
@@ -38,20 +36,8 @@ export const getElasticsearchMetricQuery = (
   const interval = `${timeSize}${timeUnit}`;
   const intervalAsSeconds = getIntervalInSeconds(interval);
   const intervalAsMS = intervalAsSeconds * 1000;
-
-  const to = moment(timeframe ? timeframe.end : Date.now())
-    .add(1, timeUnit)
-    .startOf(timeUnit)
-    .valueOf();
-
-  // We need enough data for 5 buckets worth of data. We also need
-  // to convert the intervalAsSeconds to milliseconds.
-  const minimumFrom = to - intervalAsMS * MINIMUM_BUCKETS;
-
-  const from = roundTimestamp(
-    timeframe && timeframe.start <= minimumFrom ? timeframe.start : minimumFrom,
-    timeUnit
-  );
+  const to = timeframe.end;
+  const from = timeframe.start;
 
   const deliveryDelay = 60 * 1000; // INFO: This allows us to account for any delay ES has in indexing the most recent data.
 
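Net effect of this hunk: `getElasticsearchMetricQuery` stops doing any time math of its own — the moment rounding, the `MINIMUM_BUCKETS` padding, and the `roundTimestamp` call all drop out — and it simply trusts the window the caller computed, which also implies `timeframe` can no longer be optional here. A rough sketch of the caller-side math this replaces, pieced together from the first file's hunks (the concrete values and the `Math.min` spelling are illustrative; the real code also rounds `from` with `roundTimestamp`):

```ts
import moment from 'moment';

// Illustrative inputs.
const timeUnit = 'm';
const intervalAsMS = 60_000;
const MINIMUM_BUCKETS = 5; // now lives with the caller, not the query builder

// Round the end up to the next whole unit (same trick as the first file).
const to = moment(Date.now()).add(1, timeUnit).startOf(timeUnit).valueOf();

// Widen the start so at least five whole buckets fit in the window.
const minimumFrom = to - intervalAsMS * MINIMUM_BUCKETS;
const requestedStart = to - 2 * intervalAsMS; // hypothetical requested window
const from = Math.min(requestedStart, minimumFrom);

// The query builder then uses { start: from, end: to } as-is.
```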