Swap to using integers for extended bounds in the date histogram aggregation (#61)

* Swap to using integers for extended bounds in the date histogram aggregation instead of strings, so that alerts using _timesinceepoch continue to work

* Update docs

* Fixed tests
kyle-sammons authored Oct 24, 2024
1 parent 130b15d commit 41a28e5
Showing 8 changed files with 30 additions and 19 deletions.
9 changes: 4 additions & 5 deletions .github/CONTRIBUTING.md
@@ -119,11 +119,10 @@ Astra datasource URL inside your Grafana instance.
 mage -v
 ```

-3. List all available Mage targets for additional commands:
-
-```bash
-mage -l
-```
+3. Move the binaries into the target directory
+```bash
+mv dist/gpx_slack_astra_app_datasource_backend_* dist/datasource
+```

 ### Releases

3 changes: 3 additions & 0 deletions docker-compose.yaml
@@ -21,3 +21,6 @@ services:
 GF_PATHS_PLUGINS: "/var/lib/grafana/plugins"
 GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS: "slack-astra-app,slack-astra-app-backend-datasource"
 GF_SERVER_ENABLE_GZIP: "true"
+GF_UNIFIED_ALERTING_ENABLED: "false"
+GF_ALERTING_ENABLED: "true"
+GF_DATABASE_WAL: "true"
8 changes: 7 additions & 1 deletion go.mod
@@ -12,6 +12,12 @@ require (
 golang.org/x/net v0.0.0-20210614182718-04defd469f4e
 )

-require github.com/grafana/opensearch-datasource v1.2.0
+require (
+	github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect
+	github.com/grafana/opensearch-datasource v1.2.0
+	github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5 // indirect
+	github.com/sirupsen/logrus v1.6.0 // indirect
+	golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 // indirect
+)

 replace golang.org/x/sys => golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c
2 changes: 2 additions & 0 deletions pkg/astra/astra.go
@@ -16,6 +16,8 @@ type AstraExecutor struct{}

 var (
 intervalCalculator tsdb.IntervalCalculator
+_ backend.QueryDataHandler = (*AstraDatasource)(nil)
+_ backend.CheckHealthHandler = (*AstraDatasource)(nil)
 )

 type TsdbQueryEndpoint interface {
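The two added blank-identifier assignments are the usual Go compile-time check that *AstraDatasource still satisfies the plugin SDK's backend.QueryDataHandler and backend.CheckHealthHandler interfaces. A minimal standalone sketch of the idiom, using a hypothetical Greeter interface rather than anything from this repository:

    package main

    // Greeter is a hypothetical interface used only to illustrate the idiom.
    type Greeter interface {
        Greet() string
    }

    type englishGreeter struct{}

    func (englishGreeter) Greet() string { return "hello" }

    // This assignment compiles only while *englishGreeter satisfies Greeter;
    // if the method set drifts, the build breaks here instead of at a call site.
    var _ Greeter = (*englishGreeter)(nil)

    func main() {}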
3 changes: 1 addition & 2 deletions pkg/astra/client/client.go
@@ -208,7 +208,6 @@ func (c *baseClientImpl) encodeBatchRequests(requests []*multiRequest) ([]byte,
 body := string(reqBody)
 body = strings.ReplaceAll(body, "$__interval_ms", strconv.FormatInt(r.interval.Milliseconds(), 10))
 body = strings.ReplaceAll(body, "$__interval", r.interval.Text)
-
 payload.WriteString(body + "\n")
 }

@@ -464,7 +463,7 @@ func (c *baseClientImpl) executePPLQueryRequest(method, uriPath string, body []b
 return nil, err
 }

-clientLog.Debug("Executing request", "url", req.URL.String(), "method", method)
+clientLog.Debug("Executing PPL request", "url", req.URL.String(), "method", method)

 var reqInfo *PPLRequestInfo
 if c.debugEnabled {
6 changes: 3 additions & 3 deletions pkg/astra/client/models.go
@@ -80,7 +80,7 @@ type SearchResponse struct {
 Error map[string]interface{} `json:"error"`
 Aggregations map[string]interface{} `json:"aggregations"`
 Hits *SearchResponseHits `json:"hits"`
-Shards map[string]interface{} `json:"_shards"`
+Shards map[string]interface{} `json:"_shards"`
 }

 // MultiSearchRequest represents a multi search request
@@ -271,8 +271,8 @@ type TermsAggregation struct {

 // ExtendedBounds represents extended bounds
 type ExtendedBounds struct {
-Min string `json:"min"`
-Max string `json:"max"`
+Min int64 `json:"min"`
+Max int64 `json:"max"`
 }

 // GeoHashGridAggregation represents a geo hash grid aggregation
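A minimal sketch, assuming only the ExtendedBounds struct shown above, of what the type change means for the serialized aggregation: the bounds now marshal as bare epoch-millisecond numbers rather than quoted strings, which is what range math against a numeric field like _timesinceepoch needs.

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // ExtendedBounds mirrors pkg/astra/client/models.go after this change.
    type ExtendedBounds struct {
        Min int64 `json:"min"`
        Max int64 `json:"max"`
    }

    func main() {
        // 2018-05-15 17:50:00 to 17:55:00 UTC (the window used in the updated test) as epoch milliseconds.
        b, _ := json.Marshal(ExtendedBounds{Min: 1526406600000, Max: 1526406900000})
        fmt.Println(string(b)) // prints {"min":1526406600000,"max":1526406900000}: numeric values, not quoted strings
    }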
4 changes: 2 additions & 2 deletions pkg/astra/lucene_handler.go
@@ -69,7 +69,7 @@ func (h *luceneHandler) processQuery(q *Query) error {
 for _, bucketAgg := range q.BucketAggs {
 switch bucketAgg.Type {
 case dateHistType:
-aggBuilder = addDateHistogramAgg(aggBuilder, bucketAgg, from, to)
+aggBuilder = addDateHistogramAgg(aggBuilder, bucketAgg, int64(fromMs), int64(toMs))
 case histogramType:
 aggBuilder = addHistogramAgg(aggBuilder, bucketAgg)
 case filtersType:
@@ -168,7 +168,7 @@ func (h *luceneHandler) executeQueries() (*backend.QueryDataResponse, error) {
 return rp.getTimeSeries()
 }

-func addDateHistogramAgg(aggBuilder es.AggBuilder, bucketAgg *BucketAgg, timeFrom, timeTo string) es.AggBuilder {
+func addDateHistogramAgg(aggBuilder es.AggBuilder, bucketAgg *BucketAgg, timeFrom, timeTo int64) es.AggBuilder {
 aggBuilder.DateHistogram(bucketAgg.ID, bucketAgg.Field, func(a *es.DateHistogramAgg, b es.AggBuilder) {
 a.Interval = bucketAgg.Settings.Get("interval").MustString("auto")
 a.MinDocCount = bucketAgg.Settings.Get("min_doc_count").MustInt(0)
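For reference, a hedged sketch of producing the int64 millisecond bounds that the new addDateHistogramAgg signature expects, mirroring the conversion used in the updated test below:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        from := time.Date(2018, 5, 15, 17, 50, 0, 0, time.UTC)
        to := time.Date(2018, 5, 15, 17, 55, 0, 0, time.UTC)

        // Nanoseconds scaled down to epoch milliseconds, the same expression the test uses.
        fromMs := from.UnixNano() / int64(time.Millisecond)
        toMs := to.UnixNano() / int64(time.Millisecond)

        fmt.Println(fromMs, toMs) // 1526406600000 1526406900000
    }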
14 changes: 8 additions & 6 deletions pkg/astra/time_series_query_test.go
@@ -15,8 +15,10 @@ import (
 func TestExecuteTimeSeriesQuery(t *testing.T) {
 from := time.Date(2018, 5, 15, 17, 50, 0, 0, time.UTC)
 to := time.Date(2018, 5, 15, 17, 55, 0, 0, time.UTC)
-fromStr := fmt.Sprintf("%d", from.UnixNano()/int64(time.Millisecond))
-toStr := fmt.Sprintf("%d", to.UnixNano()/int64(time.Millisecond))
+fromMs := from.UnixNano() / int64(time.Millisecond)
+toMs := to.UnixNano() / int64(time.Millisecond)
+fromStr := fmt.Sprintf("%d", fromMs)
+toStr := fmt.Sprintf("%d", toMs)

 Convey("Test execute time series query", t, func() {
 Convey("With defaults on Elasticsearch 2.0.0", func() {
@@ -36,8 +38,8 @@ func TestExecuteTimeSeriesQuery(t *testing.T) {
 So(sr.Aggs[0].Key, ShouldEqual, "2")
 dateHistogramAgg := sr.Aggs[0].Aggregation.Aggregation.(*es.DateHistogramAgg)
 So(dateHistogramAgg.Field, ShouldEqual, "@timestamp")
-So(dateHistogramAgg.ExtendedBounds.Min, ShouldEqual, fromStr)
-So(dateHistogramAgg.ExtendedBounds.Max, ShouldEqual, toStr)
+So(dateHistogramAgg.ExtendedBounds.Min, ShouldEqual, fromMs)
+So(dateHistogramAgg.ExtendedBounds.Max, ShouldEqual, toMs)
 })

 Convey("With defaults on Elasticsearch 5.0.0", func() {
@@ -51,8 +53,8 @@ func TestExecuteTimeSeriesQuery(t *testing.T) {
 sr := c.multisearchRequests[0].Requests[0]
 So(sr.Query.Bool.Filters[0].(*es.RangeFilter).Key, ShouldEqual, c.timeField)
 So(sr.Aggs[0].Key, ShouldEqual, "2")
-So(sr.Aggs[0].Aggregation.Aggregation.(*es.DateHistogramAgg).ExtendedBounds.Min, ShouldEqual, fromStr)
-So(sr.Aggs[0].Aggregation.Aggregation.(*es.DateHistogramAgg).ExtendedBounds.Max, ShouldEqual, toStr)
+So(sr.Aggs[0].Aggregation.Aggregation.(*es.DateHistogramAgg).ExtendedBounds.Min, ShouldEqual, fromMs)
+So(sr.Aggs[0].Aggregation.Aggregation.(*es.DateHistogramAgg).ExtendedBounds.Max, ShouldEqual, toMs)
 })

 Convey("With multiple bucket aggs", func() {
