|
19 | 19 |
|
20 | 20 | package org.elasticsearch.search.aggregations.bucket.histogram; |
21 | 21 |
|
22 | | -import org.apache.lucene.index.IndexReader; |
23 | | -import org.apache.lucene.index.LeafReaderContext; |
24 | | -import org.apache.lucene.index.SortedNumericDocValues; |
25 | | -import org.apache.lucene.search.DocIdSetIterator; |
26 | 22 | import org.elasticsearch.common.Rounding; |
27 | 23 | import org.elasticsearch.common.io.stream.StreamInput; |
28 | 24 | import org.elasticsearch.common.io.stream.StreamOutput; |
29 | 25 | import org.elasticsearch.common.unit.TimeValue; |
30 | 26 | import org.elasticsearch.common.xcontent.ObjectParser; |
31 | 27 | import org.elasticsearch.common.xcontent.XContentBuilder; |
32 | 28 | import org.elasticsearch.common.xcontent.XContentParser; |
33 | | -import org.elasticsearch.index.fielddata.IndexNumericFieldData; |
34 | | -import org.elasticsearch.index.fielddata.LeafNumericFieldData; |
35 | | -import org.elasticsearch.index.mapper.DateFieldMapper; |
36 | | -import org.elasticsearch.index.mapper.MappedFieldType; |
37 | | -import org.elasticsearch.index.mapper.MappedFieldType.Relation; |
38 | 29 | import org.elasticsearch.index.query.QueryShardContext; |
39 | 30 | import org.elasticsearch.search.aggregations.AggregationBuilder; |
40 | 31 | import org.elasticsearch.search.aggregations.AggregatorFactories; |
|
50 | 41 | import org.elasticsearch.search.aggregations.support.ValuesSourceType; |
51 | 42 |
|
52 | 43 | import java.io.IOException; |
53 | | -import java.time.Instant; |
54 | 44 | import java.time.ZoneId; |
55 | | -import java.time.ZoneOffset; |
56 | | -import java.time.zone.ZoneOffsetTransition; |
57 | 45 | import java.util.List; |
58 | 46 | import java.util.Map; |
59 | 47 | import java.util.Objects; |
@@ -401,144 +389,32 @@ public String getType() { |
401 | 389 | return NAME; |
402 | 390 | } |
403 | 391 |
|
404 | | - /** |
405 | | - * Returns a {@linkplain ZoneId} that functions the same as |
406 | | - * {@link #timeZone()} on the data in the shard referred to by |
407 | | - * {@code context}. It <strong>attempts</strong> to convert zones that |
408 | | - * have non-fixed offsets into fixed offset zones that produce the |
409 | | - * same results on all data in the shard. |
410 | | - * <p> |
411 | | - * We go about this in several phases: |
412 | | - * <ol> |
413 | | - * <li>A bunch of preflight checks to see if we *can* optimize it |
414 | | - * <li>Find any Instant in the shard |
415 | | - * <li>Find the DST transition before and after that Instant |
416 | | - * <li>Round those into the interval |
417 | | - * <li>Check whether the rounded values include all values within the shard |
418 | | - * <li>If they do then return a fixed offset time zone because it |
419 | | - * will return the same values for all time in the shard as the |
420 | | - * original time zone, but faster |
421 | | - * <li>Otherwise return the original time zone. It'll be slower, but |
422 | | - * correct. |
423 | | - * </ol> |
424 | | - * <p> |
425 | | - * NOTE: this can't be done in rewrite() because the timezone is then also used on the |
426 | | - * coordinating node in order to generate missing buckets, which may cross a transition |
427 | | - * even though data on the shards doesn't. |
428 | | - */ |
429 | | - ZoneId rewriteTimeZone(QueryShardContext context) throws IOException { |
430 | | - final ZoneId tz = timeZone(); |
431 | | - if (tz == null || tz.getRules().isFixedOffset()) { |
432 | | - // This time zone is already as fast as it is going to get. |
433 | | - return tz; |
434 | | - } |
435 | | - if (script() != null) { |
436 | | - // We can't be sure what dates the script will return so we don't attempt to optimize anything |
437 | | - return tz; |
438 | | - } |
439 | | - if (field() == null) { |
440 | | - // Without a field we're not going to be able to look anything up. |
441 | | - return tz; |
442 | | - } |
443 | | - MappedFieldType ft = context.fieldMapper(field()); |
444 | | - if (ft == null || false == ft instanceof DateFieldMapper.DateFieldType) { |
445 | | - // If the field is unmapped or not a date then we can't get its range. |
446 | | - return tz; |
447 | | - } |
448 | | - DateFieldMapper.DateFieldType dft = (DateFieldMapper.DateFieldType) ft; |
449 | | - final IndexReader reader = context.getIndexReader(); |
450 | | - if (reader == null) { |
451 | | - return tz; |
452 | | - } |
453 | | - |
454 | | - Instant instant = null; |
455 | | - final IndexNumericFieldData fieldData = context.getForField(ft); |
456 | | - for (LeafReaderContext ctx : reader.leaves()) { |
457 | | - LeafNumericFieldData leafFD = fieldData.load(ctx); |
458 | | - SortedNumericDocValues values = leafFD.getLongValues(); |
459 | | - if (values.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) { |
460 | | - instant = Instant.ofEpochMilli(values.nextValue()); |
461 | | - break; |
462 | | - } |
463 | | - } |
464 | | - if (instant == null) { |
465 | | - return tz; |
466 | | - } |
467 | | - |
468 | | - ZoneOffsetTransition prevOffsetTransition = tz.getRules().previousTransition(instant); |
469 | | - final long prevTransition; |
470 | | - if (prevOffsetTransition != null) { |
471 | | - prevTransition = prevOffsetTransition.getInstant().toEpochMilli(); |
472 | | - } else { |
473 | | - prevTransition = instant.toEpochMilli(); |
474 | | - } |
475 | | - ZoneOffsetTransition nextOffsetTransition = tz.getRules().nextTransition(instant); |
476 | | - final long nextTransition; |
477 | | - if (nextOffsetTransition != null) { |
478 | | - nextTransition = nextOffsetTransition.getInstant().toEpochMilli(); |
479 | | - } else { |
480 | | - nextTransition = Long.MAX_VALUE; // fixed time-zone after prevTransition |
481 | | - } |
482 | | - |
483 | | - // We need all not only values but also rounded values to be within |
484 | | - // [prevTransition, nextTransition]. |
485 | | - final long low; |
486 | | - |
487 | | - DateIntervalWrapper.IntervalTypeEnum intervalType = dateHistogramInterval.getIntervalType(); |
488 | | - if (intervalType.equals(DateIntervalWrapper.IntervalTypeEnum.FIXED)) { |
489 | | - low = Math.addExact(prevTransition, dateHistogramInterval.tryIntervalAsFixedUnit().millis()); |
490 | | - } else if (intervalType.equals(DateIntervalWrapper.IntervalTypeEnum.CALENDAR)) { |
491 | | - final Rounding.DateTimeUnit intervalAsUnit = dateHistogramInterval.tryIntervalAsCalendarUnit(); |
492 | | - final Rounding rounding = Rounding.builder(intervalAsUnit).timeZone(timeZone()).build(); |
493 | | - low = rounding.nextRoundingValue(prevTransition); |
494 | | - } else { |
495 | | - // We're not sure what the interval was originally (legacy) so use old behavior of assuming |
496 | | - // calendar first, then fixed. Required because fixed/cal overlap in places ("1h") |
497 | | - Rounding.DateTimeUnit intervalAsUnit = dateHistogramInterval.tryIntervalAsCalendarUnit(); |
498 | | - if (intervalAsUnit != null) { |
499 | | - final Rounding rounding = Rounding.builder(intervalAsUnit).timeZone(timeZone()).build(); |
500 | | - low = rounding.nextRoundingValue(prevTransition); |
501 | | - } else { |
502 | | - final TimeValue intervalAsMillis = dateHistogramInterval.tryIntervalAsFixedUnit(); |
503 | | - low = Math.addExact(prevTransition, intervalAsMillis.millis()); |
504 | | - } |
505 | | - } |
506 | | - // rounding rounds down, so 'nextTransition' is a good upper bound |
507 | | - final long high = nextTransition; |
508 | | - |
509 | | - if (dft.isFieldWithinRange( |
510 | | - reader, Instant.ofEpochMilli(low), Instant.ofEpochMilli(high - 1)) == Relation.WITHIN) { |
511 | | - // All values in this reader have the same offset despite daylight saving times. |
512 | | - // This is very common for location-based timezones such as Europe/Paris in |
513 | | - // combination with time-based indices. |
514 | | - return ZoneOffset.ofTotalSeconds(tz.getRules().getOffset(instant).getTotalSeconds()); |
515 | | - } |
516 | | - return tz; |
517 | | - } |
518 | | - |
519 | 392 | @Override |
520 | 393 | protected ValuesSourceAggregatorFactory innerBuild(QueryShardContext queryShardContext, |
521 | 394 | ValuesSourceConfig config, |
522 | 395 | AggregatorFactory parent, |
523 | 396 | AggregatorFactories.Builder subFactoriesBuilder) throws IOException { |
524 | 397 | final ZoneId tz = timeZone(); |
525 | 398 | final Rounding rounding = dateHistogramInterval.createRounding(tz, offset); |
526 | | - // TODO once we optimize TimeIntervalRounding we won't need to rewrite the time zone |
527 | | - final ZoneId rewrittenTimeZone = rewriteTimeZone(queryShardContext); |
528 | | - final Rounding shardRounding; |
529 | | - if (tz == rewrittenTimeZone) { |
530 | | - shardRounding = rounding; |
531 | | - } else { |
532 | | - shardRounding = dateHistogramInterval.createRounding(rewrittenTimeZone, offset); |
533 | | - } |
534 | 399 |
|
535 | 400 | ExtendedBounds roundedBounds = null; |
536 | 401 | if (this.extendedBounds != null) { |
537 | 402 | // parse any string bounds to longs and round |
538 | 403 | roundedBounds = this.extendedBounds.parseAndValidate(name, queryShardContext, config.format()).round(rounding); |
539 | 404 | } |
540 | | - return new DateHistogramAggregatorFactory(name, config, order, keyed, minDocCount, |
541 | | - rounding, shardRounding, roundedBounds, queryShardContext, parent, subFactoriesBuilder, metadata); |
| 405 | + return new DateHistogramAggregatorFactory( |
| 406 | + name, |
| 407 | + config, |
| 408 | + order, |
| 409 | + keyed, |
| 410 | + minDocCount, |
| 411 | + rounding, |
| 412 | + roundedBounds, |
| 413 | + queryShardContext, |
| 414 | + parent, |
| 415 | + subFactoriesBuilder, |
| 416 | + metadata |
| 417 | + ); |
542 | 418 | } |
543 | 419 |
|
544 | 420 | @Override |
|
0 commit comments