Skip to content
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 4 additions & 2 deletions core/src/main/java/org/elasticsearch/common/joda/Joda.java
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,7 @@

import java.io.IOException;
import java.io.Writer;
import java.math.BigDecimal;
import java.util.Locale;

public class Joda {
Expand Down Expand Up @@ -331,7 +332,8 @@ public int estimateParsedLength() {
@Override
public int parseInto(DateTimeParserBucket bucket, String text, int position) {
boolean isPositive = text.startsWith("-") == false;
boolean isTooLong = text.length() > estimateParsedLength();
int firstDotIndex = text.indexOf((int)'.');
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

nit: I think we don't need the int cast here, it is done implicitly. At least my IDE removes it on "save"

boolean isTooLong = (firstDotIndex == -1 ? text.length() : firstDotIndex) > estimateParsedLength();

if (bucket.getZone() != DateTimeZone.UTC) {
String format = hasMilliSecondPrecision ? "epoch_millis" : "epoch_second";
Expand All @@ -342,7 +344,7 @@ public int parseInto(DateTimeParserBucket bucket, String text, int position) {

int factor = hasMilliSecondPrecision ? 1 : 1000;
try {
long millis = Long.valueOf(text) * factor;
long millis = new BigDecimal(text).longValue() * factor;
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Nice, so this can handle all kinds of formats it seems.

DateTime dt = new DateTime(millis, DateTimeZone.UTC);
bucket.saveField(DateTimeFieldType.year(), dt.getYear());
bucket.saveField(DateTimeFieldType.monthOfYear(), dt.getMonthOfYear());
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -262,6 +262,26 @@ public void testThatEpochsCanBeParsed() {
}
}

public void testThatFloatEpochsCanBeParsed() {

long millisFromEpoch = randomNonNegativeLong();
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Since dates previous to epoch 0 can still be expressed as a long and may well be encountered if the user is indexing historical data should we test negative epoch values here too?


String epochFloatValue = String.format(Locale.US, "%d.%d", millisFromEpoch, randomNonNegativeLong());
FormatDateTimeFormatter formatter = Joda.forPattern("epoch_millis");

DateTime dateTime = formatter.parser().parseDateTime(epochFloatValue);

assertEquals(dateTime.getMillis(), millisFromEpoch);


epochFloatValue = String.format(Locale.US, "%d.%d", millisFromEpoch / 1000, randomNonNegativeLong());
formatter = Joda.forPattern("epoch_second");

dateTime = formatter.parser().parseDateTime(epochFloatValue);

assertEquals(dateTime.getMillis(), millisFromEpoch / 1000 * 1000);
}

public void testThatNegativeEpochsCanBeParsed() {
// problem: negative epochs can be arbitrary in size...
boolean parseMilliSeconds = randomBoolean();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,7 @@

import java.io.IOException;
import java.util.Collection;
import java.util.Locale;

import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.notNullValue;
Expand Down Expand Up @@ -214,6 +215,32 @@ public void testChangeFormat() throws IOException {
assertEquals(1457654400000L, pointField.numericValue().longValue());
}

public void testFloatEpochFormat() throws IOException {
String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
.startObject("properties").startObject("field").field("type", "date")
.field("format", "epoch_millis").endObject().endObject()
.endObject().endObject().string();

DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping));

assertEquals(mapping, mapper.mappingSource().toString());

long millisFromEpoch = randomNonNegativeLong();
String epochFloatValue = String.format(Locale.US, "%d.%d", millisFromEpoch, randomNonNegativeLong());
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Maybe also randomly append a negative prefix to also test parsing negative values here?


ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder()
.startObject()
.field("field", epochFloatValue)
.endObject()
.bytes(),
XContentType.JSON));

IndexableField[] fields = doc.rootDoc().getFields("field");
assertEquals(2, fields.length);
IndexableField pointField = fields[0];
assertEquals(millisFromEpoch, pointField.numericValue().longValue());
}

public void testChangeLocale() throws IOException {
String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
.startObject("properties").startObject("field").field("type", "date").field("locale", "fr").endObject().endObject()
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -245,6 +245,11 @@ public void doTestCoerce(String type) throws IOException {
IndexableField pointField = fields[1];
assertEquals(2, pointField.fieldType().pointDimensionCount());

// date_range ignores the coerce parameter and epoch_millis date format truncates floats (see issue: #14641)
if (type.equals("date_range")) {
return;
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

nit: maybe just personal preference, but early returns in test look strange to me. Can you change this to execute the rest of the test only for type.equals("date_range") == false)

}

mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
.startObject("properties").startObject("field").field("type", type).field("coerce", false).endObject().endObject()
.endObject().endObject();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -1014,17 +1014,20 @@ public void testRangeWithFormatNumericValue() throws Exception {
assertBucket(buckets.get(0), 2L, "1000-3000", 1000000L, 3000000L);
assertBucket(buckets.get(1), 1L, "3000-4000", 3000000L, 4000000L);

// however, e-notation and fractional parts provided as strings
// should be parsed, and raise an error if not compatible
Exception e = expectThrows(Exception.class, () -> client().prepareSearch(indexName).setSize(0)
.addAggregation(dateRange("date_range").field("date").addRange("1.0e3", "3.0e3").addRange("3.0e3", "4.0e3")).get());
assertThat(e.getCause(), instanceOf(ElasticsearchParseException.class));
assertEquals("failed to parse date field [1.0e3] with format [epoch_second]", e.getCause().getMessage());

e = expectThrows(Exception.class, () -> client().prepareSearch(indexName).setSize(0)
.addAggregation(dateRange("date_range").field("date").addRange("1000.123", "3000.8").addRange("3000.8", "4000.3")).get());
assertThat(e.getCause(), instanceOf(ElasticsearchParseException.class));
assertEquals("failed to parse date field [1000.123] with format [epoch_second]", e.getCause().getMessage());
// e-notation and floats provided as strings will also be truncated (see issue: #14641)
searchResponse = client().prepareSearch(indexName).setSize(0)
.addAggregation(dateRange("date_range").field("date").addRange("1.0e3", "3.0e3").addRange("3.0e3", "4.0e3")).get();
assertThat(searchResponse.getHits().getTotalHits(), equalTo(3L));
buckets = checkBuckets(searchResponse.getAggregations().get("date_range"), "date_range", 2);
assertBucket(buckets.get(0), 2L, "1000-3000", 1000000L, 3000000L);
assertBucket(buckets.get(1), 1L, "3000-4000", 3000000L, 4000000L);
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Great this works


searchResponse = client().prepareSearch(indexName).setSize(0)
.addAggregation(dateRange("date_range").field("date").addRange("1000.123", "3000.8").addRange("3000.8", "4000.3")).get();
assertThat(searchResponse.getHits().getTotalHits(), equalTo(3L));
buckets = checkBuckets(searchResponse.getAggregations().get("date_range"), "date_range", 2);
assertBucket(buckets.get(0), 2L, "1000-3000", 1000000L, 3000000L);
assertBucket(buckets.get(1), 1L, "3000-4000", 3000000L, 4000000L);

// using different format should work when to/from is compatible with
// format in aggregation
Expand Down