@@ -22,6 +22,7 @@
 import java.math.BigDecimal;
 import java.sql.Date;
 import java.sql.Timestamp;
+import java.time.Instant;
 import java.util.List;
 import java.util.stream.Collectors;
 import org.apache.hadoop.hive.ql.io.sarg.ExpressionTree;
@@ -171,24 +172,15 @@ private static BigDecimal hiveDecimalToBigDecimal(HiveDecimalWritable hiveDecima
     return hiveDecimalWritable.getHiveDecimal().bigDecimalValue().setScale(hiveDecimalWritable.scale());
   }
 
-  // Hive uses `java.sql.Date.valueOf(lit.toString());` to convert a literal to a Date,
-  // which internally uses `java.util.Date()` to create the object, and that relies on TimeZone.getDefaultRef().
-  // To get back the expected date we have to use LocalDate, which avoids the time zone issues because it
-  // builds the object from the year/month/day fields.
   private static int daysFromDate(Date date) {
-    return DateTimeUtil.daysFromDate(date.toLocalDate());
+    return DateTimeUtil.daysFromInstant(Instant.ofEpochMilli(date.getTime()));
   }
 
-  // Hive uses `java.sql.Timestamp.valueOf(lit.toString());` to convert a literal to a Timestamp,
-  // which again internally uses `java.util.Date()` and therefore TimeZone.getDefaultRef().
-  // To get back the expected timestamp we have to use LocalDateTime, which avoids the time zone issues
-  // because it builds the object from the year/month/day/hour/min/sec/nanos fields.
   private static int daysFromTimestamp(Timestamp timestamp) {
-    return DateTimeUtil.daysFromDate(timestamp.toLocalDateTime().toLocalDate());
+    return DateTimeUtil.daysFromInstant(timestamp.toInstant());
   }
 
-  // We have to use LocalDateTime to get the micros. See the comment above.
   private static long microsFromTimestamp(Timestamp timestamp) {
-    return DateTimeUtil.microsFromTimestamp(timestamp.toLocalDateTime());
+    return DateTimeUtil.microsFromInstant(timestamp.toInstant());
   }
 }
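
A note on the conversions above: switching from LocalDate/LocalDateTime to Instant changes which time zone is used to interpret the epoch millis stored inside java.sql.Date. The sketch below is self-contained and not part of this PR (the class name is made up); it shows how the two paths can disagree whenever the JVM default zone is not UTC:

```java
import java.sql.Date;
import java.time.Instant;
import java.time.temporal.ChronoUnit;
import java.util.TimeZone;

public class DateConversionSketch {
  public static void main(String[] args) {
    // Pretend the JVM runs east of UTC.
    TimeZone.setDefault(TimeZone.getTimeZone("Asia/Tokyo"));

    // Date.valueOf bakes the default zone into the epoch millis:
    // midnight 2015-11-12 JST is 2015-11-11T15:00:00Z.
    Date date = Date.valueOf("2015-11-12");

    // Old path: read back the local calendar date, ignoring the zone baked into the millis.
    long oldDays = date.toLocalDate().toEpochDay();                  // 16751 (2015-11-12)

    // New path: treat the raw millis as a UTC instant.
    Instant instant = Instant.ofEpochMilli(date.getTime());
    long newDays = ChronoUnit.DAYS.between(Instant.EPOCH, instant);  // 16750 (2015-11-11)

    System.out.println(oldDays + " vs " + newDays);
  }
}
```

Which day count is "right" depends on the convention used by whoever built the Date; the test change further down pins that convention to UTC.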
@@ -49,7 +49,6 @@
 import org.apache.iceberg.data.DeleteFilter;
 import org.apache.iceberg.data.GenericDeleteFilter;
 import org.apache.iceberg.data.IdentityPartitionConverters;
-import org.apache.iceberg.data.InternalRecordWrapper;
 import org.apache.iceberg.data.avro.DataReader;
 import org.apache.iceberg.data.orc.GenericOrcReader;
 import org.apache.iceberg.data.parquet.GenericParquetReaders;
@@ -285,11 +284,8 @@ private CloseableIterable<T> applyResidualFiltering(CloseableIterable<T> iter, E
     boolean applyResidual = !context.getConfiguration().getBoolean(InputFormatConfig.SKIP_RESIDUAL_FILTERING, false);
 
     if (applyResidual && residual != null && residual != Expressions.alwaysTrue()) {
-      // Date and timestamp values are not the correct type for Evaluator.
-      // Wrapping to return the expected type.
-      InternalRecordWrapper wrapper = new InternalRecordWrapper(readSchema.asStruct());
       Evaluator filter = new Evaluator(readSchema.asStruct(), residual, caseSensitive);
-      return CloseableIterable.filter(iter, record -> filter.eval(wrapper.wrap((StructLike) record)));
+      return CloseableIterable.filter(iter, record -> filter.eval((StructLike) record));
     } else {
       return iter;
     }
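
For context: Evaluator can be applied directly to any StructLike whose field values already use Iceberg's internal representation, which is presumably why the wrapper is no longer needed for the records this reader produces. A minimal sketch (the schema and values are made up, not from this PR):

```java
import org.apache.iceberg.Schema;
import org.apache.iceberg.data.GenericRecord;
import org.apache.iceberg.expressions.Evaluator;
import org.apache.iceberg.expressions.Expressions;
import org.apache.iceberg.types.Types;

public class EvaluatorSketch {
  public static void main(String[] args) {
    Schema schema = new Schema(
        Types.NestedField.required(1, "id", Types.LongType.get()));

    // Bind the residual expression once, then evaluate it per record.
    Evaluator filter = new Evaluator(schema.asStruct(), Expressions.greaterThan("id", 5L), true);

    // GenericRecord implements StructLike, so it can be passed to eval directly.
    GenericRecord record = GenericRecord.create(schema);
    record.setField("id", 7L);

    System.out.println(filter.eval(record));  // true: 7 > 5
  }
}
```

For date/timestamp columns the record values still have to match the representation the bound literals use; otherwise something like the removed InternalRecordWrapper would still be needed to convert them at evaluation time.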
@@ -218,7 +218,7 @@ public void testBooleanType() {
   @Test
   public void testDateType() {
     SearchArgument.Builder builder = SearchArgumentFactory.newBuilder();
-    Date gmtDate = Date.valueOf(LocalDate.of(2015, 11, 12));
+    Date gmtDate = new Date(LocalDate.of(2015, 11, 12).atStartOfDay(ZoneOffset.UTC).toInstant().toEpochMilli());
     SearchArgument arg = builder.startAnd().equals("date", PredicateLeaf.Type.DATE, gmtDate).end().build();
 
     UnboundPredicate expected = Expressions.equal("date", Literal.of("2015-11-12").to(Types.DateType.get()).value());
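
A quick sanity check (hypothetical, not part of the test) that the new gmtDate lines up with the Iceberg literal on the right-hand side, which stores dates as days from the Unix epoch:

```java
import java.time.Instant;
import java.time.LocalDate;
import java.time.ZoneOffset;
import java.time.temporal.ChronoUnit;

public class DateLiteralCheck {
  public static void main(String[] args) {
    // Iceberg's internal date representation: days since 1970-01-01.
    int literalDays = (int) LocalDate.of(2015, 11, 12).toEpochDay();  // 16751

    // The millis the test puts into java.sql.Date, interpreted back as a UTC instant.
    long millis = LocalDate.of(2015, 11, 12).atStartOfDay(ZoneOffset.UTC).toInstant().toEpochMilli();
    int convertedDays = (int) ChronoUnit.DAYS.between(Instant.EPOCH, Instant.ofEpochMilli(millis));

    System.out.println(literalDays == convertedDays);  // true
  }
}
```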

This file was deleted.

@@ -152,7 +152,11 @@ public void before() throws IOException {

   @After
   public void after() throws Exception {
-    HiveIcebergStorageHandlerTestUtils.close(shell);
+    shell.closeSession();
+    shell.metastore().reset();
+    // HiveServer2 thread pools use thread-local Hive -> HMSClient objects. These are not cleaned up when
+    // HiveServer2 is stopped; only the Finalizer closes the HMS connections.
+    System.gc();
     // Mixing MR and Tez jobs within the same JVM can cause problems. MR jobs set the ExecMapper status to done=false
     // at the beginning and to done=true at the end. However, Tez jobs also rely on this value to see if they should
     // proceed, but they do not reset it to done=false at the beginning. Therefore, without calling this after each test