Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import org.apache.hadoop.hive.ql.exec.mr.ExecMapper;
import org.apache.iceberg.FileFormat;
Expand All @@ -47,6 +48,7 @@
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import org.junit.rules.Timeout;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;

Expand Down Expand Up @@ -134,6 +136,9 @@ public static Collection<Object[]> parameters() {
@Rule
public TemporaryFolder temp = new TemporaryFolder();

@Rule
public Timeout timeout = new Timeout(40000, TimeUnit.MILLISECONDS);

@BeforeClass
public static void beforeClass() {
shell = HiveIcebergStorageHandlerTestUtils.shell();
Expand Down Expand Up @@ -243,7 +248,7 @@ public void testCBOWithSelfJoin() throws IOException {
Assert.assertArrayEquals(new Object[] {102L, 1L, 33.33d}, rows.get(2));
}

@Test
@Test(timeout = 100000)
public void testJoinTablesSupportedTypes() throws IOException {
for (int i = 0; i < SUPPORTED_TYPES.size(); i++) {
Type type = SUPPORTED_TYPES.get(i);
Expand All @@ -266,7 +271,7 @@ public void testJoinTablesSupportedTypes() throws IOException {
}
}

@Test
@Test(timeout = 100000)
public void testSelectDistinctFromTable() throws IOException {
for (int i = 0; i < SUPPORTED_TYPES.size(); i++) {
Type type = SUPPORTED_TYPES.get(i);
Expand Down Expand Up @@ -309,7 +314,7 @@ public void testInsert() throws IOException {
HiveIcebergTestUtils.validateData(table, HiveIcebergStorageHandlerTestUtils.CUSTOMER_RECORDS, 0);
}

@Test
@Test(timeout = 100000)
public void testInsertSupportedTypes() throws IOException {
Assume.assumeTrue("Tez write is not implemented yet", executionEngine.equals("mr"));
for (int i = 0; i < SUPPORTED_TYPES.size(); i++) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -79,7 +79,7 @@ public void setHiveSessionValue(String key, boolean value) {

public void start() {
// Create a copy of the HiveConf for the metastore
metastore.start(new HiveConf(hs2Conf));
metastore.start(new HiveConf(hs2Conf), 10);
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Do we know why the pool is exhausted? In the past, we had a few leaks in the Spark catalog code which led to this. It can also be a valid use case if we simply need a larger pool.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Maybe, worth creating an issue if we want to take a look later.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Or is it related to the recent change around how we manage the pool?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I have 2 ideas:

  • Maybe this was caused by the Hive: Configure catalog type on table level. #2129, where we started to handle / cache the catalogs differently
  • Or we added some more queries and Hive creates / caches HMSClients, and we might have ended up using a different codepath.

Created #2474

hs2Conf.setVar(HiveConf.ConfVars.METASTOREURIS, metastore.hiveConf().getVar(HiveConf.ConfVars.METASTOREURIS));
hs2Conf.setVar(HiveConf.ConfVars.METASTOREWAREHOUSE,
metastore.hiveConf().getVar(HiveConf.ConfVars.METASTOREWAREHOUSE));
Expand Down