diff --git a/integ-test/src/test/java/org/opensearch/sql/calcite/tpch/CalcitePPLTpchIT.java b/integ-test/src/test/java/org/opensearch/sql/calcite/tpch/CalcitePPLTpchIT.java
index 18e7246cc63..d83140e0dd5 100644
--- a/integ-test/src/test/java/org/opensearch/sql/calcite/tpch/CalcitePPLTpchIT.java
+++ b/integ-test/src/test/java/org/opensearch/sql/calcite/tpch/CalcitePPLTpchIT.java
@@ -17,7 +17,6 @@
 import java.util.Locale;
 import org.json.JSONObject;
 import org.junit.Assume;
-import org.junit.Ignore;
 import org.junit.Test;
 import org.opensearch.sql.ppl.PPLIntegTestCase;
 import org.opensearch.sql.util.Retry;
@@ -143,9 +142,7 @@ public void testQ3() throws IOException {
         rows(4423, 3055.9365, "1995-02-17 00:00:00", 0));
   }
 
-  // TODO: Aggregation push down has a hard-coded limit of 1000 buckets for output, so this query
-  // will not return the correct results with aggregation push down and it's unstable
-  @Ignore
+  @Test
   public void testQ4() throws IOException {
     String ppl = sanitize(loadFromFile("tpch/queries/q4.ppl"));
     JSONObject actual = executeQuery(ppl);
@@ -153,11 +150,11 @@ public void testQ4() throws IOException {
         actual, schema("o_orderpriority", "string"), schema("order_count", "bigint"));
     verifyDataRows(
         actual,
-        rows("1-URGENT", 7),
+        rows("1-URGENT", 9),
         rows("2-HIGH", 7),
-        rows("3-MEDIUM", 4),
-        rows("4-NOT SPECIFIED", 7),
-        rows("5-LOW", 10));
+        rows("3-MEDIUM", 9),
+        rows("4-NOT SPECIFIED", 8),
+        rows("5-LOW", 12));
   }
 
   @Test
diff --git a/integ-test/src/test/java/org/opensearch/sql/legacy/SQLIntegTestCase.java b/integ-test/src/test/java/org/opensearch/sql/legacy/SQLIntegTestCase.java
index 47632dbc942..50ee11b765a 100644
--- a/integ-test/src/test/java/org/opensearch/sql/legacy/SQLIntegTestCase.java
+++ b/integ-test/src/test/java/org/opensearch/sql/legacy/SQLIntegTestCase.java
@@ -210,6 +210,19 @@ protected synchronized void loadIndex(Index index, RestClient client) throws IOE
       createIndexByRestClient(client, indexName, mapping);
       loadDataByRestClient(client, indexName, dataSet);
     }
+    // loadIndex() could return immediately when isIndexExist()=true,
+    // e.g. the index was created in the cluster but its data hasn't been flushed yet.
+    // We block loadIndex() until the data is loaded to resolve
+    // https://github.com/opensearch-project/sql/issues/4261
+    int countDown = 3; // 1500ms timeout
+    while (countDown != 0 && getDocCount(client, indexName) == 0) {
+      try {
+        Thread.sleep(500);
+        countDown--;
+      } catch (InterruptedException e) {
+        throw new IOException(e);
+      }
+    }
   }
 
   protected synchronized void loadIndex(Index index) throws IOException {
diff --git a/integ-test/src/test/java/org/opensearch/sql/legacy/TestUtils.java b/integ-test/src/test/java/org/opensearch/sql/legacy/TestUtils.java
index a94e89ec0e6..2ac1763836e 100644
--- a/integ-test/src/test/java/org/opensearch/sql/legacy/TestUtils.java
+++ b/integ-test/src/test/java/org/opensearch/sql/legacy/TestUtils.java
@@ -104,6 +104,21 @@ public static void loadDataByRestClient(
     performRequest(client, request);
   }
 
+  /**
+   * Return the number of docs in the index.
+   *
+   * @param client client connection
+   * @param indexName index name
+   * @return doc count of the index
+   * @throws IOException if the count request fails
+   */
+  public static int getDocCount(RestClient client, String indexName) throws IOException {
+    Request request = new Request("GET", "/" + indexName + "/_count");
+    Response response = performRequest(client, request);
+    JSONObject jsonObject = new JSONObject(getResponseBody(response));
+    return jsonObject.getInt("count");
+  }
+
   /**
    * Perform a request by REST client.
    *
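
Note on the change above: the loop added to loadIndex() together with the new TestUtils.getDocCount() forms a simple readiness check that polls GET /<index>/_count until the index reports documents, so tests no longer race a freshly created but not-yet-populated index. Below is a minimal standalone sketch of that same polling pattern, assuming the low-level OpenSearch RestClient, org.json, and Apache HttpClient's EntityUtils; the IndexReadiness class name, the waitForDocs signature, and the configurable retry/sleep parameters are illustrative and not part of this patch.

import java.io.IOException;

import org.apache.http.util.EntityUtils;
import org.json.JSONObject;
import org.opensearch.client.Request;
import org.opensearch.client.Response;
import org.opensearch.client.RestClient;

// Hypothetical helper mirroring the wait-for-docs pattern added to loadIndex().
public final class IndexReadiness {

  private IndexReadiness() {}

  // Poll GET /<index>/_count until the index reports at least one document or the
  // retry budget runs out; the patch itself hard-codes 3 retries of 500ms each.
  public static boolean waitForDocs(
      RestClient client, String indexName, int retries, long sleepMillis) throws IOException {
    for (int attempt = 0; attempt < retries; attempt++) {
      if (docCount(client, indexName) > 0) {
        return true; // data is visible, safe to run queries against the index
      }
      try {
        Thread.sleep(sleepMillis);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new IOException(e);
      }
    }
    return docCount(client, indexName) > 0; // final check after the last sleep
  }

  // Same _count request that TestUtils.getDocCount() in the patch issues.
  private static long docCount(RestClient client, String indexName) throws IOException {
    Request request = new Request("GET", "/" + indexName + "/_count");
    Response response = client.performRequest(request);
    String body = EntityUtils.toString(response.getEntity());
    return new JSONObject(body).getLong("count");
  }
}

With a helper like this, the wait in loadIndex() would reduce to a single waitForDocs(client, indexName, 3, 500) call after createIndexByRestClient/loadDataByRestClient; the patch instead inlines the loop, which keeps the change local to SQLIntegTestCase.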