@@ -18,7 +18,6 @@

import org.json.JSONObject;
import org.junit.Assume;
- import org.junit.Ignore;
import org.junit.Test;
import org.opensearch.sql.ppl.PPLIntegTestCase;
import org.opensearch.sql.util.Retry;
@@ -144,21 +143,19 @@ public void testQ3() throws IOException {
rows(4423, 3055.9365, "1995-02-17 00:00:00", 0));
}

- // TODO: Aggregation push down has a hard-coded limit of 1000 buckets for output, so this query
- // will not return the correct results with aggregation push down and it's unstable
- @Ignore
@Test
public void testQ4() throws IOException {
String ppl = sanitize(loadFromFile("tpch/queries/q4.ppl"));
JSONObject actual = executeQuery(ppl);
verifySchemaInOrder(
actual, schema("o_orderpriority", "string"), schema("order_count", "bigint"));
verifyDataRows(
actual,
rows("1-URGENT", 7),
rows("1-URGENT", 9),
rows("2-HIGH", 7),
rows("3-MEDIUM", 4),
rows("4-NOT SPECIFIED", 7),
rows("5-LOW", 10));
rows("3-MEDIUM", 9),
rows("4-NOT SPECIFIED", 8),
rows("5-LOW", 12));
}

@Test
@@ -407,7 +404,6 @@ public void testQ18() throws IOException {
verifyNumOfRows(actual, 0);
}

@Ignore("This IT is easily flaky failure in 2.19.0, but more stable after 2.19.3")
@Test
public void testQ19() throws IOException {
String ppl = sanitize(loadFromFile("tpch/queries/q19.ppl"));
==== next file in this PR ====
@@ -202,6 +202,19 @@ protected synchronized void loadIndex(Index index, RestClient client) throws IOException
createIndexByRestClient(client, indexName, mapping);
loadDataByRestClient(client, indexName, dataSet);
}
+ // loadIndex() can return immediately when isIndexExist() is true,
+ // e.g. the index has been created in the cluster but its data hasn't been flushed yet.
+ // Block here until the data is visible, to resolve
+ // https://github.com/opensearch-project/sql/issues/4261
+ int countDown = 3; // 3 x 500 ms = 1500 ms timeout
+ while (countDown != 0 && getDocCount(client, indexName) == 0) {
+   try {
+     Thread.sleep(500);
+     countDown--;
+   } catch (InterruptedException e) {
+     throw new IOException(e);
+   }
+ }
}

protected synchronized void loadIndex(Index index) throws IOException {
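The loop added above is a bounded-polling pattern: repeat a cheap readiness check a fixed number of times with a short sleep in between, and give up once the budget (3 x 500 ms here) is exhausted. A minimal, self-contained sketch of the same idea follows; the class name BoundedWait, the Check interface, and the waitUntil() signature are illustrative and not part of this PR.

import java.io.IOException;

/** Illustrative sketch only: generalizes the wait loop from the loadIndex() change above. */
final class BoundedWait {

  /** A cheap, repeatable readiness check, e.g. () -> getDocCount(client, indexName) > 0. */
  interface Check {
    boolean ready() throws IOException;
  }

  /**
   * Polls the check up to {@code attempts} times, sleeping {@code intervalMillis} between
   * tries, mirroring the check-then-sleep order of the loadIndex() loop.
   */
  static boolean waitUntil(Check check, int attempts, long intervalMillis) throws IOException {
    int countDown = attempts;
    while (countDown != 0 && !check.ready()) {
      try {
        Thread.sleep(intervalMillis);
        countDown--;
      } catch (InterruptedException e) {
        throw new IOException(e);
      }
    }
    return check.ready(); // one final check after the last sleep
  }
}

With a helper like this, the new logic could read waitUntil(() -> getDocCount(client, indexName) > 0, 3, 500); the inline loop the PR uses is arguably clearer for a one-off test utility.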
==== next file in this PR ====
@@ -103,6 +103,21 @@ public static void loadDataByRestClient(
performRequest(client, request);
}

+ /**
+  * Return the number of documents in the index.
+  *
+  * @param client REST client connection
+  * @param indexName index name
+  * @return doc count of the index
+  * @throws IOException if the count request fails
+  */
+ public static int getDocCount(RestClient client, String indexName) throws IOException {
+   Request request = new Request("GET", "/" + indexName + "/_count");
+   Response response = performRequest(client, request);
+   JSONObject jsonObject = new JSONObject(getResponseBody(response));
+   return jsonObject.getInt("count");
+ }

/**
* Perform a request by REST client.
*
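For reference, the _count endpoint that getDocCount() calls returns a small JSON body of the form {"count": <n>, "_shards": {...}}, and the helper only reads the top-level count field. A standalone sketch of that parsing step is below; the hard-coded response body and the class name CountParseDemo are made-up examples, not output from this test suite.

import org.json.JSONObject;

/** Illustrative only: shows the JSON shape getDocCount() parses. */
public class CountParseDemo {
  public static void main(String[] args) {
    // Example _count response body; the numbers are made up.
    String body = "{\"count\": 25, \"_shards\": {\"total\": 1, \"successful\": 1, \"skipped\": 0, \"failed\": 0}}";
    JSONObject jsonObject = new JSONObject(body);
    int docCount = jsonObject.getInt("count"); // same extraction as getDocCount()
    System.out.println("doc count = " + docCount);
  }
}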