-
Notifications
You must be signed in to change notification settings - Fork 25.9k
Incremental bulk integration with rest layer #112154
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from all commits
b5a4054
a75b31d
4063a91
e26df4a
fbfcbb5
3c8d8c0
2b3bcd5
1011086
247efaf
861c27f
d3bbac1
1bdb92d
6fa8109
c007b5a
74d69cf
13ef3f5
b303cc7
0b9362e
4b3aab5
aba3eb3
16cfc81
89c76d8
3dc16ce
9236611
4c1b20c
321377a
b5308c3
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -36,6 +36,7 @@ | |
| import org.apache.logging.log4j.LogManager; | ||
| import org.apache.logging.log4j.Logger; | ||
| import org.elasticsearch.ExceptionsHelper; | ||
| import org.elasticsearch.action.bulk.IncrementalBulkService; | ||
| import org.elasticsearch.common.network.CloseableChannel; | ||
| import org.elasticsearch.common.network.NetworkService; | ||
| import org.elasticsearch.common.network.ThreadWatchdog; | ||
|
|
@@ -96,6 +97,7 @@ public class Netty4HttpServerTransport extends AbstractHttpServerTransport { | |
| private final TLSConfig tlsConfig; | ||
| private final AcceptChannelHandler.AcceptPredicate acceptChannelPredicate; | ||
| private final HttpValidator httpValidator; | ||
| private final IncrementalBulkService.Enabled enabled; | ||
| private final ThreadWatchdog threadWatchdog; | ||
| private final int readTimeoutMillis; | ||
|
|
||
|
|
@@ -134,6 +136,7 @@ public Netty4HttpServerTransport( | |
| this.acceptChannelPredicate = acceptChannelPredicate; | ||
| this.httpValidator = httpValidator; | ||
| this.threadWatchdog = networkService.getThreadWatchdog(); | ||
| this.enabled = new IncrementalBulkService.Enabled(clusterSettings); | ||
|
|
||
| this.pipeliningMaxEvents = SETTING_PIPELINING_MAX_EVENTS.get(settings); | ||
|
|
||
|
|
@@ -279,7 +282,7 @@ public void onException(HttpChannel channel, Exception cause) { | |
| } | ||
|
|
||
| public ChannelHandler configureServerChannelHandler() { | ||
| return new HttpChannelHandler(this, handlingSettings, tlsConfig, acceptChannelPredicate, httpValidator); | ||
| return new HttpChannelHandler(this, handlingSettings, tlsConfig, acceptChannelPredicate, httpValidator, enabled); | ||
| } | ||
|
|
||
| static final AttributeKey<Netty4HttpChannel> HTTP_CHANNEL_KEY = AttributeKey.newInstance("es-http-channel"); | ||
|
|
@@ -292,19 +295,22 @@ protected static class HttpChannelHandler extends ChannelInitializer<Channel> { | |
| private final TLSConfig tlsConfig; | ||
| private final BiPredicate<String, InetSocketAddress> acceptChannelPredicate; | ||
| private final HttpValidator httpValidator; | ||
| private final IncrementalBulkService.Enabled enabled; | ||
|
|
||
| protected HttpChannelHandler( | ||
| final Netty4HttpServerTransport transport, | ||
| final HttpHandlingSettings handlingSettings, | ||
| final TLSConfig tlsConfig, | ||
| @Nullable final BiPredicate<String, InetSocketAddress> acceptChannelPredicate, | ||
| @Nullable final HttpValidator httpValidator | ||
| @Nullable final HttpValidator httpValidator, | ||
| IncrementalBulkService.Enabled enabled | ||
| ) { | ||
| this.transport = transport; | ||
| this.handlingSettings = handlingSettings; | ||
| this.tlsConfig = tlsConfig; | ||
| this.acceptChannelPredicate = acceptChannelPredicate; | ||
| this.httpValidator = httpValidator; | ||
| this.enabled = enabled; | ||
| } | ||
|
|
||
| @Override | ||
|
|
@@ -365,7 +371,13 @@ protected HttpMessage createMessage(String[] initialLine) throws Exception { | |
| ); | ||
| } | ||
| // combines the HTTP message pieces into a single full HTTP request (with headers and body) | ||
| final HttpObjectAggregator aggregator = new Netty4HttpAggregator(handlingSettings.maxContentLength()); | ||
| final HttpObjectAggregator aggregator = new Netty4HttpAggregator( | ||
| handlingSettings.maxContentLength(), | ||
| httpPreRequest -> enabled.get() == false | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. I am not entirely sure of the importance of this being in sync with the enabled flag checked in the rest-bulk-handler. However, it seems they could flip at different times, since we register these independently and they each add a listener for cluster settings. I wonder if we can fix that. I realize it is only an issue when turning it on/off, but it would be nice to fix anyway.
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. I added a comment to the feature flag ticket and will address it as a follow-up. I agree with the concern. |
||
| || (httpPreRequest.uri().contains("_bulk") == false | ||
| || httpPreRequest.uri().contains("_bulk_update") | ||
| || httpPreRequest.uri().contains("/_xpack/monitoring/_bulk")) | ||
| ); | ||
| aggregator.setMaxCumulationBufferComponents(transport.maxCompositeBufferComponents); | ||
| ch.pipeline() | ||
| .addLast("decoder_compress", new HttpContentDecompressor()) // this handles request body decompression | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,117 @@ | ||
| /* | ||
| * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one | ||
| * or more contributor license agreements. Licensed under the Elastic License | ||
| * 2.0 and the Server Side Public License, v 1; you may not use this file except | ||
| * in compliance with, at your election, the Elastic License 2.0 or the Server | ||
| * Side Public License, v 1. | ||
| */ | ||
|
|
||
| package org.elasticsearch.http; | ||
|
|
||
| import org.elasticsearch.client.Request; | ||
| import org.elasticsearch.client.Response; | ||
| import org.elasticsearch.client.ResponseException; | ||
| import org.elasticsearch.common.xcontent.XContentHelper; | ||
| import org.elasticsearch.test.ESIntegTestCase; | ||
| import org.elasticsearch.xcontent.json.JsonXContent; | ||
|
|
||
| import java.io.IOException; | ||
| import java.util.List; | ||
| import java.util.Map; | ||
|
|
||
| import static org.elasticsearch.rest.RestStatus.OK; | ||
| import static org.hamcrest.Matchers.equalTo; | ||
|
|
||
| @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE, supportsDedicatedMasters = false, numDataNodes = 2, numClientNodes = 0) | ||
| public class IncrementalBulkRestIT extends HttpSmokeTestCase { | ||
|
|
||
| @SuppressWarnings("unchecked") | ||
| public void testIncrementalBulk() throws IOException { | ||
| Request createRequest = new Request("PUT", "/index_name"); | ||
| createRequest.setJsonEntity(""" | ||
| { | ||
| "settings": { | ||
| "index": { | ||
| "number_of_shards": 1, | ||
| "number_of_replicas": 1, | ||
| "write.wait_for_active_shards": 2 | ||
| } | ||
| } | ||
| }"""); | ||
| final Response indexCreatedResponse = getRestClient().performRequest(createRequest); | ||
| assertThat(indexCreatedResponse.getStatusLine().getStatusCode(), equalTo(OK.getStatus())); | ||
|
|
||
| Request firstBulkRequest = new Request("POST", "/index_name/_bulk"); | ||
|
|
||
| // index documents for the rollup job | ||
| String bulkBody = "{\"index\":{\"_index\":\"index_name\",\"_id\":\"1\"}}\n" | ||
| + "{\"field\":1}\n" | ||
| + "{\"index\":{\"_index\":\"index_name\",\"_id\":\"2\"}}\n" | ||
| + "{\"field\":1}\n" | ||
| + "\r\n"; | ||
|
|
||
| firstBulkRequest.setJsonEntity(bulkBody); | ||
|
|
||
| final Response indexSuccessFul = getRestClient().performRequest(firstBulkRequest); | ||
| assertThat(indexSuccessFul.getStatusLine().getStatusCode(), equalTo(OK.getStatus())); | ||
|
|
||
| Request bulkRequest = new Request("POST", "/index_name/_bulk"); | ||
|
|
||
| // index documents for the rollup job | ||
| final StringBuilder bulk = new StringBuilder(); | ||
| bulk.append("{\"delete\":{\"_index\":\"index_name\",\"_id\":\"1\"}}\n"); | ||
| int updates = 0; | ||
| for (int i = 0; i < 1000; i++) { | ||
| bulk.append("{\"index\":{\"_index\":\"index_name\"}}\n"); | ||
| bulk.append("{\"field\":").append(i).append("}\n"); | ||
| if (randomBoolean() && randomBoolean() && randomBoolean() && randomBoolean()) { | ||
| ++updates; | ||
| bulk.append("{\"update\":{\"_index\":\"index_name\",\"_id\":\"2\"}}\n"); | ||
| bulk.append("{\"doc\":{\"field\":").append(i).append("}}\n"); | ||
| } | ||
| } | ||
| bulk.append("\r\n"); | ||
|
|
||
| bulkRequest.setJsonEntity(bulk.toString()); | ||
|
|
||
| final Response bulkResponse = getRestClient().performRequest(bulkRequest); | ||
| assertThat(bulkResponse.getStatusLine().getStatusCode(), equalTo(OK.getStatus())); | ||
| Map<String, Object> responseMap = XContentHelper.convertToMap( | ||
| JsonXContent.jsonXContent, | ||
| bulkResponse.getEntity().getContent(), | ||
| true | ||
| ); | ||
|
|
||
| assertFalse((Boolean) responseMap.get("errors")); | ||
| assertThat(((List<Object>) responseMap.get("items")).size(), equalTo(1001 + updates)); | ||
| } | ||
|
|
||
| public void testIncrementalMalformed() throws IOException { | ||
| Request createRequest = new Request("PUT", "/index_name"); | ||
| createRequest.setJsonEntity(""" | ||
| { | ||
| "settings": { | ||
| "index": { | ||
| "number_of_shards": 1, | ||
| "number_of_replicas": 1, | ||
| "write.wait_for_active_shards": 2 | ||
| } | ||
| } | ||
| }"""); | ||
| final Response indexCreatedResponse = getRestClient().performRequest(createRequest); | ||
| assertThat(indexCreatedResponse.getStatusLine().getStatusCode(), equalTo(OK.getStatus())); | ||
|
|
||
| Request bulkRequest = new Request("POST", "/index_name/_bulk"); | ||
|
|
||
| // index documents for the rollup job | ||
| final StringBuilder bulk = new StringBuilder(); | ||
| bulk.append("{\"index\":{\"_index\":\"index_name\"}}\n"); | ||
| bulk.append("{\"field\":1}\n"); | ||
| bulk.append("{}\n"); | ||
| bulk.append("\r\n"); | ||
|
|
||
| bulkRequest.setJsonEntity(bulk.toString()); | ||
|
|
||
| expectThrows(ResponseException.class, () -> getRestClient().performRequest(bulkRequest)); | ||
| } | ||
| } |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I have this change in my draft of HttpObjectAggregator removal. It should address
no-content partially: for known length I use the content-length header; for chunked transfers there is no way to tell, so it's always true. https://github.com/elastic/elasticsearch/pull/112120/files#diff-b6d89d18f95a49d731741f926ee22d01f0dd8039de82382e68c1862e19f22b04R282-R297