diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/ReleasableBytesStreamOutput.java b/server/src/main/java/org/elasticsearch/common/io/stream/ReleasableBytesStreamOutput.java index b7201deb4057d..875ba7f96da2f 100644 --- a/server/src/main/java/org/elasticsearch/common/io/stream/ReleasableBytesStreamOutput.java +++ b/server/src/main/java/org/elasticsearch/common/io/stream/ReleasableBytesStreamOutput.java @@ -32,7 +32,8 @@ * allocated and available. *

* This is different from a {@link RecyclerBytesStreamOutput} which only uses recycled 16kiB pages and never itself allocates a raw - * {@code byte[]}. + * {@code byte[]}. However, note that by default a {@link ReleasableBytesStreamOutput} uses {@link PageCacheRecycler#PAGE_SIZE_IN_BYTES} + * for its {@code expectedSize} so that it also always starts by using a recycled page rather than a slow-growing fresh {@code byte[]}. *

* The resulting {@link ReleasableBytesReference} is a view over the underlying {@code byte[]} pages and involves no significant extra * allocation to obtain. It is oversized: The worst case for overhead is when the data is one byte more than a 16kiB page and therefore the @@ -47,10 +48,23 @@ */ public class ReleasableBytesStreamOutput extends BytesStreamOutput implements Releasable { + /** + * Create a {@link ReleasableBytesStreamOutput}, acquiring from the given {@link BigArrays} a single recycled page for the initial + * buffer, and growing the buffer as needed. + */ public ReleasableBytesStreamOutput(BigArrays bigarrays) { this(PageCacheRecycler.PAGE_SIZE_IN_BYTES, bigarrays); } + /** + * Create a {@link ReleasableBytesStreamOutput}, allocating an initial buffer of size {@code expectedSize} from the given + * {@link BigArrays}, and growing the buffer as needed. + *

+ * Note that if {@code expectedSize < PageCacheRecycler.PAGE_SIZE_IN_BYTES / 2} then this will allocate a {@code new byte[]} rather than + * using a recycled page, and will keep on allocating {@code new byte[]} instances, copying the contents each time, until the buffer reaches + * {@code PageCacheRecycler.PAGE_SIZE_IN_BYTES / 2}. In the worst case this can be over 40 allocations before it gets big enough to + * start using recycled pages. This is probably not what you want. + */ public ReleasableBytesStreamOutput(int expectedSize, BigArrays bigArrays) { super(expectedSize, bigArrays); } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/SearchApplicationIndexService.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/SearchApplicationIndexService.java index 19d65ff38755c..11c945da9d7b7 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/SearchApplicationIndexService.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/SearchApplicationIndexService.java @@ -291,7 +291,7 @@ private IndicesAliasesRequestBuilder updateAliasIndices(Set currentAlias } private void updateSearchApplication(SearchApplication app, boolean create, ActionListener listener) { - try (ReleasableBytesStreamOutput buffer = new ReleasableBytesStreamOutput(0, bigArrays.withCircuitBreaking())) { + try (ReleasableBytesStreamOutput buffer = new ReleasableBytesStreamOutput(bigArrays.withCircuitBreaking())) { try (XContentBuilder source = XContentFactory.jsonBuilder(buffer)) { source.startObject() .field(SearchApplication.NAME_FIELD.getPreferredName(), app.name())