diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/DecodeConstantIntegerBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/DecodeConstantIntegerBenchmark.java index 4ade83728b73b..2d71baf360b1a 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/DecodeConstantIntegerBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/DecodeConstantIntegerBenchmark.java @@ -9,9 +9,10 @@ package org.elasticsearch.benchmark.index.codec.tsdb; -import org.elasticsearch.benchmark.index.codec.tsdb.internal.AbstractDocValuesForUtilBenchmark; +import org.elasticsearch.benchmark.index.codec.tsdb.internal.AbstractTSDBCodecBenchmark; import org.elasticsearch.benchmark.index.codec.tsdb.internal.ConstantIntegerSupplier; import org.elasticsearch.benchmark.index.codec.tsdb.internal.DecodeBenchmark; +import org.elasticsearch.benchmark.index.codec.tsdb.internal.ThroughputMetrics; import org.openjdk.jmh.annotations.Benchmark; import org.openjdk.jmh.annotations.BenchmarkMode; import org.openjdk.jmh.annotations.Fork; @@ -29,19 +30,23 @@ import java.io.IOException; import java.util.concurrent.TimeUnit; +/** + * Benchmark for decoding constant integer patterns. 
+ */ @Fork(value = 1) @Warmup(iterations = 3) -@Measurement(iterations = 10) -@BenchmarkMode(value = Mode.AverageTime) -@OutputTimeUnit(value = TimeUnit.NANOSECONDS) -@State(value = Scope.Benchmark) +@Measurement(iterations = 5) +@BenchmarkMode(Mode.Throughput) +@OutputTimeUnit(TimeUnit.SECONDS) +@State(Scope.Benchmark) public class DecodeConstantIntegerBenchmark { private static final int SEED = 17; private static final int BLOCK_SIZE = 128; - @Param({ "15" }) + + @Param({ "1", "4", "8", "9", "16", "17", "24", "25", "32", "33", "40", "48", "56", "57", "64" }) private int bitsPerValue; - private final AbstractDocValuesForUtilBenchmark decode; + private final AbstractTSDBCodecBenchmark decode; public DecodeConstantIntegerBenchmark() { this.decode = new DecodeBenchmark(); @@ -49,16 +54,17 @@ public DecodeConstantIntegerBenchmark() { @Setup(Level.Invocation) public void setupInvocation() throws IOException { - decode.setupInvocation(bitsPerValue); + decode.setupInvocation(); } - @Setup(Level.Iteration) - public void setupIteration() throws IOException { - decode.setupIteration(bitsPerValue, new ConstantIntegerSupplier(SEED, bitsPerValue, BLOCK_SIZE)); + @Setup(Level.Trial) + public void setupTrial() throws IOException { + decode.setupTrial(new ConstantIntegerSupplier(SEED, bitsPerValue, BLOCK_SIZE)); } @Benchmark - public void benchmark(Blackhole bh) throws IOException { - decode.benchmark(bitsPerValue, bh); + public void throughput(Blackhole bh, ThroughputMetrics metrics) throws IOException { + decode.benchmark(bh); + metrics.recordOperation(BLOCK_SIZE, decode.getEncodedSize()); } } diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/DecodeDecreasingIntegerBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/DecodeDecreasingIntegerBenchmark.java index f41ae3a912a94..ab1887ed2088f 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/DecodeDecreasingIntegerBenchmark.java +++ 
b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/DecodeDecreasingIntegerBenchmark.java @@ -9,9 +9,10 @@ package org.elasticsearch.benchmark.index.codec.tsdb; -import org.elasticsearch.benchmark.index.codec.tsdb.internal.AbstractDocValuesForUtilBenchmark; +import org.elasticsearch.benchmark.index.codec.tsdb.internal.AbstractTSDBCodecBenchmark; import org.elasticsearch.benchmark.index.codec.tsdb.internal.DecodeBenchmark; import org.elasticsearch.benchmark.index.codec.tsdb.internal.DecreasingIntegerSupplier; +import org.elasticsearch.benchmark.index.codec.tsdb.internal.ThroughputMetrics; import org.openjdk.jmh.annotations.Benchmark; import org.openjdk.jmh.annotations.BenchmarkMode; import org.openjdk.jmh.annotations.Fork; @@ -29,19 +30,23 @@ import java.io.IOException; import java.util.concurrent.TimeUnit; +/** + * Benchmark for decoding decreasing integer patterns. + */ @Fork(value = 1) @Warmup(iterations = 3) -@Measurement(iterations = 10) -@BenchmarkMode(value = Mode.AverageTime) -@OutputTimeUnit(value = TimeUnit.NANOSECONDS) -@State(value = Scope.Benchmark) +@Measurement(iterations = 5) +@BenchmarkMode(Mode.Throughput) +@OutputTimeUnit(TimeUnit.SECONDS) +@State(Scope.Benchmark) public class DecodeDecreasingIntegerBenchmark { private static final int SEED = 17; private static final int BLOCK_SIZE = 128; - @Param({ "4", "8", "12", "16", "20", "24", "28", "32", "36", "40", "44", "48", "52", "56", "60", "64" }) + + @Param({ "1", "4", "8", "9", "16", "17", "24", "25", "32", "33", "40", "48", "56", "57", "64" }) private int bitsPerValue; - private final AbstractDocValuesForUtilBenchmark decode; + private final AbstractTSDBCodecBenchmark decode; public DecodeDecreasingIntegerBenchmark() { this.decode = new DecodeBenchmark(); @@ -49,16 +54,17 @@ public DecodeDecreasingIntegerBenchmark() { @Setup(Level.Invocation) public void setupInvocation() throws IOException { - decode.setupInvocation(bitsPerValue); + decode.setupInvocation(); } - 
@Setup(Level.Iteration) - public void setupIteration() throws IOException { - decode.setupIteration(bitsPerValue, new DecreasingIntegerSupplier(SEED, bitsPerValue, BLOCK_SIZE)); + @Setup(Level.Trial) + public void setupTrial() throws IOException { + decode.setupTrial(new DecreasingIntegerSupplier(SEED, bitsPerValue, BLOCK_SIZE)); } @Benchmark - public void benchmark(Blackhole bh) throws IOException { - decode.benchmark(bitsPerValue, bh); + public void throughput(Blackhole bh, ThroughputMetrics metrics) throws IOException { + decode.benchmark(bh); + metrics.recordOperation(BLOCK_SIZE, decode.getEncodedSize()); } } diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/DecodeIncreasingIntegerBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/DecodeIncreasingIntegerBenchmark.java index 40d0d41b4aefc..5655210bc9f18 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/DecodeIncreasingIntegerBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/DecodeIncreasingIntegerBenchmark.java @@ -9,9 +9,10 @@ package org.elasticsearch.benchmark.index.codec.tsdb; -import org.elasticsearch.benchmark.index.codec.tsdb.internal.AbstractDocValuesForUtilBenchmark; +import org.elasticsearch.benchmark.index.codec.tsdb.internal.AbstractTSDBCodecBenchmark; import org.elasticsearch.benchmark.index.codec.tsdb.internal.DecodeBenchmark; import org.elasticsearch.benchmark.index.codec.tsdb.internal.IncreasingIntegerSupplier; +import org.elasticsearch.benchmark.index.codec.tsdb.internal.ThroughputMetrics; import org.openjdk.jmh.annotations.Benchmark; import org.openjdk.jmh.annotations.BenchmarkMode; import org.openjdk.jmh.annotations.Fork; @@ -29,38 +30,41 @@ import java.io.IOException; import java.util.concurrent.TimeUnit; +/** + * Benchmark for decoding increasing integer patterns. 
+ */ @Fork(value = 1) @Warmup(iterations = 3) -@Measurement(iterations = 10) -@BenchmarkMode(value = Mode.AverageTime) -@OutputTimeUnit(value = TimeUnit.NANOSECONDS) -@State(value = Scope.Benchmark) +@Measurement(iterations = 5) +@BenchmarkMode(Mode.Throughput) +@OutputTimeUnit(TimeUnit.SECONDS) +@State(Scope.Benchmark) public class DecodeIncreasingIntegerBenchmark { private static final int SEED = 17; private static final int BLOCK_SIZE = 128; - @Param({ "4", "8", "12", "16", "20", "24", "28", "32", "36", "40", "44", "48", "52", "56", "60", "64" }) + + @Param({ "1", "4", "8", "9", "16", "17", "24", "25", "32", "33", "40", "48", "56", "57", "64" }) private int bitsPerValue; - private final AbstractDocValuesForUtilBenchmark decode; + private final AbstractTSDBCodecBenchmark decode; public DecodeIncreasingIntegerBenchmark() { this.decode = new DecodeBenchmark(); - } @Setup(Level.Invocation) public void setupInvocation() throws IOException { - decode.setupInvocation(bitsPerValue); + decode.setupInvocation(); } - @Setup(Level.Iteration) - public void setupIteration() throws IOException { - decode.setupIteration(bitsPerValue, new IncreasingIntegerSupplier(SEED, bitsPerValue, BLOCK_SIZE)); + @Setup(Level.Trial) + public void setupTrial() throws IOException { + decode.setupTrial(new IncreasingIntegerSupplier(SEED, bitsPerValue, BLOCK_SIZE)); } @Benchmark - public void benchmark(Blackhole bh) throws IOException { - decode.benchmark(bitsPerValue, bh); - + public void throughput(Blackhole bh, ThroughputMetrics metrics) throws IOException { + decode.benchmark(bh); + metrics.recordOperation(BLOCK_SIZE, decode.getEncodedSize()); } } diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/DecodeNonSortedIntegerBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/DecodeNonSortedIntegerBenchmark.java index 7eea2d9d7a70e..617e816a6754c 100644 --- 
a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/DecodeNonSortedIntegerBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/DecodeNonSortedIntegerBenchmark.java @@ -9,9 +9,10 @@ package org.elasticsearch.benchmark.index.codec.tsdb; -import org.elasticsearch.benchmark.index.codec.tsdb.internal.AbstractDocValuesForUtilBenchmark; +import org.elasticsearch.benchmark.index.codec.tsdb.internal.AbstractTSDBCodecBenchmark; import org.elasticsearch.benchmark.index.codec.tsdb.internal.DecodeBenchmark; import org.elasticsearch.benchmark.index.codec.tsdb.internal.NonSortedIntegerSupplier; +import org.elasticsearch.benchmark.index.codec.tsdb.internal.ThroughputMetrics; import org.openjdk.jmh.annotations.Benchmark; import org.openjdk.jmh.annotations.BenchmarkMode; import org.openjdk.jmh.annotations.Fork; @@ -29,37 +30,41 @@ import java.io.IOException; import java.util.concurrent.TimeUnit; +/** + * Benchmark for decoding non-sorted integer patterns. 
+ */ @Fork(value = 1) @Warmup(iterations = 3) -@Measurement(iterations = 10) -@BenchmarkMode(value = Mode.AverageTime) -@OutputTimeUnit(value = TimeUnit.NANOSECONDS) -@State(value = Scope.Benchmark) +@Measurement(iterations = 5) +@BenchmarkMode(Mode.Throughput) +@OutputTimeUnit(TimeUnit.SECONDS) +@State(Scope.Benchmark) public class DecodeNonSortedIntegerBenchmark { private static final int SEED = 17; private static final int BLOCK_SIZE = 128; - @Param({ "4", "8", "12", "16", "20", "24", "28", "32", "36", "40", "44", "48", "52", "56", "60", "64" }) + + @Param({ "1", "4", "8", "9", "16", "17", "24", "25", "32", "33", "40", "48", "56", "57", "64" }) private int bitsPerValue; - private final AbstractDocValuesForUtilBenchmark decode; + private final AbstractTSDBCodecBenchmark decode; public DecodeNonSortedIntegerBenchmark() { this.decode = new DecodeBenchmark(); - } @Setup(Level.Invocation) public void setupInvocation() throws IOException { - decode.setupInvocation(bitsPerValue); + decode.setupInvocation(); } - @Setup(Level.Iteration) - public void setupIteration() throws IOException { - decode.setupIteration(bitsPerValue, new NonSortedIntegerSupplier(SEED, bitsPerValue, BLOCK_SIZE)); + @Setup(Level.Trial) + public void setupTrial() throws IOException { + decode.setupTrial(new NonSortedIntegerSupplier(SEED, bitsPerValue, BLOCK_SIZE)); } @Benchmark - public void benchmark(Blackhole bh) throws IOException { - decode.benchmark(bitsPerValue, bh); + public void throughput(Blackhole bh, ThroughputMetrics metrics) throws IOException { + decode.benchmark(bh); + metrics.recordOperation(BLOCK_SIZE, decode.getEncodedSize()); } } diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/EncodeConstantIntegerBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/EncodeConstantIntegerBenchmark.java index 47579b3acd410..7fe1cd755b35b 100644 --- 
a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/EncodeConstantIntegerBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/EncodeConstantIntegerBenchmark.java @@ -9,9 +9,11 @@ package org.elasticsearch.benchmark.index.codec.tsdb; -import org.elasticsearch.benchmark.index.codec.tsdb.internal.AbstractDocValuesForUtilBenchmark; +import org.elasticsearch.benchmark.index.codec.tsdb.internal.AbstractTSDBCodecBenchmark; +import org.elasticsearch.benchmark.index.codec.tsdb.internal.CompressionMetrics; import org.elasticsearch.benchmark.index.codec.tsdb.internal.ConstantIntegerSupplier; import org.elasticsearch.benchmark.index.codec.tsdb.internal.EncodeBenchmark; +import org.elasticsearch.benchmark.index.codec.tsdb.internal.ThroughputMetrics; import org.openjdk.jmh.annotations.Benchmark; import org.openjdk.jmh.annotations.BenchmarkMode; import org.openjdk.jmh.annotations.Fork; @@ -29,37 +31,58 @@ import java.io.IOException; import java.util.concurrent.TimeUnit; +/** + * Benchmark for encoding constant integer patterns. 
+ */ @Fork(value = 1) @Warmup(iterations = 3) -@Measurement(iterations = 10) -@BenchmarkMode(value = Mode.AverageTime) -@OutputTimeUnit(value = TimeUnit.NANOSECONDS) -@State(value = Scope.Benchmark) +@Measurement(iterations = 5) +@BenchmarkMode(Mode.Throughput) +@OutputTimeUnit(TimeUnit.SECONDS) +@State(Scope.Benchmark) public class EncodeConstantIntegerBenchmark { private static final int SEED = 17; private static final int BLOCK_SIZE = 128; - @Param({ "15" }) + + @Param({ "1", "4", "8", "9", "16", "17", "24", "25", "32", "33", "40", "48", "56", "57", "64" }) private int bitsPerValue; - private final AbstractDocValuesForUtilBenchmark encode; + private final AbstractTSDBCodecBenchmark encode; public EncodeConstantIntegerBenchmark() { this.encode = new EncodeBenchmark(); - } @Setup(Level.Invocation) public void setupInvocation() throws IOException { - encode.setupInvocation(bitsPerValue); + encode.setupInvocation(); } - @Setup(Level.Iteration) - public void setupIteration() throws IOException { - encode.setupIteration(bitsPerValue, new ConstantIntegerSupplier(SEED, bitsPerValue, BLOCK_SIZE)); + @Setup(Level.Trial) + public void setupTrial() throws IOException { + encode.setupTrial(new ConstantIntegerSupplier(SEED, bitsPerValue, BLOCK_SIZE)); + encode.setupInvocation(); + encode.run(); + } + + @Benchmark + public void throughput(Blackhole bh, ThroughputMetrics metrics) throws IOException { + encode.benchmark(bh); + metrics.recordOperation(BLOCK_SIZE, encode.getEncodedSize()); } + /** + * Measures compression efficiency metrics (compression ratio, encoded bits/bytes per value). + * + *

Uses zero warmup and single iteration because compression metrics are deterministic: + * the same input data always produces the same encoded size. Unlike throughput measurements + * which vary due to JIT compilation and CPU state, compression ratios are constant across runs. + */ @Benchmark - public void benchmark(Blackhole bh) throws IOException { - encode.benchmark(bitsPerValue, bh); + @Warmup(iterations = 0) + @Measurement(iterations = 1) + public void compression(Blackhole bh, CompressionMetrics metrics) throws IOException { + encode.benchmark(bh); + metrics.recordOperation(BLOCK_SIZE, encode.getEncodedSize(), bitsPerValue); } } diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/EncodeDecreasingIntegerBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/EncodeDecreasingIntegerBenchmark.java index 73d0e13a76a77..2fe7f75c7baf3 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/EncodeDecreasingIntegerBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/EncodeDecreasingIntegerBenchmark.java @@ -9,9 +9,11 @@ package org.elasticsearch.benchmark.index.codec.tsdb; -import org.elasticsearch.benchmark.index.codec.tsdb.internal.AbstractDocValuesForUtilBenchmark; +import org.elasticsearch.benchmark.index.codec.tsdb.internal.AbstractTSDBCodecBenchmark; +import org.elasticsearch.benchmark.index.codec.tsdb.internal.CompressionMetrics; import org.elasticsearch.benchmark.index.codec.tsdb.internal.DecreasingIntegerSupplier; import org.elasticsearch.benchmark.index.codec.tsdb.internal.EncodeBenchmark; +import org.elasticsearch.benchmark.index.codec.tsdb.internal.ThroughputMetrics; import org.openjdk.jmh.annotations.Benchmark; import org.openjdk.jmh.annotations.BenchmarkMode; import org.openjdk.jmh.annotations.Fork; @@ -29,19 +31,23 @@ import java.io.IOException; import java.util.concurrent.TimeUnit; +/** + * Benchmark for encoding decreasing 
integer patterns. + */ @Fork(value = 1) @Warmup(iterations = 3) -@Measurement(iterations = 10) -@BenchmarkMode(value = Mode.AverageTime) -@OutputTimeUnit(value = TimeUnit.NANOSECONDS) -@State(value = Scope.Benchmark) +@Measurement(iterations = 5) +@BenchmarkMode(Mode.Throughput) +@OutputTimeUnit(TimeUnit.SECONDS) +@State(Scope.Benchmark) public class EncodeDecreasingIntegerBenchmark { private static final int SEED = 17; private static final int BLOCK_SIZE = 128; - @Param({ "4", "8", "12", "16", "20", "24", "28", "32", "36", "40", "44", "48", "52", "56", "60", "64" }) + + @Param({ "1", "4", "8", "9", "16", "17", "24", "25", "32", "33", "40", "48", "56", "57", "64" }) private int bitsPerValue; - private final AbstractDocValuesForUtilBenchmark encode; + private final AbstractTSDBCodecBenchmark encode; public EncodeDecreasingIntegerBenchmark() { this.encode = new EncodeBenchmark(); @@ -49,16 +55,34 @@ public EncodeDecreasingIntegerBenchmark() { @Setup(Level.Invocation) public void setupInvocation() throws IOException { - encode.setupInvocation(bitsPerValue); + encode.setupInvocation(); } - @Setup(Level.Iteration) - public void setupIteration() throws IOException { - encode.setupIteration(bitsPerValue, new DecreasingIntegerSupplier(SEED, bitsPerValue, BLOCK_SIZE)); + @Setup(Level.Trial) + public void setupTrial() throws IOException { + encode.setupTrial(new DecreasingIntegerSupplier(SEED, bitsPerValue, BLOCK_SIZE)); + encode.setupInvocation(); + encode.run(); + } + + @Benchmark + public void throughput(Blackhole bh, ThroughputMetrics metrics) throws IOException { + encode.benchmark(bh); + metrics.recordOperation(BLOCK_SIZE, encode.getEncodedSize()); } + /** + * Measures compression efficiency metrics (compression ratio, encoded bits/bytes per value). + * + *

Uses zero warmup and single iteration because compression metrics are deterministic: + * the same input data always produces the same encoded size. Unlike throughput measurements + * which vary due to JIT compilation and CPU state, compression ratios are constant across runs. + */ @Benchmark - public void benchmark(Blackhole bh) throws IOException { - encode.benchmark(bitsPerValue, bh); + @Warmup(iterations = 0) + @Measurement(iterations = 1) + public void compression(Blackhole bh, CompressionMetrics metrics) throws IOException { + encode.benchmark(bh); + metrics.recordOperation(BLOCK_SIZE, encode.getEncodedSize(), bitsPerValue); } } diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/EncodeIncreasingIntegerBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/EncodeIncreasingIntegerBenchmark.java index f9e164972bebb..699fdbac349b1 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/EncodeIncreasingIntegerBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/EncodeIncreasingIntegerBenchmark.java @@ -9,9 +9,11 @@ package org.elasticsearch.benchmark.index.codec.tsdb; -import org.elasticsearch.benchmark.index.codec.tsdb.internal.AbstractDocValuesForUtilBenchmark; +import org.elasticsearch.benchmark.index.codec.tsdb.internal.AbstractTSDBCodecBenchmark; +import org.elasticsearch.benchmark.index.codec.tsdb.internal.CompressionMetrics; import org.elasticsearch.benchmark.index.codec.tsdb.internal.EncodeBenchmark; import org.elasticsearch.benchmark.index.codec.tsdb.internal.IncreasingIntegerSupplier; +import org.elasticsearch.benchmark.index.codec.tsdb.internal.ThroughputMetrics; import org.openjdk.jmh.annotations.Benchmark; import org.openjdk.jmh.annotations.BenchmarkMode; import org.openjdk.jmh.annotations.Fork; @@ -29,19 +31,23 @@ import java.io.IOException; import java.util.concurrent.TimeUnit; +/** + * Benchmark for encoding increasing 
integer patterns. + */ @Fork(value = 1) @Warmup(iterations = 3) -@Measurement(iterations = 10) -@BenchmarkMode(value = Mode.AverageTime) -@OutputTimeUnit(value = TimeUnit.NANOSECONDS) -@State(value = Scope.Benchmark) +@Measurement(iterations = 5) +@BenchmarkMode(Mode.Throughput) +@OutputTimeUnit(TimeUnit.SECONDS) +@State(Scope.Benchmark) public class EncodeIncreasingIntegerBenchmark { private static final int SEED = 17; private static final int BLOCK_SIZE = 128; - @Param({ "4", "8", "12", "16", "20", "24", "28", "32", "36", "40", "44", "48", "52", "56", "60", "64" }) + + @Param({ "1", "4", "8", "9", "16", "17", "24", "25", "32", "33", "40", "48", "56", "57", "64" }) private int bitsPerValue; - private final AbstractDocValuesForUtilBenchmark encode; + private final AbstractTSDBCodecBenchmark encode; public EncodeIncreasingIntegerBenchmark() { this.encode = new EncodeBenchmark(); @@ -49,16 +55,34 @@ public EncodeIncreasingIntegerBenchmark() { @Setup(Level.Invocation) public void setupInvocation() throws IOException { - encode.setupInvocation(bitsPerValue); + encode.setupInvocation(); } - @Setup(Level.Iteration) - public void setupIteration() throws IOException { - encode.setupIteration(bitsPerValue, new IncreasingIntegerSupplier(SEED, bitsPerValue, BLOCK_SIZE)); + @Setup(Level.Trial) + public void setupTrial() throws IOException { + encode.setupTrial(new IncreasingIntegerSupplier(SEED, bitsPerValue, BLOCK_SIZE)); + encode.setupInvocation(); + encode.run(); + } + + @Benchmark + public void throughput(Blackhole bh, ThroughputMetrics metrics) throws IOException { + encode.benchmark(bh); + metrics.recordOperation(BLOCK_SIZE, encode.getEncodedSize()); } + /** + * Measures compression efficiency metrics (compression ratio, encoded bits/bytes per value). + * + *

Uses zero warmup and single iteration because compression metrics are deterministic: + * the same input data always produces the same encoded size. Unlike throughput measurements + * which vary due to JIT compilation and CPU state, compression ratios are constant across runs. + */ @Benchmark - public void benchmark(Blackhole bh) throws IOException { - encode.benchmark(bitsPerValue, bh); + @Warmup(iterations = 0) + @Measurement(iterations = 1) + public void compression(Blackhole bh, CompressionMetrics metrics) throws IOException { + encode.benchmark(bh); + metrics.recordOperation(BLOCK_SIZE, encode.getEncodedSize(), bitsPerValue); } } diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/EncodeNonSortedIntegerBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/EncodeNonSortedIntegerBenchmark.java index f195228a723e5..413bbbdb422c7 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/EncodeNonSortedIntegerBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/EncodeNonSortedIntegerBenchmark.java @@ -9,9 +9,11 @@ package org.elasticsearch.benchmark.index.codec.tsdb; -import org.elasticsearch.benchmark.index.codec.tsdb.internal.AbstractDocValuesForUtilBenchmark; +import org.elasticsearch.benchmark.index.codec.tsdb.internal.AbstractTSDBCodecBenchmark; +import org.elasticsearch.benchmark.index.codec.tsdb.internal.CompressionMetrics; import org.elasticsearch.benchmark.index.codec.tsdb.internal.EncodeBenchmark; import org.elasticsearch.benchmark.index.codec.tsdb.internal.NonSortedIntegerSupplier; +import org.elasticsearch.benchmark.index.codec.tsdb.internal.ThroughputMetrics; import org.openjdk.jmh.annotations.Benchmark; import org.openjdk.jmh.annotations.BenchmarkMode; import org.openjdk.jmh.annotations.Fork; @@ -29,37 +31,58 @@ import java.io.IOException; import java.util.concurrent.TimeUnit; +/** + * Benchmark for encoding non-sorted integer 
patterns. + */ @Fork(value = 1) @Warmup(iterations = 3) -@Measurement(iterations = 10) -@BenchmarkMode(value = Mode.AverageTime) -@OutputTimeUnit(value = TimeUnit.NANOSECONDS) -@State(value = Scope.Benchmark) +@Measurement(iterations = 5) +@BenchmarkMode(Mode.Throughput) +@OutputTimeUnit(TimeUnit.SECONDS) +@State(Scope.Benchmark) public class EncodeNonSortedIntegerBenchmark { private static final int SEED = 17; private static final int BLOCK_SIZE = 128; - @Param({ "4", "8", "12", "16", "20", "24", "28", "32", "36", "40", "44", "48", "52", "56", "60", "64" }) + + @Param({ "1", "4", "8", "9", "16", "17", "24", "25", "32", "33", "40", "48", "56", "57", "64" }) private int bitsPerValue; - private final AbstractDocValuesForUtilBenchmark encode; + private final AbstractTSDBCodecBenchmark encode; public EncodeNonSortedIntegerBenchmark() { this.encode = new EncodeBenchmark(); - } @Setup(Level.Invocation) public void setupInvocation() throws IOException { - encode.setupInvocation(bitsPerValue); + encode.setupInvocation(); } - @Setup(Level.Iteration) - public void setupIteration() throws IOException { - encode.setupIteration(bitsPerValue, new NonSortedIntegerSupplier(SEED, bitsPerValue, BLOCK_SIZE)); + @Setup(Level.Trial) + public void setupTrial() throws IOException { + encode.setupTrial(new NonSortedIntegerSupplier(SEED, bitsPerValue, BLOCK_SIZE)); + encode.setupInvocation(); + encode.run(); + } + + @Benchmark + public void throughput(Blackhole bh, ThroughputMetrics metrics) throws IOException { + encode.benchmark(bh); + metrics.recordOperation(BLOCK_SIZE, encode.getEncodedSize()); } + /** + * Measures compression efficiency metrics (compression ratio, encoded bits/bytes per value). + * + *

Uses zero warmup and single iteration because compression metrics are deterministic: + * the same input data always produces the same encoded size. Unlike throughput measurements + * which vary due to JIT compilation and CPU state, compression ratios are constant across runs. + */ @Benchmark - public void benchmark(Blackhole bh) throws IOException { - encode.benchmark(bitsPerValue, bh); + @Warmup(iterations = 0) + @Measurement(iterations = 1) + public void compression(Blackhole bh, CompressionMetrics metrics) throws IOException { + encode.benchmark(bh); + metrics.recordOperation(BLOCK_SIZE, encode.getEncodedSize(), bitsPerValue); } } diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/internal/AbstractDocValuesForUtilBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/internal/AbstractDocValuesForUtilBenchmark.java deleted file mode 100644 index 53723f05728b5..0000000000000 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/internal/AbstractDocValuesForUtilBenchmark.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". 
- */ - -package org.elasticsearch.benchmark.index.codec.tsdb.internal; - -import org.elasticsearch.index.codec.tsdb.DocValuesForUtil; -import org.elasticsearch.index.codec.tsdb.ES87TSDBDocValuesFormat; -import org.openjdk.jmh.infra.Blackhole; - -import java.io.IOException; -import java.util.function.Supplier; - -public abstract class AbstractDocValuesForUtilBenchmark { - protected final DocValuesForUtil forUtil; - protected final int blockSize; - - public AbstractDocValuesForUtilBenchmark() { - this.forUtil = new DocValuesForUtil(ES87TSDBDocValuesFormat.NUMERIC_BLOCK_SIZE); - this.blockSize = ES87TSDBDocValuesFormat.NUMERIC_BLOCK_SIZE; - } - - public abstract void benchmark(int bitsPerValue, Blackhole bh) throws IOException; - - public abstract void setupIteration(int bitsPerValue, final Supplier arraySupplier) throws IOException; - - public abstract void setupInvocation(int bitsPerValue) throws IOException; -} diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/internal/AbstractTSDBCodecBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/internal/AbstractTSDBCodecBenchmark.java new file mode 100644 index 0000000000000..03fff36c7e07e --- /dev/null +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/internal/AbstractTSDBCodecBenchmark.java @@ -0,0 +1,127 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +package org.elasticsearch.benchmark.index.codec.tsdb.internal; + +import org.elasticsearch.index.codec.tsdb.ES87TSDBDocValuesFormat; +import org.elasticsearch.index.codec.tsdb.TSDBDocValuesEncoder; +import org.openjdk.jmh.infra.Blackhole; + +import java.io.IOException; +import java.util.function.Supplier; + +/** + * Base class for TSDB codec encode/decode benchmarks using the Template Method pattern. + * + *

This abstract class provides the common structure for benchmarking the + * {@link TSDBDocValuesEncoder}. It uses the Template Method pattern where: + *

+ * + * @see EncodeBenchmark + * @see DecodeBenchmark + */ +public abstract class AbstractTSDBCodecBenchmark { + + /** + * Extra bytes allocated beyond the raw data size to accommodate encoding metadata. + * + *

The {@link TSDBDocValuesEncoder} writes metadata alongside bit-packed data for each + * encoding step (delta, offset, GCD). This buffer headroom ensures we never overflow + * during encoding. The theoretical maximum is ~32 bytes; we use 64 for safety. + */ + protected static final int EXTRA_METADATA_SIZE = 64; + + /** The encoder instance used for all encode/decode operations. */ + protected final TSDBDocValuesEncoder encoder; + + /** Number of values per block (typically 128). */ + protected final int blockSize; + + /** + * Creates a new benchmark instance with the standard TSDB block size. + */ + public AbstractTSDBCodecBenchmark() { + this.blockSize = ES87TSDBDocValuesFormat.NUMERIC_BLOCK_SIZE; + this.encoder = new TSDBDocValuesEncoder(blockSize); + } + + /** + * Executes the core encode or decode operation. An encoder would normally implement the + * encoding logic while a decoder would implement the decoding logic. + * + * @throws IOException if encoding or decoding fails + */ + public abstract void run() throws IOException; + + /** + * Returns the output expected to be consumed by the JMH blackhole. + * + * @return the benchmark output (encoded bytes or decoded values) + */ + protected abstract Object getOutput(); + + /** + * Template method that runs the operation and consumes the result (encode or decode). + * This is the method called by JMH during benchmark iterations. + * + * @param bh the JMH blackhole for consuming results + * @throws IOException if the operation fails + */ + public void benchmark(Blackhole bh) throws IOException { + run(); + bh.consume(getOutput()); + } + + /** + * Sets up state for a new benchmark trial (once per parameter combination). + * Called once at the start of each parameter combination to initialize input data. 
+ * + * @param arraySupplier supplier that generates the input array for this trial + * @throws IOException if setup fails + */ + public abstract void setupTrial(Supplier arraySupplier) throws IOException; + + /** + * Sets up state for a new benchmark iteration. + * Called once per iteration. Default implementation does nothing. + * + * @throws IOException if setup fails + */ + public void setupIteration() throws IOException { + // Default: no per-iteration setup needed + } + + /** + * Resets state before each benchmark invocation. + * Called before every single operation to reset buffers and initialize state. + * + * @throws IOException if reset fails + */ + public abstract void setupInvocation() throws IOException; + + /** + * Returns the number of values per encoded block. + * + * @return the block size. + */ + public int getBlockSize() { + return blockSize; + } + + /** + * Returns the number of bytes produced by the last encode operation. + * + * @return encoded size in bytes + */ + public abstract int getEncodedSize(); +} diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/internal/CompressionMetrics.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/internal/CompressionMetrics.java new file mode 100644 index 0000000000000..eb124216e78bc --- /dev/null +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/internal/CompressionMetrics.java @@ -0,0 +1,129 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +package org.elasticsearch.benchmark.index.codec.tsdb.internal; + +import org.openjdk.jmh.annotations.AuxCounters; +import org.openjdk.jmh.annotations.Level; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.TearDown; + +/** + * JMH auxiliary counters for tracking compression efficiency in TSDB codec benchmarks. + * + *
<p>This class uses JMH's {@link AuxCounters} feature to report compression metrics + * alongside timing data. Metrics are accumulated during benchmark operations and + * computed at iteration teardown. + * + *
<p><b>Usage</b>
+ *
+ * <pre>{@code
+ * @Benchmark
+ * public void benchmark(Blackhole bh, CompressionMetrics metrics) {
+ *     encode.benchmark(bh);
+ *     metrics.recordOperation(BLOCK_SIZE, encode.getEncodedSize(), bitsPerValue);
+ * }
+ * }</pre>
+ */ +@AuxCounters(AuxCounters.Type.EVENTS) +@State(Scope.Thread) +public class CompressionMetrics { + + /** + * Average bytes written per value after encoding. + * Lower values indicate better compression. + */ + public double encodedBytesPerValue; + + /** + * Compression ratio: raw size (8 bytes/value) divided by encoded size. + * Higher values indicate better compression. A ratio of 8.0 means + * the data was compressed to 1 byte per value. + */ + public double compressionRatio; + + /** + * Average bits used per value after encoding. + * Compare against the nominal input {@code bitsPerValue} to assess + * compression effectiveness. + */ + public double encodedBitsPerValue; + + /** + * Ratio of actual encoded size to theoretical minimum size at nominal bit width. + * + * Lower values are better. Useful for comparing encoder versions on the same data pattern. + */ + public double overheadRatio; + + /** Number of values in each encoded block. */ + private int blockSize; + + /** Actual bytes produced after encoding one block. */ + private int encodedBytesPerBlock; + + /** The nominal bits per value being tested (benchmark parameter). */ + private int nominalBitsPerValue; + + /** + * Resets all metrics at the start of each iteration. + */ + @Setup(Level.Iteration) + public void setupIteration() { + encodedBytesPerValue = 0; + compressionRatio = 0; + encodedBitsPerValue = 0; + overheadRatio = 0; + blockSize = 0; + encodedBytesPerBlock = 0; + nominalBitsPerValue = 0; + } + + /** + * Records metrics for a single benchmark operation. + * Call this method at the end of each {@code @Benchmark} method. 
+ * + * @param blockSize number of values per encoded block + * @param encodedBytes actual bytes produced after encoding one block + * @param nominalBits the nominal bits per value being tested + */ + public void recordOperation(int blockSize, int encodedBytes, int nominalBits) { + this.blockSize = blockSize; + this.encodedBytesPerBlock = encodedBytes; + this.nominalBitsPerValue = nominalBits; + } + + /** + * Computes final compression metrics at the end of each iteration. + * Called automatically by JMH after all operations in an iteration complete. + */ + @TearDown(Level.Iteration) + public void computeMetrics() { + if (blockSize == 0) { + return; + } + + long rawBytes = (long) blockSize * Long.BYTES; + long theoreticalMin = Math.ceilDiv((long) blockSize * nominalBitsPerValue, Byte.SIZE); + + encodedBytesPerValue = (double) encodedBytesPerBlock / blockSize; + compressionRatio = (double) rawBytes / encodedBytesPerBlock; + encodedBitsPerValue = encodedBytesPerValue * Byte.SIZE; + overheadRatio = theoreticalMin > 0 ? (double) encodedBytesPerBlock / theoreticalMin : 0; + } +} diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/internal/ConstantIntegerSupplier.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/internal/ConstantIntegerSupplier.java index 34acd82d812c8..01a90361558f1 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/internal/ConstantIntegerSupplier.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/internal/ConstantIntegerSupplier.java @@ -23,8 +23,8 @@ public ConstantIntegerSupplier(int seed, int bitsPerValue, int size) { @Override public long[] get() { long[] data = new long[size]; - long max = 1L << bitsPerValue; - Arrays.fill(data, random.nextLong(max)); + long value = bitsPerValue == 64 ? 
random.nextLong() : random.nextLong(1L << bitsPerValue); + Arrays.fill(data, value); return data; } } diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/internal/DecodeBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/internal/DecodeBenchmark.java index 284324b3d9206..732c96abb1231 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/internal/DecodeBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/internal/DecodeBenchmark.java @@ -11,39 +11,58 @@ import org.apache.lucene.store.ByteArrayDataInput; import org.apache.lucene.store.ByteArrayDataOutput; -import org.apache.lucene.store.DataOutput; -import org.openjdk.jmh.infra.Blackhole; import java.io.IOException; import java.util.function.Supplier; -public class DecodeBenchmark extends AbstractDocValuesForUtilBenchmark { - protected ByteArrayDataInput dataInput; - protected long[] output; - protected long[] input; - private byte[] outputBuffer; - private byte[] inputBuffer; +/** + * Decoding benchmark for TSDB doc values. + * + *
<p>Measures the performance of {@link org.elasticsearch.index.codec.tsdb.TSDBDocValuesEncoder#decode}, + * which decompresses a byte buffer back into a block of long values. + * + *
<p>During setup, input data is encoded to create a realistic compressed buffer for decoding. + * + * @see EncodeBenchmark + */ +public final class DecodeBenchmark extends AbstractTSDBCodecBenchmark { + + private ByteArrayDataInput dataInput; + private long[] output; + private byte[] encodedBuffer; @Override - public void setupIteration(int bitsPerValue, final Supplier<long[]> arraySupplier) throws IOException { + public void setupTrial(Supplier<long[]> arraySupplier) throws IOException { this.output = new long[blockSize]; - this.input = arraySupplier.get(); - this.outputBuffer = new byte[Long.BYTES * blockSize]; - final DataOutput dataOutput = new ByteArrayDataOutput(outputBuffer); - forUtil.encode(this.input, bitsPerValue, dataOutput); - this.inputBuffer = new byte[Long.BYTES * blockSize]; - this.dataInput = new ByteArrayDataInput(this.inputBuffer); - System.arraycopy(outputBuffer, 0, inputBuffer, 0, outputBuffer.length); + long[] input = arraySupplier.get(); + + byte[] tempBuffer = new byte[Long.BYTES * blockSize + EXTRA_METADATA_SIZE]; + ByteArrayDataOutput dataOutput = new ByteArrayDataOutput(tempBuffer); + encoder.encode(input, dataOutput); + int encodedLength = dataOutput.getPosition(); + + this.encodedBuffer = new byte[encodedLength]; + System.arraycopy(tempBuffer, 0, encodedBuffer, 0, encodedLength); + this.dataInput = new ByteArrayDataInput(this.encodedBuffer); + } + + @Override + public void setupInvocation() { + this.dataInput.reset(this.encodedBuffer); + } + + @Override + public void run() throws IOException { + encoder.decode(this.dataInput, this.output); + } @Override - public void setupInvocation(int bitsPerValue) { - this.dataInput.reset(this.inputBuffer); + protected Object getOutput() { + return this.output; + } @Override - public void benchmark(int bitsPerValue, Blackhole bh) throws IOException { - forUtil.decode(bitsPerValue, this.dataInput, this.output); - bh.consume(this.output); + public int getEncodedSize() { + return encodedBuffer.length; + } } diff --git 
a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/internal/DecreasingIntegerSupplier.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/internal/DecreasingIntegerSupplier.java index 7813260298bef..fa7c16c38a491 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/internal/DecreasingIntegerSupplier.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/internal/DecreasingIntegerSupplier.java @@ -24,11 +24,10 @@ public DecreasingIntegerSupplier(int seed, int bitsPerValue, int size) { @Override public long[] get() { final long[] data = new long[size]; - long max = 1L << bitsPerValue; + final long max = 1L << bitsPerValue; for (int i = 0; i < size; i++) { - data[i] = random.nextLong(max); + data[i] = bitsPerValue == 64 ? random.nextLong() : random.nextLong(max); } return Arrays.stream(data).boxed().sorted(Collections.reverseOrder()).mapToLong(Long::longValue).toArray(); - } } diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/internal/EncodeBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/internal/EncodeBenchmark.java index 9b0dba1cc6038..475e939c9cb51 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/internal/EncodeBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/internal/EncodeBenchmark.java @@ -10,31 +10,51 @@ package org.elasticsearch.benchmark.index.codec.tsdb.internal; import org.apache.lucene.store.ByteArrayDataOutput; -import org.openjdk.jmh.infra.Blackhole; import java.io.IOException; import java.util.function.Supplier; -public class EncodeBenchmark extends AbstractDocValuesForUtilBenchmark { - protected ByteArrayDataOutput dataOutput; - protected long[] input; - protected byte[] output; +/** + * Encoding benchmark for TSDB doc values. + * + *
<p>Measures the performance of {@link org.elasticsearch.index.codec.tsdb.TSDBDocValuesEncoder#encode}, + * which compresses a block of long values into a byte buffer. + * + * @see DecodeBenchmark + */ +public final class EncodeBenchmark extends AbstractTSDBCodecBenchmark { + + private ByteArrayDataOutput dataOutput; + private long[] originalInput; + private long[] input; + private byte[] output; @Override - public void setupIteration(int unUsedBitsPerValue, Supplier<long[]> arraySupplier) throws IOException { - this.input = arraySupplier.get(); - this.output = new byte[Long.BYTES * blockSize]; + public void setupTrial(Supplier<long[]> arraySupplier) throws IOException { + this.originalInput = arraySupplier.get(); + this.input = new long[originalInput.length]; + this.output = new byte[Long.BYTES * blockSize + EXTRA_METADATA_SIZE]; this.dataOutput = new ByteArrayDataOutput(this.output); } @Override - public void setupInvocation(int unusedBitsPerValue) { + public void setupInvocation() { + System.arraycopy(originalInput, 0, input, 0, originalInput.length); dataOutput.reset(this.output); } @Override - public void benchmark(int bitsPerValue, Blackhole bh) throws IOException { - forUtil.encode(this.input, bitsPerValue, this.dataOutput); - bh.consume(this.dataOutput); + public void run() throws IOException { + encoder.encode(this.input, this.dataOutput); + } + + @Override + protected Object getOutput() { + return this.dataOutput; + } + + @Override + public int getEncodedSize() { + return dataOutput.getPosition(); + } } diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/internal/IncreasingIntegerSupplier.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/internal/IncreasingIntegerSupplier.java index ed3c433d548fb..a48c368dd5330 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/internal/IncreasingIntegerSupplier.java +++ 
b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/internal/IncreasingIntegerSupplier.java @@ -24,9 +24,9 @@ public IncreasingIntegerSupplier(int seed, int bitsPerValue, int size) { @Override public long[] get() { final long[] data = new long[size]; - long max = 1L << bitsPerValue; + final long max = 1L << bitsPerValue; for (int i = 0; i < size; i++) { - data[i] = random.nextLong(max); + data[i] = bitsPerValue == 64 ? random.nextLong() : random.nextLong(max); } Arrays.sort(data); return data; diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/internal/NonSortedIntegerSupplier.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/internal/NonSortedIntegerSupplier.java index d8ffa07d4e252..ba3f24f4e12d6 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/internal/NonSortedIntegerSupplier.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/internal/NonSortedIntegerSupplier.java @@ -22,9 +22,9 @@ public NonSortedIntegerSupplier(int seed, int bitsPerValue, int size) { @Override public long[] get() { final long[] data = new long[size]; - long max = 1L << bitsPerValue; + final long max = 1L << bitsPerValue; for (int i = 0; i < size; i++) { - data[i] = random.nextLong(max); + data[i] = bitsPerValue == 64 ? random.nextLong() : random.nextLong(max); } return data; } diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/internal/ThroughputMetrics.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/internal/ThroughputMetrics.java new file mode 100644 index 0000000000000..cc021950a28ea --- /dev/null +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/internal/ThroughputMetrics.java @@ -0,0 +1,69 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.benchmark.index.codec.tsdb.internal; + +import org.openjdk.jmh.annotations.AuxCounters; +import org.openjdk.jmh.annotations.Level; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; + +/** + * JMH auxiliary counters for tracking throughput in TSDB codec benchmarks. + * + *
<p>This class uses JMH's {@link AuxCounters} with {@link AuxCounters.Type#OPERATIONS} + * to report throughput metrics normalized by time. JMH automatically divides accumulated + * values by benchmark time to produce rate metrics (e.g., bytes/s, values/s). + * + *
<p>Important: {@link AuxCounters.Type#OPERATIONS} only works with + * {@link org.openjdk.jmh.annotations.Mode#Throughput} or + * {@link org.openjdk.jmh.annotations.Mode#AverageTime}. It does NOT work with + * {@link org.openjdk.jmh.annotations.Mode#SampleTime}. + * + *
<p><b>Usage</b>
+ *
+ * <pre>{@code
+ * @BenchmarkMode(Mode.Throughput)
+ * @Benchmark
+ * public void benchmark(Blackhole bh, ThroughputMetrics metrics) {
+ *     encode.benchmark(bh);
+ *     metrics.recordOperation(BLOCK_SIZE, encode.getEncodedSize());
+ * }
+ * }</pre>
+ */ +@AuxCounters(AuxCounters.Type.OPERATIONS) +@State(Scope.Thread) +public class ThroughputMetrics { + + /** Bytes encoded or decoded. JMH normalizes by time to produce bytes/s. */ + public long encodedBytes; + + /** Values processed. JMH normalizes by time to produce values/s. */ + public long valuesProcessed; + + /** + * Resets all metrics at the start of each iteration. + */ + @Setup(Level.Iteration) + public void setupIteration() { + encodedBytes = 0; + valuesProcessed = 0; + } + + /** + * Records throughput data for a single benchmark operation. + * + * @param blockSize number of values processed in this operation + * @param bytes number of bytes produced or consumed + */ + public void recordOperation(int blockSize, int bytes) { + encodedBytes += bytes; + valuesProcessed += blockSize; + } +}