diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/DecodeConstantIntegerBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/DecodeConstantIntegerBenchmark.java index 4ade83728b73b..2d71baf360b1a 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/DecodeConstantIntegerBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/DecodeConstantIntegerBenchmark.java @@ -9,9 +9,10 @@ package org.elasticsearch.benchmark.index.codec.tsdb; -import org.elasticsearch.benchmark.index.codec.tsdb.internal.AbstractDocValuesForUtilBenchmark; +import org.elasticsearch.benchmark.index.codec.tsdb.internal.AbstractTSDBCodecBenchmark; import org.elasticsearch.benchmark.index.codec.tsdb.internal.ConstantIntegerSupplier; import org.elasticsearch.benchmark.index.codec.tsdb.internal.DecodeBenchmark; +import org.elasticsearch.benchmark.index.codec.tsdb.internal.ThroughputMetrics; import org.openjdk.jmh.annotations.Benchmark; import org.openjdk.jmh.annotations.BenchmarkMode; import org.openjdk.jmh.annotations.Fork; @@ -29,19 +30,23 @@ import java.io.IOException; import java.util.concurrent.TimeUnit; +/** + * Benchmark for decoding constant integer patterns. 
+ */ @Fork(value = 1) @Warmup(iterations = 3) -@Measurement(iterations = 10) -@BenchmarkMode(value = Mode.AverageTime) -@OutputTimeUnit(value = TimeUnit.NANOSECONDS) -@State(value = Scope.Benchmark) +@Measurement(iterations = 5) +@BenchmarkMode(Mode.Throughput) +@OutputTimeUnit(TimeUnit.SECONDS) +@State(Scope.Benchmark) public class DecodeConstantIntegerBenchmark { private static final int SEED = 17; private static final int BLOCK_SIZE = 128; - @Param({ "15" }) + + @Param({ "1", "4", "8", "9", "16", "17", "24", "25", "32", "33", "40", "48", "56", "57", "64" }) private int bitsPerValue; - private final AbstractDocValuesForUtilBenchmark decode; + private final AbstractTSDBCodecBenchmark decode; public DecodeConstantIntegerBenchmark() { this.decode = new DecodeBenchmark(); @@ -49,16 +54,17 @@ public DecodeConstantIntegerBenchmark() { @Setup(Level.Invocation) public void setupInvocation() throws IOException { - decode.setupInvocation(bitsPerValue); + decode.setupInvocation(); } - @Setup(Level.Iteration) - public void setupIteration() throws IOException { - decode.setupIteration(bitsPerValue, new ConstantIntegerSupplier(SEED, bitsPerValue, BLOCK_SIZE)); + @Setup(Level.Trial) + public void setupTrial() throws IOException { + decode.setupTrial(new ConstantIntegerSupplier(SEED, bitsPerValue, BLOCK_SIZE)); } @Benchmark - public void benchmark(Blackhole bh) throws IOException { - decode.benchmark(bitsPerValue, bh); + public void throughput(Blackhole bh, ThroughputMetrics metrics) throws IOException { + decode.benchmark(bh); + metrics.recordOperation(BLOCK_SIZE, decode.getEncodedSize()); } } diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/DecodeDecreasingIntegerBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/DecodeDecreasingIntegerBenchmark.java index f41ae3a912a94..ab1887ed2088f 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/DecodeDecreasingIntegerBenchmark.java +++ 
b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/DecodeDecreasingIntegerBenchmark.java @@ -9,9 +9,10 @@ package org.elasticsearch.benchmark.index.codec.tsdb; -import org.elasticsearch.benchmark.index.codec.tsdb.internal.AbstractDocValuesForUtilBenchmark; +import org.elasticsearch.benchmark.index.codec.tsdb.internal.AbstractTSDBCodecBenchmark; import org.elasticsearch.benchmark.index.codec.tsdb.internal.DecodeBenchmark; import org.elasticsearch.benchmark.index.codec.tsdb.internal.DecreasingIntegerSupplier; +import org.elasticsearch.benchmark.index.codec.tsdb.internal.ThroughputMetrics; import org.openjdk.jmh.annotations.Benchmark; import org.openjdk.jmh.annotations.BenchmarkMode; import org.openjdk.jmh.annotations.Fork; @@ -29,19 +30,23 @@ import java.io.IOException; import java.util.concurrent.TimeUnit; +/** + * Benchmark for decoding decreasing integer patterns. + */ @Fork(value = 1) @Warmup(iterations = 3) -@Measurement(iterations = 10) -@BenchmarkMode(value = Mode.AverageTime) -@OutputTimeUnit(value = TimeUnit.NANOSECONDS) -@State(value = Scope.Benchmark) +@Measurement(iterations = 5) +@BenchmarkMode(Mode.Throughput) +@OutputTimeUnit(TimeUnit.SECONDS) +@State(Scope.Benchmark) public class DecodeDecreasingIntegerBenchmark { private static final int SEED = 17; private static final int BLOCK_SIZE = 128; - @Param({ "4", "8", "12", "16", "20", "24", "28", "32", "36", "40", "44", "48", "52", "56", "60", "64" }) + + @Param({ "1", "4", "8", "9", "16", "17", "24", "25", "32", "33", "40", "48", "56", "57", "64" }) private int bitsPerValue; - private final AbstractDocValuesForUtilBenchmark decode; + private final AbstractTSDBCodecBenchmark decode; public DecodeDecreasingIntegerBenchmark() { this.decode = new DecodeBenchmark(); @@ -49,16 +54,17 @@ public DecodeDecreasingIntegerBenchmark() { @Setup(Level.Invocation) public void setupInvocation() throws IOException { - decode.setupInvocation(bitsPerValue); + decode.setupInvocation(); } - 
@Setup(Level.Iteration) - public void setupIteration() throws IOException { - decode.setupIteration(bitsPerValue, new DecreasingIntegerSupplier(SEED, bitsPerValue, BLOCK_SIZE)); + @Setup(Level.Trial) + public void setupTrial() throws IOException { + decode.setupTrial(new DecreasingIntegerSupplier(SEED, bitsPerValue, BLOCK_SIZE)); } @Benchmark - public void benchmark(Blackhole bh) throws IOException { - decode.benchmark(bitsPerValue, bh); + public void throughput(Blackhole bh, ThroughputMetrics metrics) throws IOException { + decode.benchmark(bh); + metrics.recordOperation(BLOCK_SIZE, decode.getEncodedSize()); } } diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/DecodeIncreasingIntegerBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/DecodeIncreasingIntegerBenchmark.java index 40d0d41b4aefc..5655210bc9f18 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/DecodeIncreasingIntegerBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/DecodeIncreasingIntegerBenchmark.java @@ -9,9 +9,10 @@ package org.elasticsearch.benchmark.index.codec.tsdb; -import org.elasticsearch.benchmark.index.codec.tsdb.internal.AbstractDocValuesForUtilBenchmark; +import org.elasticsearch.benchmark.index.codec.tsdb.internal.AbstractTSDBCodecBenchmark; import org.elasticsearch.benchmark.index.codec.tsdb.internal.DecodeBenchmark; import org.elasticsearch.benchmark.index.codec.tsdb.internal.IncreasingIntegerSupplier; +import org.elasticsearch.benchmark.index.codec.tsdb.internal.ThroughputMetrics; import org.openjdk.jmh.annotations.Benchmark; import org.openjdk.jmh.annotations.BenchmarkMode; import org.openjdk.jmh.annotations.Fork; @@ -29,38 +30,41 @@ import java.io.IOException; import java.util.concurrent.TimeUnit; +/** + * Benchmark for decoding increasing integer patterns. 
+ */ @Fork(value = 1) @Warmup(iterations = 3) -@Measurement(iterations = 10) -@BenchmarkMode(value = Mode.AverageTime) -@OutputTimeUnit(value = TimeUnit.NANOSECONDS) -@State(value = Scope.Benchmark) +@Measurement(iterations = 5) +@BenchmarkMode(Mode.Throughput) +@OutputTimeUnit(TimeUnit.SECONDS) +@State(Scope.Benchmark) public class DecodeIncreasingIntegerBenchmark { private static final int SEED = 17; private static final int BLOCK_SIZE = 128; - @Param({ "4", "8", "12", "16", "20", "24", "28", "32", "36", "40", "44", "48", "52", "56", "60", "64" }) + + @Param({ "1", "4", "8", "9", "16", "17", "24", "25", "32", "33", "40", "48", "56", "57", "64" }) private int bitsPerValue; - private final AbstractDocValuesForUtilBenchmark decode; + private final AbstractTSDBCodecBenchmark decode; public DecodeIncreasingIntegerBenchmark() { this.decode = new DecodeBenchmark(); - } @Setup(Level.Invocation) public void setupInvocation() throws IOException { - decode.setupInvocation(bitsPerValue); + decode.setupInvocation(); } - @Setup(Level.Iteration) - public void setupIteration() throws IOException { - decode.setupIteration(bitsPerValue, new IncreasingIntegerSupplier(SEED, bitsPerValue, BLOCK_SIZE)); + @Setup(Level.Trial) + public void setupTrial() throws IOException { + decode.setupTrial(new IncreasingIntegerSupplier(SEED, bitsPerValue, BLOCK_SIZE)); } @Benchmark - public void benchmark(Blackhole bh) throws IOException { - decode.benchmark(bitsPerValue, bh); - + public void throughput(Blackhole bh, ThroughputMetrics metrics) throws IOException { + decode.benchmark(bh); + metrics.recordOperation(BLOCK_SIZE, decode.getEncodedSize()); } } diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/DecodeNonSortedIntegerBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/DecodeNonSortedIntegerBenchmark.java index 7eea2d9d7a70e..617e816a6754c 100644 --- 
a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/DecodeNonSortedIntegerBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/DecodeNonSortedIntegerBenchmark.java @@ -9,9 +9,10 @@ package org.elasticsearch.benchmark.index.codec.tsdb; -import org.elasticsearch.benchmark.index.codec.tsdb.internal.AbstractDocValuesForUtilBenchmark; +import org.elasticsearch.benchmark.index.codec.tsdb.internal.AbstractTSDBCodecBenchmark; import org.elasticsearch.benchmark.index.codec.tsdb.internal.DecodeBenchmark; import org.elasticsearch.benchmark.index.codec.tsdb.internal.NonSortedIntegerSupplier; +import org.elasticsearch.benchmark.index.codec.tsdb.internal.ThroughputMetrics; import org.openjdk.jmh.annotations.Benchmark; import org.openjdk.jmh.annotations.BenchmarkMode; import org.openjdk.jmh.annotations.Fork; @@ -29,37 +30,41 @@ import java.io.IOException; import java.util.concurrent.TimeUnit; +/** + * Benchmark for decoding non-sorted integer patterns. 
+ */ @Fork(value = 1) @Warmup(iterations = 3) -@Measurement(iterations = 10) -@BenchmarkMode(value = Mode.AverageTime) -@OutputTimeUnit(value = TimeUnit.NANOSECONDS) -@State(value = Scope.Benchmark) +@Measurement(iterations = 5) +@BenchmarkMode(Mode.Throughput) +@OutputTimeUnit(TimeUnit.SECONDS) +@State(Scope.Benchmark) public class DecodeNonSortedIntegerBenchmark { private static final int SEED = 17; private static final int BLOCK_SIZE = 128; - @Param({ "4", "8", "12", "16", "20", "24", "28", "32", "36", "40", "44", "48", "52", "56", "60", "64" }) + + @Param({ "1", "4", "8", "9", "16", "17", "24", "25", "32", "33", "40", "48", "56", "57", "64" }) private int bitsPerValue; - private final AbstractDocValuesForUtilBenchmark decode; + private final AbstractTSDBCodecBenchmark decode; public DecodeNonSortedIntegerBenchmark() { this.decode = new DecodeBenchmark(); - } @Setup(Level.Invocation) public void setupInvocation() throws IOException { - decode.setupInvocation(bitsPerValue); + decode.setupInvocation(); } - @Setup(Level.Iteration) - public void setupIteration() throws IOException { - decode.setupIteration(bitsPerValue, new NonSortedIntegerSupplier(SEED, bitsPerValue, BLOCK_SIZE)); + @Setup(Level.Trial) + public void setupTrial() throws IOException { + decode.setupTrial(new NonSortedIntegerSupplier(SEED, bitsPerValue, BLOCK_SIZE)); } @Benchmark - public void benchmark(Blackhole bh) throws IOException { - decode.benchmark(bitsPerValue, bh); + public void throughput(Blackhole bh, ThroughputMetrics metrics) throws IOException { + decode.benchmark(bh); + metrics.recordOperation(BLOCK_SIZE, decode.getEncodedSize()); } } diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/EncodeConstantIntegerBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/EncodeConstantIntegerBenchmark.java index 47579b3acd410..7fe1cd755b35b 100644 --- 
a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/EncodeConstantIntegerBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/EncodeConstantIntegerBenchmark.java @@ -9,9 +9,11 @@ package org.elasticsearch.benchmark.index.codec.tsdb; -import org.elasticsearch.benchmark.index.codec.tsdb.internal.AbstractDocValuesForUtilBenchmark; +import org.elasticsearch.benchmark.index.codec.tsdb.internal.AbstractTSDBCodecBenchmark; +import org.elasticsearch.benchmark.index.codec.tsdb.internal.CompressionMetrics; import org.elasticsearch.benchmark.index.codec.tsdb.internal.ConstantIntegerSupplier; import org.elasticsearch.benchmark.index.codec.tsdb.internal.EncodeBenchmark; +import org.elasticsearch.benchmark.index.codec.tsdb.internal.ThroughputMetrics; import org.openjdk.jmh.annotations.Benchmark; import org.openjdk.jmh.annotations.BenchmarkMode; import org.openjdk.jmh.annotations.Fork; @@ -29,37 +31,58 @@ import java.io.IOException; import java.util.concurrent.TimeUnit; +/** + * Benchmark for encoding constant integer patterns. 
+ */ @Fork(value = 1) @Warmup(iterations = 3) -@Measurement(iterations = 10) -@BenchmarkMode(value = Mode.AverageTime) -@OutputTimeUnit(value = TimeUnit.NANOSECONDS) -@State(value = Scope.Benchmark) +@Measurement(iterations = 5) +@BenchmarkMode(Mode.Throughput) +@OutputTimeUnit(TimeUnit.SECONDS) +@State(Scope.Benchmark) public class EncodeConstantIntegerBenchmark { private static final int SEED = 17; private static final int BLOCK_SIZE = 128; - @Param({ "15" }) + + @Param({ "1", "4", "8", "9", "16", "17", "24", "25", "32", "33", "40", "48", "56", "57", "64" }) private int bitsPerValue; - private final AbstractDocValuesForUtilBenchmark encode; + private final AbstractTSDBCodecBenchmark encode; public EncodeConstantIntegerBenchmark() { this.encode = new EncodeBenchmark(); - } @Setup(Level.Invocation) public void setupInvocation() throws IOException { - encode.setupInvocation(bitsPerValue); + encode.setupInvocation(); } - @Setup(Level.Iteration) - public void setupIteration() throws IOException { - encode.setupIteration(bitsPerValue, new ConstantIntegerSupplier(SEED, bitsPerValue, BLOCK_SIZE)); + @Setup(Level.Trial) + public void setupTrial() throws IOException { + encode.setupTrial(new ConstantIntegerSupplier(SEED, bitsPerValue, BLOCK_SIZE)); + encode.setupInvocation(); + encode.run(); + } + + @Benchmark + public void throughput(Blackhole bh, ThroughputMetrics metrics) throws IOException { + encode.benchmark(bh); + metrics.recordOperation(BLOCK_SIZE, encode.getEncodedSize()); } + /** + * Measures compression efficiency metrics (compression ratio, encoded bits/bytes per value). + * + *
Uses zero warmup and single iteration because compression metrics are deterministic: + * the same input data always produces the same encoded size. Unlike throughput measurements + * which vary due to JIT compilation and CPU state, compression ratios are constant across runs. + */ @Benchmark - public void benchmark(Blackhole bh) throws IOException { - encode.benchmark(bitsPerValue, bh); + @Warmup(iterations = 0) + @Measurement(iterations = 1) + public void compression(Blackhole bh, CompressionMetrics metrics) throws IOException { + encode.benchmark(bh); + metrics.recordOperation(BLOCK_SIZE, encode.getEncodedSize(), bitsPerValue); } } diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/EncodeDecreasingIntegerBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/EncodeDecreasingIntegerBenchmark.java index 73d0e13a76a77..2fe7f75c7baf3 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/EncodeDecreasingIntegerBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/EncodeDecreasingIntegerBenchmark.java @@ -9,9 +9,11 @@ package org.elasticsearch.benchmark.index.codec.tsdb; -import org.elasticsearch.benchmark.index.codec.tsdb.internal.AbstractDocValuesForUtilBenchmark; +import org.elasticsearch.benchmark.index.codec.tsdb.internal.AbstractTSDBCodecBenchmark; +import org.elasticsearch.benchmark.index.codec.tsdb.internal.CompressionMetrics; import org.elasticsearch.benchmark.index.codec.tsdb.internal.DecreasingIntegerSupplier; import org.elasticsearch.benchmark.index.codec.tsdb.internal.EncodeBenchmark; +import org.elasticsearch.benchmark.index.codec.tsdb.internal.ThroughputMetrics; import org.openjdk.jmh.annotations.Benchmark; import org.openjdk.jmh.annotations.BenchmarkMode; import org.openjdk.jmh.annotations.Fork; @@ -29,19 +31,23 @@ import java.io.IOException; import java.util.concurrent.TimeUnit; +/** + * Benchmark for encoding decreasing 
integer patterns. + */ @Fork(value = 1) @Warmup(iterations = 3) -@Measurement(iterations = 10) -@BenchmarkMode(value = Mode.AverageTime) -@OutputTimeUnit(value = TimeUnit.NANOSECONDS) -@State(value = Scope.Benchmark) +@Measurement(iterations = 5) +@BenchmarkMode(Mode.Throughput) +@OutputTimeUnit(TimeUnit.SECONDS) +@State(Scope.Benchmark) public class EncodeDecreasingIntegerBenchmark { private static final int SEED = 17; private static final int BLOCK_SIZE = 128; - @Param({ "4", "8", "12", "16", "20", "24", "28", "32", "36", "40", "44", "48", "52", "56", "60", "64" }) + + @Param({ "1", "4", "8", "9", "16", "17", "24", "25", "32", "33", "40", "48", "56", "57", "64" }) private int bitsPerValue; - private final AbstractDocValuesForUtilBenchmark encode; + private final AbstractTSDBCodecBenchmark encode; public EncodeDecreasingIntegerBenchmark() { this.encode = new EncodeBenchmark(); @@ -49,16 +55,34 @@ public EncodeDecreasingIntegerBenchmark() { @Setup(Level.Invocation) public void setupInvocation() throws IOException { - encode.setupInvocation(bitsPerValue); + encode.setupInvocation(); } - @Setup(Level.Iteration) - public void setupIteration() throws IOException { - encode.setupIteration(bitsPerValue, new DecreasingIntegerSupplier(SEED, bitsPerValue, BLOCK_SIZE)); + @Setup(Level.Trial) + public void setupTrial() throws IOException { + encode.setupTrial(new DecreasingIntegerSupplier(SEED, bitsPerValue, BLOCK_SIZE)); + encode.setupInvocation(); + encode.run(); + } + + @Benchmark + public void throughput(Blackhole bh, ThroughputMetrics metrics) throws IOException { + encode.benchmark(bh); + metrics.recordOperation(BLOCK_SIZE, encode.getEncodedSize()); } + /** + * Measures compression efficiency metrics (compression ratio, encoded bits/bytes per value). + * + *
Uses zero warmup and single iteration because compression metrics are deterministic: + * the same input data always produces the same encoded size. Unlike throughput measurements + * which vary due to JIT compilation and CPU state, compression ratios are constant across runs. + */ @Benchmark - public void benchmark(Blackhole bh) throws IOException { - encode.benchmark(bitsPerValue, bh); + @Warmup(iterations = 0) + @Measurement(iterations = 1) + public void compression(Blackhole bh, CompressionMetrics metrics) throws IOException { + encode.benchmark(bh); + metrics.recordOperation(BLOCK_SIZE, encode.getEncodedSize(), bitsPerValue); } } diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/EncodeIncreasingIntegerBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/EncodeIncreasingIntegerBenchmark.java index f9e164972bebb..699fdbac349b1 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/EncodeIncreasingIntegerBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/EncodeIncreasingIntegerBenchmark.java @@ -9,9 +9,11 @@ package org.elasticsearch.benchmark.index.codec.tsdb; -import org.elasticsearch.benchmark.index.codec.tsdb.internal.AbstractDocValuesForUtilBenchmark; +import org.elasticsearch.benchmark.index.codec.tsdb.internal.AbstractTSDBCodecBenchmark; +import org.elasticsearch.benchmark.index.codec.tsdb.internal.CompressionMetrics; import org.elasticsearch.benchmark.index.codec.tsdb.internal.EncodeBenchmark; import org.elasticsearch.benchmark.index.codec.tsdb.internal.IncreasingIntegerSupplier; +import org.elasticsearch.benchmark.index.codec.tsdb.internal.ThroughputMetrics; import org.openjdk.jmh.annotations.Benchmark; import org.openjdk.jmh.annotations.BenchmarkMode; import org.openjdk.jmh.annotations.Fork; @@ -29,19 +31,23 @@ import java.io.IOException; import java.util.concurrent.TimeUnit; +/** + * Benchmark for encoding increasing 
integer patterns. + */ @Fork(value = 1) @Warmup(iterations = 3) -@Measurement(iterations = 10) -@BenchmarkMode(value = Mode.AverageTime) -@OutputTimeUnit(value = TimeUnit.NANOSECONDS) -@State(value = Scope.Benchmark) +@Measurement(iterations = 5) +@BenchmarkMode(Mode.Throughput) +@OutputTimeUnit(TimeUnit.SECONDS) +@State(Scope.Benchmark) public class EncodeIncreasingIntegerBenchmark { private static final int SEED = 17; private static final int BLOCK_SIZE = 128; - @Param({ "4", "8", "12", "16", "20", "24", "28", "32", "36", "40", "44", "48", "52", "56", "60", "64" }) + + @Param({ "1", "4", "8", "9", "16", "17", "24", "25", "32", "33", "40", "48", "56", "57", "64" }) private int bitsPerValue; - private final AbstractDocValuesForUtilBenchmark encode; + private final AbstractTSDBCodecBenchmark encode; public EncodeIncreasingIntegerBenchmark() { this.encode = new EncodeBenchmark(); @@ -49,16 +55,34 @@ public EncodeIncreasingIntegerBenchmark() { @Setup(Level.Invocation) public void setupInvocation() throws IOException { - encode.setupInvocation(bitsPerValue); + encode.setupInvocation(); } - @Setup(Level.Iteration) - public void setupIteration() throws IOException { - encode.setupIteration(bitsPerValue, new IncreasingIntegerSupplier(SEED, bitsPerValue, BLOCK_SIZE)); + @Setup(Level.Trial) + public void setupTrial() throws IOException { + encode.setupTrial(new IncreasingIntegerSupplier(SEED, bitsPerValue, BLOCK_SIZE)); + encode.setupInvocation(); + encode.run(); + } + + @Benchmark + public void throughput(Blackhole bh, ThroughputMetrics metrics) throws IOException { + encode.benchmark(bh); + metrics.recordOperation(BLOCK_SIZE, encode.getEncodedSize()); } + /** + * Measures compression efficiency metrics (compression ratio, encoded bits/bytes per value). + * + *
Uses zero warmup and single iteration because compression metrics are deterministic: + * the same input data always produces the same encoded size. Unlike throughput measurements + * which vary due to JIT compilation and CPU state, compression ratios are constant across runs. + */ @Benchmark - public void benchmark(Blackhole bh) throws IOException { - encode.benchmark(bitsPerValue, bh); + @Warmup(iterations = 0) + @Measurement(iterations = 1) + public void compression(Blackhole bh, CompressionMetrics metrics) throws IOException { + encode.benchmark(bh); + metrics.recordOperation(BLOCK_SIZE, encode.getEncodedSize(), bitsPerValue); } } diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/EncodeNonSortedIntegerBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/EncodeNonSortedIntegerBenchmark.java index f195228a723e5..413bbbdb422c7 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/EncodeNonSortedIntegerBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/EncodeNonSortedIntegerBenchmark.java @@ -9,9 +9,11 @@ package org.elasticsearch.benchmark.index.codec.tsdb; -import org.elasticsearch.benchmark.index.codec.tsdb.internal.AbstractDocValuesForUtilBenchmark; +import org.elasticsearch.benchmark.index.codec.tsdb.internal.AbstractTSDBCodecBenchmark; +import org.elasticsearch.benchmark.index.codec.tsdb.internal.CompressionMetrics; import org.elasticsearch.benchmark.index.codec.tsdb.internal.EncodeBenchmark; import org.elasticsearch.benchmark.index.codec.tsdb.internal.NonSortedIntegerSupplier; +import org.elasticsearch.benchmark.index.codec.tsdb.internal.ThroughputMetrics; import org.openjdk.jmh.annotations.Benchmark; import org.openjdk.jmh.annotations.BenchmarkMode; import org.openjdk.jmh.annotations.Fork; @@ -29,37 +31,58 @@ import java.io.IOException; import java.util.concurrent.TimeUnit; +/** + * Benchmark for encoding non-sorted integer 
patterns. + */ @Fork(value = 1) @Warmup(iterations = 3) -@Measurement(iterations = 10) -@BenchmarkMode(value = Mode.AverageTime) -@OutputTimeUnit(value = TimeUnit.NANOSECONDS) -@State(value = Scope.Benchmark) +@Measurement(iterations = 5) +@BenchmarkMode(Mode.Throughput) +@OutputTimeUnit(TimeUnit.SECONDS) +@State(Scope.Benchmark) public class EncodeNonSortedIntegerBenchmark { private static final int SEED = 17; private static final int BLOCK_SIZE = 128; - @Param({ "4", "8", "12", "16", "20", "24", "28", "32", "36", "40", "44", "48", "52", "56", "60", "64" }) + + @Param({ "1", "4", "8", "9", "16", "17", "24", "25", "32", "33", "40", "48", "56", "57", "64" }) private int bitsPerValue; - private final AbstractDocValuesForUtilBenchmark encode; + private final AbstractTSDBCodecBenchmark encode; public EncodeNonSortedIntegerBenchmark() { this.encode = new EncodeBenchmark(); - } @Setup(Level.Invocation) public void setupInvocation() throws IOException { - encode.setupInvocation(bitsPerValue); + encode.setupInvocation(); } - @Setup(Level.Iteration) - public void setupIteration() throws IOException { - encode.setupIteration(bitsPerValue, new NonSortedIntegerSupplier(SEED, bitsPerValue, BLOCK_SIZE)); + @Setup(Level.Trial) + public void setupTrial() throws IOException { + encode.setupTrial(new NonSortedIntegerSupplier(SEED, bitsPerValue, BLOCK_SIZE)); + encode.setupInvocation(); + encode.run(); + } + + @Benchmark + public void throughput(Blackhole bh, ThroughputMetrics metrics) throws IOException { + encode.benchmark(bh); + metrics.recordOperation(BLOCK_SIZE, encode.getEncodedSize()); } + /** + * Measures compression efficiency metrics (compression ratio, encoded bits/bytes per value). + * + *
Uses zero warmup and single iteration because compression metrics are deterministic:
+ * the same input data always produces the same encoded size. Unlike throughput measurements
+ * which vary due to JIT compilation and CPU state, compression ratios are constant across runs.
+ */
@Benchmark
- public void benchmark(Blackhole bh) throws IOException {
- encode.benchmark(bitsPerValue, bh);
+ @Warmup(iterations = 0)
+ @Measurement(iterations = 1)
+ public void compression(Blackhole bh, CompressionMetrics metrics) throws IOException {
+ encode.benchmark(bh);
+ metrics.recordOperation(BLOCK_SIZE, encode.getEncodedSize(), bitsPerValue);
}
}
diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/internal/AbstractDocValuesForUtilBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/internal/AbstractDocValuesForUtilBenchmark.java
deleted file mode 100644
index 53723f05728b5..0000000000000
--- a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/internal/AbstractDocValuesForUtilBenchmark.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the "Elastic License
- * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
- * Public License v 1"; you may not use this file except in compliance with, at
- * your election, the "Elastic License 2.0", the "GNU Affero General Public
- * License v3.0 only", or the "Server Side Public License, v 1".
- */
-
-package org.elasticsearch.benchmark.index.codec.tsdb.internal;
-
-import org.elasticsearch.index.codec.tsdb.DocValuesForUtil;
-import org.elasticsearch.index.codec.tsdb.ES87TSDBDocValuesFormat;
-import org.openjdk.jmh.infra.Blackhole;
-
-import java.io.IOException;
-import java.util.function.Supplier;
-
-public abstract class AbstractDocValuesForUtilBenchmark {
- protected final DocValuesForUtil forUtil;
- protected final int blockSize;
-
- public AbstractDocValuesForUtilBenchmark() {
- this.forUtil = new DocValuesForUtil(ES87TSDBDocValuesFormat.NUMERIC_BLOCK_SIZE);
- this.blockSize = ES87TSDBDocValuesFormat.NUMERIC_BLOCK_SIZE;
- }
-
- public abstract void benchmark(int bitsPerValue, Blackhole bh) throws IOException;
-
- public abstract void setupIteration(int bitsPerValue, final Supplier This abstract class provides the common structure for benchmarking the
+ * {@link TSDBDocValuesEncoder}. It uses the Template Method pattern where:
+ * The {@link TSDBDocValuesEncoder} writes metadata alongside bit-packed data for each
+ * encoding step (delta, offset, GCD). This buffer headroom ensures we never overflow
+ * during encoding. The theoretical maximum is ~32 bytes; we use 64 for safety.
+ */
+ protected static final int EXTRA_METADATA_SIZE = 64;
+
+ /** The encoder instance used for all encode/decode operations. */
+ protected final TSDBDocValuesEncoder encoder;
+
+ /** Number of values per block (typically 128). */
+ protected final int blockSize;
+
+ /**
+ * Creates a new benchmark instance with the standard TSDB block size.
+ */
+ public AbstractTSDBCodecBenchmark() {
+ this.blockSize = ES87TSDBDocValuesFormat.NUMERIC_BLOCK_SIZE;
+ this.encoder = new TSDBDocValuesEncoder(blockSize);
+ }
+
+ /**
+ * Executes the core encode or decode operation. An encoder would normally implement the
+ * encoding logic while a decoder would implement the decoding logic.
+ *
+ * @throws IOException if encoding or decoding fails
+ */
+ public abstract void run() throws IOException;
+
+ /**
+ * Returns the output expected to be consumed by the JMH blackhole.
+ *
+ * @return the benchmark output (encoded bytes or decoded values)
+ */
+ protected abstract Object getOutput();
+
+ /**
+ * Template method that runs the operation and consumes the result (encode or decode).
+ * This is the method called by JMH during benchmark iterations.
+ *
+ * @param bh the JMH blackhole for consuming results
+ * @throws IOException if the operation fails
+ */
+ public void benchmark(Blackhole bh) throws IOException {
+ run();
+ bh.consume(getOutput());
+ }
+
+ /**
+ * Sets up state for a new benchmark trial (once per parameter combination).
+ * Called once at the start of each parameter combination to initialize input data.
+ *
+ * @param arraySupplier supplier that generates the input array for this trial
+ * @throws IOException if setup fails
+ */
+ public abstract void setupTrial(Supplier This class uses JMH's {@link AuxCounters} feature to report compression metrics
+ * alongside timing data. Metrics are accumulated during benchmark operations and
+ * computed at iteration teardown.
+ *
+ * Measures the performance of {@link org.elasticsearch.index.codec.tsdb.TSDBDocValuesEncoder#decode},
+ * which decompresses a byte buffer back into a block of long values.
+ *
+ * During setup, input data is encoded to create a realistic compressed buffer for decoding.
+ *
+ * @see EncodeBenchmark
+ */
+public final class DecodeBenchmark extends AbstractTSDBCodecBenchmark {
+
+ private ByteArrayDataInput dataInput;
+ private long[] output;
+ private byte[] encodedBuffer;
@Override
- public void setupIteration(int bitsPerValue, final Supplier Measures the performance of {@link org.elasticsearch.index.codec.tsdb.TSDBDocValuesEncoder#encode},
+ * which compresses a block of long values into a byte buffer.
+ *
+ * @see DecodeBenchmark
+ */
+public final class EncodeBenchmark extends AbstractTSDBCodecBenchmark {
+
+ private ByteArrayDataOutput dataOutput;
+ private long[] originalInput;
+ private long[] input;
+ private byte[] output;
@Override
- public void setupIteration(int unUsedBitsPerValue, Supplier<long[]> arraySupplier) throws IOException {
+/**
+ * <p>This class uses JMH's {@link AuxCounters} with {@link AuxCounters.Type#OPERATIONS}
+ * to report throughput metrics normalized by time. JMH automatically divides accumulated
+ * values by benchmark time to produce rate metrics (e.g., bytes/s, values/s).
+ *
+ * Important: {@link AuxCounters.Type#OPERATIONS} only works with
+ * {@link org.openjdk.jmh.annotations.Mode#Throughput} or
+ * {@link org.openjdk.jmh.annotations.Mode#AverageTime}. It does NOT work with
+ * {@link org.openjdk.jmh.annotations.Mode#SampleTime}.
+ *
+ *
+ *
+ *
+ * @see EncodeBenchmark
+ * @see DecodeBenchmark
+ */
+public abstract class AbstractTSDBCodecBenchmark {
+
+ /**
+ * Extra bytes allocated beyond the raw data size to accommodate encoding metadata.
+ *
+ */
+
+/**
+ * Compression metrics reported alongside timing data via JMH {@link AuxCounters}.
+ *
+ * Usage
+ * {@code
+ * @Benchmark
+ * public void benchmark(Blackhole bh, CompressionMetrics metrics) {
+ * encode.benchmark(bh);
+ * metrics.recordOperation(BLOCK_SIZE, encode.getEncodedSize(), bitsPerValue);
+ * }
+ * }
+ */
+@AuxCounters(AuxCounters.Type.EVENTS)
+@State(Scope.Thread)
+public class CompressionMetrics {
+
+ /**
+ * Average bytes written per value after encoding.
+ * Lower values indicate better compression.
+ */
+ public double encodedBytesPerValue;
+
+ /**
+ * Compression ratio: raw size (8 bytes/value) divided by encoded size.
+ * Higher values indicate better compression. A ratio of 8.0 means
+ * the data was compressed to 1 byte per value.
+ */
+ public double compressionRatio;
+
+ /**
+ * Average bits used per value after encoding.
+ * Compare against the nominal input {@code bitsPerValue} to assess
+ * compression effectiveness.
+ */
+ public double encodedBitsPerValue;
+
+ /**
+ * Ratio of actual encoded size to theoretical minimum size at nominal bit width.
+ *
+ *
+ * Lower values are better. Useful for comparing encoder versions on the same data pattern.
+ */
+ public double overheadRatio;
+
+ /** Number of values in each encoded block. */
+ private int blockSize;
+
+ /** Actual bytes produced after encoding one block. */
+ private int encodedBytesPerBlock;
+
+ /** The nominal bits per value being tested (benchmark parameter). */
+ private int nominalBitsPerValue;
+
+ /**
+ * Resets all metrics at the start of each iteration.
+ */
+ @Setup(Level.Iteration)
+ public void setupIteration() {
+ encodedBytesPerValue = 0;
+ compressionRatio = 0;
+ encodedBitsPerValue = 0;
+ overheadRatio = 0;
+ blockSize = 0;
+ encodedBytesPerBlock = 0;
+ nominalBitsPerValue = 0;
+ }
+
+ /**
+ * Records metrics for a single benchmark operation.
+ * Call this method at the end of each {@code @Benchmark} method.
+ *
+ * @param blockSize number of values per encoded block
+ * @param encodedBytes actual bytes produced after encoding one block
+ * @param nominalBits the nominal bits per value being tested
+ */
+ public void recordOperation(int blockSize, int encodedBytes, int nominalBits) {
+ this.blockSize = blockSize;
+ this.encodedBytesPerBlock = encodedBytes;
+ this.nominalBitsPerValue = nominalBits;
+ }
+
+ /**
+ * Computes final compression metrics at the end of each iteration.
+ * Called automatically by JMH after all operations in an iteration complete.
+ */
+ @TearDown(Level.Iteration)
+ public void computeMetrics() {
+ if (blockSize == 0) {
+ return;
+ }
+
+ long rawBytes = (long) blockSize * Long.BYTES;
+ long theoreticalMin = Math.ceilDiv((long) blockSize * nominalBitsPerValue, Byte.SIZE);
+
+ encodedBytesPerValue = (double) encodedBytesPerBlock / blockSize;
+ compressionRatio = (double) rawBytes / encodedBytesPerBlock;
+ encodedBitsPerValue = encodedBytesPerValue * Byte.SIZE;
+ overheadRatio = theoreticalMin > 0 ? (double) encodedBytesPerBlock / theoreticalMin : 0;
+ }
+}
diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/internal/ConstantIntegerSupplier.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/internal/ConstantIntegerSupplier.java
index 34acd82d812c8..01a90361558f1 100644
--- a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/internal/ConstantIntegerSupplier.java
+++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/internal/ConstantIntegerSupplier.java
@@ -23,8 +23,8 @@ public ConstantIntegerSupplier(int seed, int bitsPerValue, int size) {
@Override
public long[] get() {
long[] data = new long[size];
- long max = 1L << bitsPerValue;
- Arrays.fill(data, random.nextLong(max));
+ long value = bitsPerValue == 64 ? random.nextLong() : random.nextLong(1L << bitsPerValue);
+ Arrays.fill(data, value);
return data;
}
}
diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/internal/DecodeBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/internal/DecodeBenchmark.java
index 284324b3d9206..732c96abb1231 100644
--- a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/internal/DecodeBenchmark.java
+++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/internal/DecodeBenchmark.java
@@ -11,39 +11,58 @@
import org.apache.lucene.store.ByteArrayDataInput;
import org.apache.lucene.store.ByteArrayDataOutput;
-import org.apache.lucene.store.DataOutput;
-import org.openjdk.jmh.infra.Blackhole;
import java.io.IOException;
import java.util.function.Supplier;
-public class DecodeBenchmark extends AbstractDocValuesForUtilBenchmark {
- protected ByteArrayDataInput dataInput;
- protected long[] output;
- protected long[] input;
- private byte[] outputBuffer;
- private byte[] inputBuffer;
+/**
+ * Decoding benchmark for TSDB doc values.
+ */
+
+/**
+ * Throughput metrics reported alongside timing data via JMH {@link AuxCounters}.
+ *
+ * Usage
+ * {@code
+ * @BenchmarkMode(Mode.Throughput)
+ * @Benchmark
+ * public void benchmark(Blackhole bh, ThroughputMetrics metrics) {
+ * encode.benchmark(bh);
+ * metrics.recordOperation(BLOCK_SIZE, encode.getEncodedSize());
+ * }
+ * }
+ */
+@AuxCounters(AuxCounters.Type.OPERATIONS)
+@State(Scope.Thread)
+public class ThroughputMetrics {
+
+ /** Bytes encoded or decoded. JMH normalizes by time to produce bytes/s. */
+ public long encodedBytes;
+
+ /** Values processed. JMH normalizes by time to produce values/s. */
+ public long valuesProcessed;
+
+ /**
+ * Resets all metrics at the start of each iteration.
+ */
+ @Setup(Level.Iteration)
+ public void setupIteration() {
+ encodedBytes = 0;
+ valuesProcessed = 0;
+ }
+
+ /**
+ * Records throughput data for a single benchmark operation.
+ *
+ * @param blockSize number of values processed in this operation
+ * @param bytes number of bytes produced or consumed
+ */
+ public void recordOperation(int blockSize, int bytes) {
+ encodedBytes += bytes;
+ valuesProcessed += blockSize;
+ }
+}