From 6272a3bfb2acb272de5cc6df5c98a7021e04719c Mon Sep 17 00:00:00 2001
From: Travis Bischel
Date: Tue, 31 Aug 2021 22:47:33 -0600
Subject: [PATCH] breaking: rename BatchMaxBytes to ProducerBatchMaxBytes

Similar to the prior two commits, this one was even more ambiguous: did
the BatchMaxBytes refer to the max bytes we would want per partition
while fetching? The new name is clearer.
---
 pkg/kgo/config.go | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/pkg/kgo/config.go b/pkg/kgo/config.go
index 4d5b0470..453ee429 100644
--- a/pkg/kgo/config.go
+++ b/pkg/kgo/config.go
@@ -819,8 +819,8 @@ func ProducerBatchCompression(preference ...CompressionCodec) ProducerOpt {
 	return producerOpt{func(cfg *cfg) { cfg.compression = preference }}
 }
 
-// BatchMaxBytes upper bounds the size of a record batch, overriding the
-// default 1MB.
+// ProducerBatchMaxBytes upper bounds the size of a record batch, overriding
+// the default 1MB.
 //
 // This corresponds to Kafka's max.message.bytes, which defaults to 1,000,012
 // bytes (just over 1MB).
@@ -832,10 +832,10 @@ func ProducerBatchCompression(preference ...CompressionCodec) ProducerOpt {
 // If a single record encodes larger than this number (before compression), it
 // will will not be written and a callback will have the appropriate error.
 //
-// Note that this is the maximum size of a record batch before compression.
-// If a batch compresses poorly and actually grows the batch, the uncompressed
+// Note that this is the maximum size of a record batch before compression. If
+// a batch compresses poorly and actually grows the batch, the uncompressed
 // form will be used.
-func BatchMaxBytes(v int32) ProducerOpt {
+func ProducerBatchMaxBytes(v int32) ProducerOpt {
 	return producerOpt{func(cfg *cfg) { cfg.maxRecordBatchBytes = v }}
 }
 
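
For callers updating after this rename, a minimal sketch of constructing a
client with the renamed option follows. The seed broker address and the
512 KiB cap are placeholder values for illustration, not part of this patch.

package main

import "github.com/twmb/franz-go/pkg/kgo"

func main() {
	// Cap each record batch at 512 KiB before compression; this should not
	// exceed the broker's max.message.bytes.
	cl, err := kgo.NewClient(
		kgo.SeedBrokers("localhost:9092"),  // placeholder broker address
		kgo.ProducerBatchMaxBytes(512<<10), // previously kgo.BatchMaxBytes
	)
	if err != nil {
		panic(err)
	}
	defer cl.Close()
}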