docs/src/main/sphinx/admin/event-listeners-kafka.md (7 additions, 0 deletions)

@@ -96,6 +96,13 @@ Use the following properties for further configuration.
    distinction in Kafka, if multiple Trino clusters send events to the same
    Kafka system.
  -
* - `kafka-event-listener.max-request-size`
  - [Size value](prop-type-data-size) that specifies the maximum size of a
    request the Kafka producer sends; messages exceeding this size fail.
  - `5MB`
* - `kafka-event-listener.batch-size`
  - [Size value](prop-type-data-size) that specifies the batch size to
    accumulate before sending records to Kafka.
  - `16KB`
* - `kafka-event-listener.publish-created-event`
  - [Boolean](prop-type-boolean) switch to control publishing of query creation
    events.
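
For illustration, both new properties accept Airlift data size values like the other size-based listener settings. A minimal sketch of how they might appear in an event listener configuration file, reusing the values from the test fixture further down (broker endpoints included only for context):

```properties
kafka-event-listener.broker-endpoints=kafka-host-1:9093,kafka-host-2:9093
kafka-event-listener.max-request-size=1048576B
kafka-event-listener.batch-size=81920B
```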

KafkaEventListenerConfig.java

@@ -19,6 +19,7 @@
import io.airlift.configuration.ConfigDescription;
import io.airlift.configuration.DefunctConfig;
import io.airlift.configuration.validation.FileExists;
import io.airlift.units.DataSize;
import io.airlift.units.Duration;
import io.airlift.units.MinDuration;
import jakarta.validation.constraints.AssertTrue;
@@ -33,6 +34,8 @@

import static com.google.common.collect.ImmutableList.toImmutableList;
import static com.google.common.collect.ImmutableSet.toImmutableSet;
import static io.airlift.units.DataSize.Unit.KILOBYTE;
import static io.airlift.units.DataSize.Unit.MEGABYTE;
import static java.util.Objects.requireNonNull;
import static java.util.concurrent.TimeUnit.SECONDS;

@@ -48,6 +51,8 @@ public class KafkaEventListenerConfig
    private Optional<String> splitCompletedTopicName = Optional.empty();
    private String brokerEndpoints;
    private Optional<String> clientId = Optional.empty();
    private DataSize maxRequestSize = DataSize.of(5, MEGABYTE); // greater than the Kafka default because completed events can be quite large
    private DataSize batchSize = DataSize.of(16, KILOBYTE); // default value of batch.size
    private Set<String> excludedFields = Collections.emptySet();
    private Duration requestTimeout = new Duration(10, SECONDS);
    private boolean terminateOnInitializationFailure = true;
@@ -91,6 +96,32 @@ public KafkaEventListenerConfig setClientId(String clientId)
        return this;
    }

    public DataSize getMaxRequestSize()
    {
        return maxRequestSize;
    }

    @ConfigDescription("The maximum size of a request/message in bytes")
    @Config("kafka-event-listener.max-request-size")
    public KafkaEventListenerConfig setMaxRequestSize(DataSize maxRequestSize)
    {
        this.maxRequestSize = maxRequestSize;
        return this;
    }

    public DataSize getBatchSize()
    {
        return batchSize;
    }

    @ConfigDescription("Value that specifies the size to batch before sending records to Kafka")
    @Config("kafka-event-listener.batch-size")
    public KafkaEventListenerConfig setBatchSize(DataSize batchSize)
    {
        this.batchSize = batchSize;
        return this;
    }

    public Optional<String> getCompletedTopicName()
    {
        return completedTopicName;

@@ -39,7 +39,8 @@ protected Map<String, Object> baseConfig(KafkaEventListenerConfig config)
        kafkaClientConfig.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        kafkaClientConfig.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        kafkaClientConfig.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, "zstd");
        kafkaClientConfig.put(ProducerConfig.MAX_REQUEST_SIZE_CONFIG, "5242880");
        kafkaClientConfig.put(ProducerConfig.MAX_REQUEST_SIZE_CONFIG, Long.toString(config.getMaxRequestSize().toBytes()));
        kafkaClientConfig.put(ProducerConfig.BATCH_SIZE_CONFIG, Long.toString(config.getBatchSize().toBytes()));
        kafkaClientConfig.put(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG, Long.toString(config.getRequestTimeout().toMillis()));
        return kafkaClientConfig;
    }
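
As a side note, not part of the change: Airlift's DataSize uses binary units, so the new 5MB default resolves to the same 5242880 bytes that the producer factory previously hardcoded for max.request.size. A minimal standalone sketch (hypothetical class name):

```java
import io.airlift.units.DataSize;

import static io.airlift.units.DataSize.Unit.MEGABYTE;

class DefaultMaxRequestSizeCheck
{
    public static void main(String[] args)
    {
        // 5 MB in binary units: 5 * 1024 * 1024 = 5242880 bytes,
        // matching the value previously hardcoded in baseConfig()
        long bytes = DataSize.of(5, MEGABYTE).toBytes();
        System.out.println(bytes); // prints 5242880
    }
}
```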

TestKafkaEventListenerConfig.java

@@ -16,6 +16,7 @@

import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import io.airlift.units.DataSize;
import io.airlift.units.Duration;
import org.junit.jupiter.api.Test;

@@ -30,6 +31,9 @@
import static io.airlift.configuration.testing.ConfigAssertions.assertFullMapping;
import static io.airlift.configuration.testing.ConfigAssertions.assertRecordedDefaults;
import static io.airlift.configuration.testing.ConfigAssertions.recordDefaults;
import static io.airlift.units.DataSize.Unit.BYTE;
import static io.airlift.units.DataSize.Unit.KILOBYTE;
import static io.airlift.units.DataSize.Unit.MEGABYTE;
import static org.assertj.core.api.Assertions.assertThat;

final class TestKafkaEventListenerConfig
@@ -46,6 +50,8 @@ void testDefaults()
                .setCreatedTopicName(null)
                .setSplitCompletedTopicName(null)
                .setBrokerEndpoints(null)
                .setMaxRequestSize(DataSize.of(5, MEGABYTE))
                .setBatchSize(DataSize.of(16, KILOBYTE))
                .setClientId(null)
                .setExcludedFields(Set.of())
                .setRequestTimeout(new Duration(10, TimeUnit.SECONDS))
@@ -66,6 +72,8 @@ void testExplicitPropertyMappings()
.put("kafka-event-listener.publish-completed-event", "false")
.put("kafka-event-listener.publish-split-completed-event", "true")
.put("kafka-event-listener.broker-endpoints", "kafka-host-1:9093,kafka-host-2:9093")
.put("kafka-event-listener.max-request-size", "1048576B")
.put("kafka-event-listener.batch-size", "81920B")
.put("kafka-event-listener.created-event.topic", "query_created")
.put("kafka-event-listener.completed-event.topic", "query_completed")
.put("kafka-event-listener.split-completed-event.topic", "split_completed")
@@ -84,6 +92,8 @@
                .setPublishCompletedEvent(false)
                .setPublishSplitCompletedEvent(true)
                .setBrokerEndpoints("kafka-host-1:9093,kafka-host-2:9093")
                .setMaxRequestSize(DataSize.of(1048576, BYTE))
                .setBatchSize(DataSize.of(81920, BYTE))
                .setCreatedTopicName("query_created")
                .setCompletedTopicName("query_completed")
                .setSplitCompletedTopicName("split_completed")