  4 |   4 | import static org.apache.kafka.clients.consumer.ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG;
  5 |   5 | import static org.apache.kafka.clients.consumer.ConsumerConfig.AUTO_OFFSET_RESET_CONFIG;
  6 |   6 | import static org.apache.kafka.clients.consumer.ConsumerConfig.MAX_POLL_RECORDS_CONFIG;
    |   7 | +import static org.apache.kafka.clients.producer.ProducerConfig.BATCH_SIZE_CONFIG;
  7 |   8 | import static org.apache.kafka.clients.producer.ProducerConfig.COMPRESSION_TYPE_CONFIG;
  8 |   9 | import static org.apache.kafka.clients.producer.ProducerConfig.LINGER_MS_CONFIG;
  9 |  10 | import static org.apache.kafka.clients.producer.ProducerConfig.MAX_REQUEST_SIZE_CONFIG;

 12 |  13 | import static org.apache.kafka.streams.StreamsConfig.DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG;
 13 |  14 | import static org.apache.kafka.streams.StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG;
 14 |  15 | import static org.apache.kafka.streams.StreamsConfig.METRICS_RECORDING_LEVEL_CONFIG;
    |  16 | +import static org.apache.kafka.streams.StreamsConfig.ROCKSDB_CONFIG_SETTER_CLASS_CONFIG;
 15 |  17 | import static org.apache.kafka.streams.StreamsConfig.TOPOLOGY_OPTIMIZATION;
 16 |  18 | import static org.apache.kafka.streams.StreamsConfig.consumerPrefix;
 17 |  19 | import static org.apache.kafka.streams.StreamsConfig.producerPrefix;

 35 |  37 | import org.apache.kafka.streams.kstream.KStream;
 36 |  38 | import org.hypertrace.core.kafkastreams.framework.listeners.LoggingStateListener;
 37 |  39 | import org.hypertrace.core.kafkastreams.framework.listeners.LoggingStateRestoreListener;
    |  40 | +import org.hypertrace.core.kafkastreams.framework.rocksdb.RocksDBStateStoreConfigSetter;
 38 |  41 | import org.hypertrace.core.kafkastreams.framework.timestampextractors.UseWallclockTimeOnInvalidTimestamp;
 39 |  42 | import org.hypertrace.core.kafkastreams.framework.topics.creator.KafkaTopicCreator;
 40 |  43 | import org.hypertrace.core.kafkastreams.framework.util.ExceptionUtils;

@@ -141,20 +144,21 @@ public Map<String, Object> getBaseStreamsConfig() {
141 | 144 |         .put(DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG, UseWallclockTimeOnInvalidTimestamp.class);
142 | 145 |     baseStreamsConfig.put(DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG,
143 | 146 |         LogAndContinueExceptionHandler.class);
    | 147 | +   baseStreamsConfig.put(ROCKSDB_CONFIG_SETTER_CLASS_CONFIG, RocksDBStateStoreConfigSetter.class);
144 | 148 |
145 | 149 |     // Default serde configurations
146 | 150 |     baseStreamsConfig.put(DEFAULT_KEY_SERDE_CLASS_CONFIG, SpecificAvroSerde.class);
147 | 151 |     baseStreamsConfig.put(DEFAULT_VALUE_SERDE_CLASS_CONFIG, SpecificAvroSerde.class);
148 | 152 |
149 | 153 |     // Default producer configurations
150 |     | -   baseStreamsConfig.put(producerPrefix(LINGER_MS_CONFIG), "5000");
151 |     | -   baseStreamsConfig.put(producerPrefix(MAX_REQUEST_SIZE_CONFIG), "1048576");
    | 154 | +   baseStreamsConfig.put(producerPrefix(LINGER_MS_CONFIG), "2000");
    | 155 | +   baseStreamsConfig.put(producerPrefix(BATCH_SIZE_CONFIG), "2097152");
152 | 156 |     baseStreamsConfig.put(producerPrefix(COMPRESSION_TYPE_CONFIG), CompressionType.GZIP.name);
    | 157 | +   baseStreamsConfig.put(producerPrefix(MAX_REQUEST_SIZE_CONFIG), "10485760");
153 | 158 |
154 | 159 |     // Default consumer configurations
155 | 160 |     baseStreamsConfig.put(consumerPrefix(MAX_POLL_RECORDS_CONFIG), "1000");
156 | 161 |     baseStreamsConfig.put(consumerPrefix(AUTO_OFFSET_RESET_CONFIG), "latest");
157 |     | -   baseStreamsConfig.put(consumerPrefix(AUTO_COMMIT_INTERVAL_MS_CONFIG), "5000");
158 | 162 |
159 | 163 |     return baseStreamsConfig;
160 | 164 |   }
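
Note on the new RocksDB hook: the ROCKSDB_CONFIG_SETTER_CLASS_CONFIG entry tells Kafka Streams to hand each RocksDB state store's Options to the framework's RocksDBStateStoreConfigSetter before the store is opened. That class is not part of this diff; as a rough orientation only, the sketch below shows the Kafka Streams RocksDBConfigSetter contract such a class has to implement, with purely illustrative tuning values.

```java
package org.hypertrace.core.kafkastreams.framework.rocksdb;

import java.util.Map;
import org.apache.kafka.streams.state.RocksDBConfigSetter;
import org.rocksdb.BlockBasedTableConfig;
import org.rocksdb.Options;

// Illustrative sketch only -- not the framework's actual implementation.
public class RocksDBStateStoreConfigSetter implements RocksDBConfigSetter {

  @Override
  public void setConfig(final String storeName, final Options options,
      final Map<String, Object> configs) {
    // Cap memtable memory per store (example values, not taken from the source).
    options.setWriteBufferSize(8 * 1024 * 1024L);
    options.setMaxWriteBufferNumber(2);

    // Use a larger block size in the block-based table format (example value).
    final BlockBasedTableConfig tableConfig = new BlockBasedTableConfig();
    tableConfig.setBlockSize(16 * 1024L);
    options.setTableFormatConfig(tableConfig);
  }

  @Override
  public void close(final String storeName, final Options options) {
    // Nothing allocated above needs explicit closing; real setters should
    // release any Cache/Filter objects they created in setConfig.
  }
}
```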
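For context, here is a minimal sketch (not from this repository) of how a service built on the framework would typically consume getBaseStreamsConfig(): the producer.- and consumer.-prefixed entries in the diff above are applied to the internal clients that KafkaStreams creates from the merged properties. ExampleStreamsApp, the application id, the bootstrap servers, and the topology parameter are placeholders.

```java
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.Topology;

// Sketch: merge the framework defaults with app-specific settings and start Streams.
public class ExampleStreamsApp {

  public void run(Map<String, Object> baseStreamsConfig, Topology topology) {
    // Start from the framework defaults shown in the diff above...
    Map<String, Object> config = new HashMap<>(baseStreamsConfig);
    // ...and add the app-specific settings that have no sensible default (placeholder values).
    config.put(StreamsConfig.APPLICATION_ID_CONFIG, "example-streams-app");
    config.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");

    Properties props = new Properties();
    props.putAll(config);

    // The prefixed overrides (linger.ms, batch.size, max.request.size, max.poll.records,
    // auto.offset.reset) reach the producers and consumers that KafkaStreams creates internally.
    KafkaStreams streams = new KafkaStreams(topology, props);
    streams.start();
  }
}
```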