#!/bin/sh
#
# DISCLAIMER: THIS FILE HAS BEEN AUTOMATICALLY GENERATED
# PLEASE DO NOT TOUCH!!!
# IF YOU NEED TO MAKE ANY MODIFICATIONS, EDIT GENERATE.ZSH
# FOR MORE INFORMATION https://github.com/Dabz/kafka-zsh-completions
#
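#
# Loading sketch (not part of the generated output; the path and plugin-manager
# setup below are assumptions): this file is meant to be sourced by an
# interactive zsh with the completion system initialised, for example:
#
#   autoload -Uz compinit && compinit
#   source ~/path/to/kafka.plugin.zsh
#
# or by enabling it through a plugin manager that picks up *.plugin.zsh files
# (oh-my-zsh style).
#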
function _kafka-command() {
  # Generic dispatcher used by every compdef below: derive the name of the
  # per-command argument array (e.g. kafka-acls -> _kafka_acls_args) and hand
  # its contents to _arguments.
  local cmd=$1
  local arg_name="_$(echo $cmd | tr - _)_args"
  typeset -a options
  eval _arguments \$$arg_name
}
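# The remainder of the file is generated in a fixed pattern: one
# _kafka_<command>_args array per CLI tool, followed by a compdef line that
# routes the tool through _kafka-command above. A hand-written entry for a
# hypothetical kafka-foo tool (the name and flag are illustrative, not real
# commands) would follow the same shape:
#
#   declare -a _kafka_foo_args
#   _kafka_foo_args=()
#   _kafka_foo_args+=('--bar[ Description shown next to the flag.]:file:_files')
#   compdef "_kafka-command kafka-foo" kafka-foo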
function _kafka-list-topic() {
  # Complete topic names by asking kcat for cluster metadata in JSON and
  # extracting the topic names with jq.
  compadd $(kcat -L -J | jq -r '.topics[].topic')
}
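# Topic completion depends on kcat and jq being on PATH and on kcat being able
# to reach a broker through its own configuration. If kcat is unavailable, a
# rough alternative sketch would be to list topics with the kafka-topics CLI
# instead; the broker address here is an assumption:
#
#   # compadd $(kafka-topics --bootstrap-server localhost:9092 --list)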
declare -a _kafka_acls_args
_kafka_acls_args=()
_kafka_acls_args+=('--add[ Indicates you are trying to add ACLs.]')
_kafka_acls_args+=('--allow-host[ Host from which principals listed in -- allow-principal will have access. If you have specified --allow-principal then the default for this option will be set to * which allows access from all hosts.]:file:_files')
_kafka_acls_args+=('--allow-principal[ principal is in principalType:name format. Note that principalType must be supported by the Authorizer being used. For example, User:* is the wild card indicating all users.]:file:_files')
_kafka_acls_args+=('--authorizer[ DEPRECATED: Fully qualified class name of the authorizer, which defaults to kafka.security.authorizer. AclAuthorizer if --bootstrap-server is not provided. Warning: support for ACL configuration directly through the authorizer is deprecated and will be removed in a future release. Please use --bootstrap- server instead to set ACLs through the admin client.]:file:_files')
_kafka_acls_args+=('--authorizer-properties[ DEPRECATED: The properties required to configure an instance of the Authorizer specified by -- authorizer. These are key=val pairs. For the default authorizer, example values are: zookeeper. connect=localhost:2181. Warning: support for ACL configuration directly through the authorizer is deprecated and will be removed in a future release. Please use -- bootstrap-server instead to set ACLs through the admin client.]:file:_files')
_kafka_acls_args+=('--bootstrap-server[ A list of host/port pairs to use for establishing the connection to the Kafka cluster. This list should be in the form host1:port1,host2: port2,... This config is required for acl management using admin client API.]:file:_files')
_kafka_acls_args+=('--cluster[ Add/Remove cluster ACLs.]')
_kafka_acls_args+=('--command-config[ A property file containing configs to be passed to Admin Client.]:file:_files')
_kafka_acls_args+=('--consumer[ Convenience option to add/remove ACLs for consumer role. This will generate ACLs that allows READ, DESCRIBE on topic and READ on group.]')
_kafka_acls_args+=('--delegation-token[ Delegation token to which ACLs should be added or removed. A value of * indicates ACL should apply to all tokens.]:file:_files')
_kafka_acls_args+=('--deny-host[ Host from which principals listed in -- deny-principal will be denied access. If you have specified --deny- principal then the default for this option will be set to * which denies access from all hosts.]:file:_files')
_kafka_acls_args+=('--deny-principal[ principal is in principalType:name format. By default anyone not added through --allow-principal is denied access. You only need to use this option as negation to already allowed set. Note that principalType must be supported by the Authorizer being used. For example if you wanted to allow access to all users in the system but not test-user you can define an ACL that allows access to User:* and specify --deny- principal=User:[email protected]. AND PLEASE REMEMBER DENY RULES TAKES PRECEDENCE OVER ALLOW RULES.]:file:_files')
_kafka_acls_args+=('--force[ Assume Yes to all queries and do not prompt.]')
_kafka_acls_args+=('--group[ Consumer Group to which the ACLs should be added or removed. A value of * indicates the ACLs should apply to all groups.]:file:_files')
_kafka_acls_args+=('--help[ Print usage information.]')
_kafka_acls_args+=('--idempotent[ Enable idempotence for the producer. This should be used in combination with the --producer option. Note that idempotence is enabled automatically if the producer is authorized to a particular transactional-id.]')
_kafka_acls_args+=('--link-id[ Cluster link ID associated with the ACLs for add/remove/list.]:file:_files')
_kafka_acls_args+=('--list[ List ACLs for the specified resource, use --topic <topic> or --group <group> or --cluster to specify a resource.]')
_kafka_acls_args+=('--operation[ Operation that is being allowed or denied. Valid operation names are: Describe DescribeConfigs Alter IdempotentWrite Read Delete Create ClusterAction All Write AlterConfigs (default: All)]:file:_files')
_kafka_acls_args+=('--principal[ List ACLs for the specified principal. principal is in principalType:name format. Note that principalType must be supported by the Authorizer being used. Multiple --principal option can be passed.]:file:_files')
_kafka_acls_args+=('--producer[ Convenience option to add/remove ACLs for producer role. This will generate ACLs that allows WRITE, DESCRIBE and CREATE on topic.]')
_kafka_acls_args+=('--remove[ Indicates you are trying to remove ACLs.]')
_kafka_acls_args+=('--resource-pattern-type[ The type of the resource pattern or ALL_TENANT_LITERAL|CONFLUENT_ALL_TENANT_PREFIXED|CONFLUENT_ALL_TENANT_ANY|CONFLUENT_ONLY_TENANT_MATCH> pattern filter. When adding acls, this should be a specific pattern type, e.g. ''literal'' or ''prefixed''. When listing or removing acls, a specific pattern type can be used to list or remove acls from specific resource patterns, or use the filter values of ''any'' or ''match'', where ''any'' will match any pattern type, but will match the resource name exactly, where as ''match'' will perform pattern matching to list or remove all acls that affect the supplied resource(s). WARNING: ''match'', when used in combination with the ''--remove'' switch, should be used with care. (default: LITERAL)]')
_kafka_acls_args+=('--topic[ topic to which ACLs should be added or removed. A value of * indicates ACL should apply to all topics.]:topic:_kafka-list-topic')
_kafka_acls_args+=('--transactional-id[ The transactionalId to which ACLs should be added or removed. A value of * indicates the ACLs should apply to all transactionalIds.]:file:_files')
_kafka_acls_args+=('--version[ Display Kafka version.]')
_kafka_acls_args+=('--zk-tls-config-file[ DEPRECATED: Identifies the file where ZooKeeper client TLS connectivity properties are defined for the default authorizer kafka.security. authorizer.AclAuthorizer. Any properties other than the following (with or without an "authorizer." prefix) are ignored: zookeeper. clientCnxnSocket, zookeeper.ssl. cipher.suites, zookeeper.ssl.client. enable, zookeeper.ssl.crl.enable, zookeeper.ssl.enabled.protocols, zookeeper.ssl.endpoint. identification.algorithm, zookeeper. ssl.keystore.location, zookeeper.ssl. keystore.password, zookeeper.ssl. keystore.type, zookeeper.ssl.ocsp. enable, zookeeper.ssl.protocol, zookeeper.ssl.truststore.location, zookeeper.ssl.truststore.password, zookeeper.ssl.truststore.type. Note that if SASL is not configured and zookeeper.set.acl is supposed to be true due to mutual certificate authentication being used then it is necessary to explicitly specify -- authorizer-properties zookeeper.set. acl=true. Warning: support for ACL configuration directly through the authorizer is deprecated and will be removed in a future release. Please use --bootstrap-server instead to set ACLs through the admin client.]:file:_files')
compdef "_kafka-command kafka-acls" kafka-acls
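# With the binding above in place, pressing TAB after the command name offers
# the flags declared in _kafka_acls_args; the exact rendering depends on the
# user's completion styles (illustrative output only):
#
#   $ kafka-acls --<TAB>
#   --add  --allow-host  --allow-principal  --authorizer  --bootstrap-server ...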
declare -a _kafka_avro_console_consumer_args
_kafka_avro_console_consumer_args=()
_kafka_avro_console_consumer_args+=('--bootstrap-server[ REQUIRED: The server(s) to connect to.]:file:_files')
_kafka_avro_console_consumer_args+=('--consumer-property[ A mechanism to pass user-defined properties in the form key=value to the consumer.]:file:_files')
_kafka_avro_console_consumer_args+=('--consumer.config[ Consumer config properties file. Note that \[consumer-property\] takes precedence over this config.]:file:_files')
_kafka_avro_console_consumer_args+=('--enable-systest-events[ Log lifecycle events of the consumer in addition to logging consumed messages. (This is specific for system tests.)]')
_kafka_avro_console_consumer_args+=('--formatter[ The name of a class to use for formatting kafka messages for display. (default: kafka.tools. DefaultMessageFormatter)]:file:_files')
_kafka_avro_console_consumer_args+=('--from-beginning[ If the consumer does not already have an established offset to consume from, start with the earliest message present in the log rather than the latest message.]')
_kafka_avro_console_consumer_args+=('--group[ The consumer group id of the consumer.]:file:_files')
_kafka_avro_console_consumer_args+=('--help[ Print usage information.]')
_kafka_avro_console_consumer_args+=('--include[ Regular expression specifying list of topics to include for consumption.]:file:_files')
_kafka_avro_console_consumer_args+=('--isolation-level[ Set to read_committed in order to filter out transactional messages which are not committed. Set to read_uncommitted to read all messages. (default: read_uncommitted)]:file:_files')
_kafka_avro_console_consumer_args+=('--key-deserializer[]:file:_files')
_kafka_avro_console_consumer_args+=('--max-messages[ The maximum number of messages to consume before exiting. If not set, consumption is continual.]:file:_files')
_kafka_avro_console_consumer_args+=('--offset[ The offset to consume from (a non- negative number), or ''earliest'' which means from beginning, or ''latest'' which means from end (default: latest)]:file:_files')
_kafka_avro_console_consumer_args+=('--partition[ The partition to consume from. Consumption starts from the end of the partition unless ''--offset'' is specified.]:file:_files')
_kafka_avro_console_consumer_args+=('--property[ The properties to initialize the message formatter. Default properties include: print.timestamp=true|false print.key=true|false print.offset=true|false print.partition=true|false print.headers=true|false print.value=true|false key.separator=<key.separator> line.separator=<line.separator> headers.separator=<line.separator> null.literal=<null.literal> key.deserializer=<key.deserializer> value.deserializer=<value. deserializer> header.deserializer=<header. deserializer> Users can also pass in customized properties for their formatter; more specifically, users can pass in properties keyed with ''key. deserializer.'', ''value. deserializer.'' and ''headers. deserializer.'' prefixes to configure their deserializers.]:file:_files')
_kafka_avro_console_consumer_args+=('--skip-message-on-error[ If there is an error when processing a message, skip it instead of halt.]')
_kafka_avro_console_consumer_args+=('--timeout-ms[ If specified, exit if no message is available for consumption for the specified interval.]:file:_files')
_kafka_avro_console_consumer_args+=('--topic[ The topic to consume on.]:topic:_kafka-list-topic')
_kafka_avro_console_consumer_args+=('--value-deserializer[]:file:_files')
_kafka_avro_console_consumer_args+=('--version[ Display Kafka version.]')
_kafka_avro_console_consumer_args+=('--whitelist[ DEPRECATED, use --include instead; ignored if --include specified. Regular expression specifying list of topics to include for consumption.]:file:_files')
compdef "_kafka-command kafka-avro-console-consumer" kafka-avro-console-consumer
declare -a _kafka_avro_console_producer_args
_kafka_avro_console_producer_args=()
_kafka_avro_console_producer_args+=('--batch-size[ Number of messages to send in a single batch if they are not being sent synchronously. please note that this option will be replaced if max- partition-memory-bytes is also set (default: 16384)]:file:_files')
_kafka_avro_console_producer_args+=('--bootstrap-server[ REQUIRED unless --broker-list (deprecated) is specified. The server (s) to connect to. The broker list string in the form HOST1:PORT1,HOST2: PORT2.]:file:_files')
_kafka_avro_console_producer_args+=('--broker-list[ DEPRECATED, use --bootstrap-server instead; ignored if --bootstrap- server is specified. The broker list string in the form HOST1:PORT1, HOST2:PORT2.]:file:_files')
_kafka_avro_console_producer_args+=('--compression-codec[ The compression codec: either ''none'', ''gzip'', ''snappy'', ''lz4'', or ''zstd''. If specified without value, then it defaults to ''gzip'']:file:_files')
_kafka_avro_console_producer_args+=('--help[ Print usage information.]')
_kafka_avro_console_producer_args+=('--line-reader[ The class name of the class to use for reading lines from standard in. By default each line is read as a separate message. (default: kafka. tools. ConsoleProducer$LineMessageReader)]:file:_files')
_kafka_avro_console_producer_args+=('--max-block-ms[ The max time that the producer will block for during a send request. (default: 60000)]:file:_files')
_kafka_avro_console_producer_args+=('--max-memory-bytes[ The total memory used by the producer to buffer records waiting to be sent to the server. This is the option to control `buffer.memory` in producer configs. (default: 33554432)]:file:_files')
_kafka_avro_console_producer_args+=('--max-partition-memory-bytes[ The buffer size allocated for a partition. When records are received which are smaller than this size the producer will attempt to optimistically group them together until this size is reached. This is the option to control `batch.size` in producer configs. (default: 16384)]:file:_files')
_kafka_avro_console_producer_args+=('--message-send-max-retries[ Brokers can fail receiving the message for multiple reasons, and being unavailable transiently is just one of them. This property specifies the number of retries before the producer give up and drop this message. This is the option to control `retries` in producer configs. (default: 3)]:file:_files')
_kafka_avro_console_producer_args+=('--metadata-expiry-ms[ The period of time in milliseconds after which we force a refresh of metadata even if we haven''t seen any leadership changes. This is the option to control `metadata.max.age. ms` in producer configs. (default: 300000)]:file:_files')
_kafka_avro_console_producer_args+=('--producer-property[ A mechanism to pass user-defined properties in the form key=value to the producer.]:file:_files')
_kafka_avro_console_producer_args+=('--producer.config[ Producer config properties file. Note that \[producer-property\] takes precedence over this config.]:file:_files')
_kafka_avro_console_producer_args+=('--property[ A mechanism to pass user-defined properties in the form key=value to the message reader. This allows custom configuration for a user- defined message reader. Default properties include: parse.key=false parse.headers=false ignore.error=false key.separator= headers.delimiter= headers.separator=, headers.key.separator=: null.marker= When set, any fields (key, value and headers) equal to this will be replaced by null Default parsing pattern when: parse.headers=true and parse.key=true: "h1:v1,h2:v2... key value" parse.key=true: "key value" parse.headers=true: "h1:v1,h2:v2... value"]:file:_files')
_kafka_avro_console_producer_args+=('--request-required-acks[ The required `acks` of the producer requests (default: -1)]:file:_files')
_kafka_avro_console_producer_args+=('--request-timeout-ms[ The ack timeout of the producer requests. Value must be non-negative and non-zero. (default: 1500)]:file:_files')
_kafka_avro_console_producer_args+=('--retry-backoff-ms[ Before each retry, the producer refreshes the metadata of relevant topics. Since leader election takes a bit of time, this property specifies the amount of time that the producer waits before refreshing the metadata. This is the option to control `retry.backoff.ms` in producer configs. (default: 100)]:file:_files')
_kafka_avro_console_producer_args+=('--socket-buffer-size[ The size of the tcp RECV size. This is the option to control `send.buffer. bytes` in producer configs. (default: 102400)]:file:_files')
_kafka_avro_console_producer_args+=('--sync[ If set, message send requests to the brokers are sent synchronously, one at a time as they arrive.]')
_kafka_avro_console_producer_args+=('--timeout[ If set and the producer is running in asynchronous mode, this gives the maximum amount of time a message will queue awaiting sufficient batch size. The value is given in ms. This is the option to control `linger.ms` in producer configs. (default: 1000)]:file:_files')
_kafka_avro_console_producer_args+=('--topic[ REQUIRED: The topic id to produce messages to.]:topic:_kafka-list-topic')
_kafka_avro_console_producer_args+=('--version[ Display Kafka version.]')
compdef "_kafka-command kafka-avro-console-producer" kafka-avro-console-producer
declare -a _kafka_broker_api_versions_args
_kafka_broker_api_versions_args=()
_kafka_broker_api_versions_args+=('--bootstrap-server[ REQUIRED: The server to connect to.]:file:_files')
_kafka_broker_api_versions_args+=('--command-config[ A property file containing configs to be passed to Admin Client.]:file:_files')
_kafka_broker_api_versions_args+=('--help[ Print usage information.]')
_kafka_broker_api_versions_args+=('--version[ Display Kafka version.]')
compdef "_kafka-command kafka-broker-api-versions" kafka-broker-api-versions
declare -a _kafka_configs_args
_kafka_configs_args=()
_kafka_configs_args+=('--add-config[ Key Value pairs of configs to add. Square brackets can be used to group values which contain commas: ''k1=v1, k2=\[v1,v2,v2\],k3=v3''. The following is a list of valid configurations: For entity-type ''topics'': cleanup.policy compression.type confluent.append.record.interceptor. classes confluent.key.schema.validation confluent.key.subject.name.strategy confluent.placement.constraints confluent.prefer.tier.fetch.ms confluent.segment.speculative. prefetch.enable confluent.tier.cleaner.compact.min. efficiency confluent.tier.cleaner.compact. segment.min.bytes confluent.tier.cleaner.dual.compaction confluent.tier.cleaner.enable confluent.tier.cleaner.min.cleanable. ratio confluent.tier.enable confluent.tier.local.hotset.bytes confluent.tier.local.hotset.ms confluent.tier.segment.hotset.roll. min.bytes confluent.value.schema.validation confluent.value.subject.name.strategy delete.retention.ms file.delete.delay.ms flush.messages flush.ms follower.replication.throttled. replicas index.interval.bytes leader.replication.throttled.replicas local.retention.bytes local.retention.ms max.compaction.lag.ms max.message.bytes message.downconversion.enable message.format.version message.timestamp.difference.max.ms message.timestamp.type min.cleanable.dirty.ratio min.compaction.lag.ms min.insync.replicas preallocate remote.storage.enable retention.bytes retention.ms segment.bytes segment.index.bytes segment.jitter.ms segment.ms unclean.leader.election.enable For entity-type ''brokers'': advertised.listeners auto.create.topics.enable background.threads compression.type confluent.append.record.interceptor. classes confluent.backpressure.disk.enable confluent.backpressure.disk.free. threshold.bytes confluent.backpressure.disk.produce. bytes.per.second confluent.backpressure.disk.threshold. recovery.factor confluent.backpressure.request.min. broker.limit confluent.backpressure.request.queue. size.percentile confluent.backpressure.types confluent.balancer.enable confluent.balancer.exclude.topic.names confluent.balancer.exclude.topic. prefixes confluent.balancer.heal.uneven.load. trigger confluent.balancer.throttle.bytes.per. second confluent.broker.health.manager. engine.request.handler.threads.stuck. criteria confluent.broker.health.manager. external.network.mititgation.enabled confluent.broker.health.manager. external.network.sample.duration.ms confluent.broker.health.manager.hard. kill.duration.ms confluent.broker.health.manager. mitigation.enabled confluent.broker.health.manager.num. samples.before.broker.healthy confluent.broker.health.manager.num. samples.before.broker.suspect confluent.broker.health.manager.num. samples.before.broker.unhealthy confluent.broker.health.manager.num. samples.before.network.unhealthy confluent.broker.health.manager. percentage.unhealthy.samples.before. broker.suspect confluent.broker.health.manager. percentage.unhealthy.samples.before. broker.unhealthy confluent.broker.health.manager. sample.duration.ms confluent.broker.health.manager. storage.background.threads.stuck. criteria confluent.broker.health.manager. storage.network.threads.stuck. criteria confluent.broker.health.manager. storage.request.handler.threads. stuck.criteria confluent.broker.load.average.service. request.time.ms confluent.broker.load.workload. coefficient confluent.clm.enabled confluent.clm.frequency.in.hours confluent.clm.max.backup.days confluent.clm.min.delay.in.minutes confluent.clm.topic.retention.days.to. backup.days confluent.cluster.link.fetch.response. 
min.bytes confluent.cluster.link.fetch.response. total.bytes confluent.cluster.link.io.max.bytes. per.second confluent.cluster.link.replication. quota.mode confluent.durability.audit.checks confluent.durability.events.allowed confluent.eligible.controllers confluent.fetch.partition.pruning. enable confluent.metadata.server.cluster. registry.clusters confluent.plugins.cluster.link.policy. max.destination.links.per.tenant confluent.plugins.cluster.link.policy. max.source.links.per.tenant confluent.plugins.topic.policy.max. partitions.per.tenant confluent.prefer.tier.fetch.ms confluent.quota.tenant.default. controller.mutation.rate confluent.quota.tenant.fetch. multiplier confluent.quota.tenant.produce. multiplier confluent.quota.tenant.user.quotas. enable confluent.reporters.telemetry.auto. enable confluent.security.event.router.config confluent.segment.speculative. prefetch.enable confluent.telemetry.enabled confluent.tier.cleaner.compact.min. efficiency confluent.tier.cleaner.compact. segment.min.bytes confluent.tier.cleaner.dedupe.buffer. size confluent.tier.cleaner.dual.compaction confluent.tier.cleaner.enable confluent.tier.cleaner.io.buffer.load. factor confluent.tier.cleaner.io.buffer.size confluent.tier.cleaner.io.max.bytes. per.second confluent.tier.cleaner.min.cleanable. ratio confluent.tier.cleaner.num.threads confluent.tier.enable confluent.tier.fetcher.memorypool. bytes confluent.tier.local.hotset.bytes confluent.tier.local.hotset.ms confluent.tier.max.partition.fetch. bytes.override confluent.tier.segment.hotset.roll. min.bytes confluent.tier.topic.delete.backoff.ms confluent.tier.topic.delete.check. interval.ms confluent.tier.topic.delete.max. inprogress.partitions confluent.transaction.logging. verbosity connections.max.age.ms fetch.max.bytes floor.max.connection.creation.rate follower.replication.throttled.rate follower.replication.throttled. replicas leader.replication.throttled.rate leader.replication.throttled.replicas listener.security.protocol.map listeners log.cleaner.backoff.ms log.cleaner.dedupe.buffer.size log.cleaner.delete.retention.ms log.cleaner.io.buffer.load.factor log.cleaner.io.buffer.size log.cleaner.io.max.bytes.per.second log.cleaner.max.compaction.lag.ms log.cleaner.min.cleanable.ratio log.cleaner.min.compaction.lag.ms log.cleaner.threads log.cleanup.policy log.deletion.max.segments.per.run log.flush.interval.messages log.flush.interval.ms log.index.interval.bytes log.index.size.max.bytes log.message.downconversion.enable log.message.timestamp.difference.max. ms log.message.timestamp.type log.preallocate log.retention.bytes log.retention.ms log.roll.jitter.ms log.roll.ms log.segment.bytes log.segment.delete.delay.ms max.connection.creation.rate max.connection.creation.rate.per.ip. enable.threshold max.connections max.connections.per.ip max.connections.per.ip.overrides message.max.bytes metric.reporters min.insync.replicas num.io.threads num.network.threads num.partitions num.recovery.threads.per.data.dir num.replica.fetchers principal.builder.class quotas.expiration.time.ms replica.alter.log.dirs.io.max.bytes. per.second sasl.enabled.mechanisms sasl.jaas.config sasl.kerberos.kinit.cmd sasl.kerberos.min.time.before.relogin sasl.kerberos.principal.to.local.rules sasl.kerberos.service.name sasl.kerberos.ticket.renew.jitter sasl.kerberos.ticket.renew.window. 
factor sasl.login.refresh.buffer.seconds sasl.login.refresh.min.period.seconds sasl.login.refresh.window.factor sasl.login.refresh.window.jitter sasl.mechanism.inter.broker.protocol socket.send.buffer.bytes ssl.cipher.suites ssl.client.auth ssl.enabled.protocols ssl.endpoint.identification.algorithm ssl.engine.factory.class ssl.key.password ssl.keymanager.algorithm ssl.keystore.certificate.chain ssl.keystore.key ssl.keystore.location ssl.keystore.password ssl.keystore.type ssl.protocol ssl.provider ssl.secure.random.implementation ssl.trustmanager.algorithm ssl.truststore.certificates ssl.truststore.location ssl.truststore.password ssl.truststore.type unclean.leader.election.enable For entity-type ''users'': SCRAM-SHA-256 SCRAM-SHA-512 consumer_byte_rate controller_mutation_rate producer_byte_rate request_percentage For entity-type ''clients'': consumer_byte_rate controller_mutation_rate producer_byte_rate request_percentage For entity-type ''ips'': connection_creation_rate For entity-type ''cluster-links'': acl.filters acl.sync.enable acl.sync.ms auto.create.mirror.topics.enable auto.create.mirror.topics.filters availability.check.consecutive. failure.threshold availability.check.ms bootstrap.servers client.dns.lookup cluster.link.paused cluster.link.prefix cluster.link.retry.timeout.ms confluent.replica.fetch.backoff.max.ms connection.mode connections.max.idle.ms consumer.group.prefix.enable consumer.offset.group.filters consumer.offset.sync.enable consumer.offset.sync.ms link.fetcher.enforce.max.lagging. partitions.ms link.fetcher.flow.control link.fetcher.lagging.partition.ms link.fetcher.max.lagging.partitions link.mode local.listener.name local.sasl.client.callback.handler. class local.sasl.jaas.config local.sasl.kerberos.kinit.cmd local.sasl.kerberos.min.time.before. relogin local.sasl.kerberos.service.name local.sasl.kerberos.ticket.renew. jitter local.sasl.kerberos.ticket.renew. window.factor local.sasl.login.callback.handler. class local.sasl.login.class local.sasl.login.connect.timeout.ms local.sasl.login.read.timeout.ms local.sasl.login.refresh.buffer. seconds local.sasl.login.refresh.min.period. seconds local.sasl.login.refresh.window.factor local.sasl.login.refresh.window.jitter local.sasl.login.retry.backoff.max.ms local.sasl.login.retry.backoff.ms local.sasl.mechanism local.sasl.oauthbearer.clock.skew. seconds local.sasl.oauthbearer.expected. audience local.sasl.oauthbearer.expected.issuer local.sasl.oauthbearer.jwks.endpoint. refresh.ms local.sasl.oauthbearer.jwks.endpoint. retry.backoff.max.ms local.sasl.oauthbearer.jwks.endpoint. retry.backoff.ms local.sasl.oauthbearer.jwks.endpoint. url local.sasl.oauthbearer.scope.claim. name local.sasl.oauthbearer.sub.claim.name local.sasl.oauthbearer.token.endpoint. url local.security.protocol local.ssl.cipher.suites local.ssl.enabled.protocols local.ssl.endpoint.identification. 
algorithm local.ssl.engine.factory.class local.ssl.key.password local.ssl.keymanager.algorithm local.ssl.keystore.certificate.chain local.ssl.keystore.key local.ssl.keystore.location local.ssl.keystore.password local.ssl.keystore.type local.ssl.protocol local.ssl.provider local.ssl.secure.random.implementation local.ssl.trustmanager.algorithm local.ssl.truststore.certificates local.ssl.truststore.location local.ssl.truststore.password local.ssl.truststore.type metadata.max.age.ms num.cluster.link.fetchers reconnect.backoff.max.ms reconnect.backoff.ms replica.fetch.backoff.ms replica.fetch.max.bytes replica.fetch.min.bytes replica.fetch.response.max.bytes replica.fetch.wait.max.ms replica.socket.receive.buffer.bytes replica.socket.timeout.ms request.timeout.ms retry.backoff.ms reverse.connection.setup.timeout.ms sasl.client.callback.handler.class sasl.jaas.config sasl.kerberos.kinit.cmd sasl.kerberos.min.time.before.relogin sasl.kerberos.service.name sasl.kerberos.ticket.renew.jitter sasl.kerberos.ticket.renew.window. factor sasl.login.callback.handler.class sasl.login.class sasl.login.connect.timeout.ms sasl.login.read.timeout.ms sasl.login.refresh.buffer.seconds sasl.login.refresh.min.period.seconds sasl.login.refresh.window.factor sasl.login.refresh.window.jitter sasl.login.retry.backoff.max.ms sasl.login.retry.backoff.ms sasl.mechanism sasl.oauthbearer.clock.skew.seconds sasl.oauthbearer.expected.audience sasl.oauthbearer.expected.issuer sasl.oauthbearer.jwks.endpoint. refresh.ms sasl.oauthbearer.jwks.endpoint.retry. backoff.max.ms sasl.oauthbearer.jwks.endpoint.retry. backoff.ms sasl.oauthbearer.jwks.endpoint.url sasl.oauthbearer.scope.claim.name sasl.oauthbearer.sub.claim.name sasl.oauthbearer.token.endpoint.url security.protocol socket.connection.setup.timeout.max.ms socket.connection.setup.timeout.ms ssl.cipher.suites ssl.enabled.protocols ssl.endpoint.identification.algorithm ssl.engine.factory.class ssl.key.password ssl.keymanager.algorithm ssl.keystore.certificate.chain ssl.keystore.key ssl.keystore.location ssl.keystore.password ssl.keystore.type ssl.protocol ssl.provider ssl.secure.random.implementation ssl.trustmanager.algorithm ssl.truststore.certificates ssl.truststore.location ssl.truststore.password ssl.truststore.type topic.config.sync.include topic.config.sync.ms Entity types ''users'' and ''clients'' may be specified together to update config for clients of a specific user.]:file:_files')
_kafka_configs_args+=('--add-config-file[ Path to a properties file with configs to add. See add-config for a list of valid configurations.]:file:_files')
_kafka_configs_args+=('--all[ List all configs for the given topic, broker, or broker-logger entity (includes static configuration when the entity type is brokers)]')
_kafka_configs_args+=('--alter[ Alter the configuration for the entity.]')
_kafka_configs_args+=('--bootstrap-server[ The Kafka server to connect to. This is required for describing and altering broker configs.]:file:_files')
_kafka_configs_args+=('--broker[ The broker''s ID.]:file:_files')
_kafka_configs_args+=('--broker-defaults[ The config defaults for all brokers.]')
_kafka_configs_args+=('--broker-logger[ The broker''s ID for its logger config.]:file:_files')
_kafka_configs_args+=('--client[ The client''s ID.]:file:_files')
_kafka_configs_args+=('--client-defaults[ The config defaults for all clients.]')
_kafka_configs_args+=('--cluster-link[ The cluster link''s name.]:file:_files')
_kafka_configs_args+=('--command-config[ Property file containing configs to be passed to Admin Client. This is used only with --bootstrap-server option for describing and altering broker configs.]:file:_files')
_kafka_configs_args+=('--delete-config[ config keys to remove ''k1,k2'']:file:_files')
_kafka_configs_args+=('--describe[ List configs for the given entity.]')
_kafka_configs_args+=('--entity-default[ Default entity name for clients/users/brokers/ips (applies to corresponding entity type in command line)]')
_kafka_configs_args+=('--entity-name[ Name of entity (topic name/client id/user principal name/broker id/ip/cluster link)]:file:_files')
_kafka_configs_args+=('--entity-type[ Type of entity (topics/clients/users/brokers/broker- loggers/ips/cluster-links)]:file:_files')
_kafka_configs_args+=('--force[ Suppress console prompts]')
_kafka_configs_args+=('--help[ Print usage information.]')
_kafka_configs_args+=('--ip[ The IP address.]:file:_files')
_kafka_configs_args+=('--ip-defaults[ The config defaults for all IPs.]')
_kafka_configs_args+=('--replica-placement[ This configuration is a JSON object that controls the set of brokers (replicas) which will always be allowed to join the ISR. And the set of brokers (observers) which are not allowed to join the ISR. The format of JSON is: { "version": 1, "replicas": \[ { "count": 2, "constraints": {"rack": "east-1"} }, { "count": 1, "constraints": {"rack": "east-2"} } \], "observers":\[ { "count": 1, "constraints": {"rack": "west-1"} } \] }]:file:_files')
_kafka_configs_args+=('--topic[ The topic''s name.]:topic:_kafka-list-topic')
_kafka_configs_args+=('--user[ The user''s principal name.]:file:_files')
_kafka_configs_args+=('--user-defaults[ The config defaults for all users.]')
_kafka_configs_args+=('--version[ Display Kafka version.]')
_kafka_configs_args+=('--zk-tls-config-file[ Identifies the file where ZooKeeper client TLS connectivity properties are defined. Any properties other than zookeeper.clientCnxnSocket, zookeeper.ssl.cipher.suites, zookeeper.ssl.client.enable, zookeeper.ssl.crl.enable, zookeeper. ssl.enabled.protocols, zookeeper.ssl. endpoint.identification.algorithm, zookeeper.ssl.keystore.location, zookeeper.ssl.keystore.password, zookeeper.ssl.keystore.type, zookeeper.ssl.ocsp.enable, zookeeper. ssl.protocol, zookeeper.ssl. truststore.location, zookeeper.ssl. truststore.password, zookeeper.ssl. truststore.type are ignored.]:file:_files')
_kafka_configs_args+=('--zookeeper[ DEPRECATED. The connection string for the zookeeper connection in the form host:port. Multiple URLS can be given to allow fail-over. Required when configuring SCRAM credentials for users or dynamic broker configs when the relevant broker(s) are down. Not allowed otherwise.]:file:_files')
compdef "_kafka-command kafka-configs" kafka-configs
declare -a _kafka_console_consumer_args
_kafka_console_consumer_args=()
_kafka_console_consumer_args+=('--bootstrap-server[ REQUIRED: The server(s) to connect to.]:file:_files')
_kafka_console_consumer_args+=('--consumer-property[ A mechanism to pass user-defined properties in the form key=value to the consumer.]:file:_files')
_kafka_console_consumer_args+=('--consumer.config[ Consumer config properties file. Note that \[consumer-property\] takes precedence over this config.]:file:_files')
_kafka_console_consumer_args+=('--enable-systest-events[ Log lifecycle events of the consumer in addition to logging consumed messages. (This is specific for system tests.)]')
_kafka_console_consumer_args+=('--formatter[ The name of a class to use for formatting kafka messages for display. (default: kafka.tools. DefaultMessageFormatter)]:file:_files')
_kafka_console_consumer_args+=('--from-beginning[ If the consumer does not already have an established offset to consume from, start with the earliest message present in the log rather than the latest message.]')
_kafka_console_consumer_args+=('--group[ The consumer group id of the consumer.]:file:_files')
_kafka_console_consumer_args+=('--help[ Print usage information.]')
_kafka_console_consumer_args+=('--include[ Regular expression specifying list of topics to include for consumption.]:file:_files')
_kafka_console_consumer_args+=('--isolation-level[ Set to read_committed in order to filter out transactional messages which are not committed. Set to read_uncommitted to read all messages. (default: read_uncommitted)]:file:_files')
_kafka_console_consumer_args+=('--key-deserializer[]:file:_files')
_kafka_console_consumer_args+=('--max-messages[ The maximum number of messages to consume before exiting. If not set, consumption is continual.]:file:_files')
_kafka_console_consumer_args+=('--offset[ The offset to consume from (a non- negative number), or ''earliest'' which means from beginning, or ''latest'' which means from end (default: latest)]:file:_files')
_kafka_console_consumer_args+=('--partition[ The partition to consume from. Consumption starts from the end of the partition unless ''--offset'' is specified.]:file:_files')
_kafka_console_consumer_args+=('--property[ The properties to initialize the message formatter. Default properties include: print.timestamp=true|false print.key=true|false print.offset=true|false print.partition=true|false print.headers=true|false print.value=true|false key.separator=<key.separator> line.separator=<line.separator> headers.separator=<line.separator> null.literal=<null.literal> key.deserializer=<key.deserializer> value.deserializer=<value. deserializer> header.deserializer=<header. deserializer> Users can also pass in customized properties for their formatter; more specifically, users can pass in properties keyed with ''key. deserializer.'', ''value. deserializer.'' and ''headers. deserializer.'' prefixes to configure their deserializers.]:file:_files')
_kafka_console_consumer_args+=('--skip-message-on-error[ If there is an error when processing a message, skip it instead of halt.]')
_kafka_console_consumer_args+=('--timeout-ms[ If specified, exit if no message is available for consumption for the specified interval.]:file:_files')
_kafka_console_consumer_args+=('--topic[ The topic to consume on.]:topic:_kafka-list-topic')
_kafka_console_consumer_args+=('--value-deserializer[]:file:_files')
_kafka_console_consumer_args+=('--version[ Display Kafka version.]')
_kafka_console_consumer_args+=('--whitelist[ DEPRECATED, use --include instead; ignored if --include specified. Regular expression specifying list of topics to include for consumption.]:file:_files')
compdef "_kafka-command kafka-console-consumer" kafka-console-consumer
declare -a _kafka_console_producer_args
_kafka_console_producer_args=()
_kafka_console_producer_args+=('--batch-size[ Number of messages to send in a single batch if they are not being sent synchronously. please note that this option will be replaced if max- partition-memory-bytes is also set (default: 16384)]:file:_files')
_kafka_console_producer_args+=('--bootstrap-server[ REQUIRED unless --broker-list (deprecated) is specified. The server (s) to connect to. The broker list string in the form HOST1:PORT1,HOST2: PORT2.]:file:_files')
_kafka_console_producer_args+=('--broker-list[ DEPRECATED, use --bootstrap-server instead; ignored if --bootstrap- server is specified. The broker list string in the form HOST1:PORT1, HOST2:PORT2.]:file:_files')
_kafka_console_producer_args+=('--compression-codec[ The compression codec: either ''none'', ''gzip'', ''snappy'', ''lz4'', or ''zstd''. If specified without value, then it defaults to ''gzip'']:file:_files')
_kafka_console_producer_args+=('--help[ Print usage information.]')
_kafka_console_producer_args+=('--line-reader[ The class name of the class to use for reading lines from standard in. By default each line is read as a separate message. (default: kafka. tools. ConsoleProducer$LineMessageReader)]:file:_files')
_kafka_console_producer_args+=('--max-block-ms[ The max time that the producer will block for during a send request. (default: 60000)]:file:_files')
_kafka_console_producer_args+=('--max-memory-bytes[ The total memory used by the producer to buffer records waiting to be sent to the server. This is the option to control `buffer.memory` in producer configs. (default: 33554432)]:file:_files')
_kafka_console_producer_args+=('--max-partition-memory-bytes[ The buffer size allocated for a partition. When records are received which are smaller than this size the producer will attempt to optimistically group them together until this size is reached. This is the option to control `batch.size` in producer configs. (default: 16384)]:file:_files')
_kafka_console_producer_args+=('--message-send-max-retries[ Brokers can fail receiving the message for multiple reasons, and being unavailable transiently is just one of them. This property specifies the number of retries before the producer give up and drop this message. This is the option to control `retries` in producer configs. (default: 3)]:file:_files')
_kafka_console_producer_args+=('--metadata-expiry-ms[ The period of time in milliseconds after which we force a refresh of metadata even if we haven''t seen any leadership changes. This is the option to control `metadata.max.age. ms` in producer configs. (default: 300000)]:file:_files')
_kafka_console_producer_args+=('--producer-property[ A mechanism to pass user-defined properties in the form key=value to the producer.]:file:_files')
_kafka_console_producer_args+=('--producer.config[ Producer config properties file. Note that \[producer-property\] takes precedence over this config.]:file:_files')
_kafka_console_producer_args+=('--property[ A mechanism to pass user-defined properties in the form key=value to the message reader. This allows custom configuration for a user- defined message reader. Default properties include: parse.key=false parse.headers=false ignore.error=false key.separator= headers.delimiter= headers.separator=, headers.key.separator=: null.marker= When set, any fields (key, value and headers) equal to this will be replaced by null Default parsing pattern when: parse.headers=true and parse.key=true: "h1:v1,h2:v2... key value" parse.key=true: "key value" parse.headers=true: "h1:v1,h2:v2... value"]:file:_files')
_kafka_console_producer_args+=('--request-required-acks[ The required `acks` of the producer requests (default: -1)]:file:_files')
_kafka_console_producer_args+=('--request-timeout-ms[ The ack timeout of the producer requests. Value must be non-negative and non-zero. (default: 1500)]:file:_files')
_kafka_console_producer_args+=('--retry-backoff-ms[ Before each retry, the producer refreshes the metadata of relevant topics. Since leader election takes a bit of time, this property specifies the amount of time that the producer waits before refreshing the metadata. This is the option to control `retry.backoff.ms` in producer configs. (default: 100)]:file:_files')
_kafka_console_producer_args+=('--socket-buffer-size[ The size of the tcp RECV size. This is the option to control `send.buffer. bytes` in producer configs. (default: 102400)]:file:_files')
_kafka_console_producer_args+=('--sync[ If set, message send requests to the brokers are sent synchronously, one at a time as they arrive.]')
_kafka_console_producer_args+=('--timeout[ If set and the producer is running in asynchronous mode, this gives the maximum amount of time a message will queue awaiting sufficient batch size. The value is given in ms. This is the option to control `linger.ms` in producer configs. (default: 1000)]:file:_files')
_kafka_console_producer_args+=('--topic[ REQUIRED: The topic id to produce messages to.]:topic:_kafka-list-topic')
_kafka_console_producer_args+=('--version[ Display Kafka version.]')
compdef "_kafka-command kafka-console-producer" kafka-console-producer
declare -a _kafka_consumer_groups_args
_kafka_consumer_groups_args=()
_kafka_consumer_groups_args+=('--all-groups[ Apply to all consumer groups.]')
_kafka_consumer_groups_args+=('--all-topics[ Consider all topics assigned to a group in the `reset-offsets` process.]')
_kafka_consumer_groups_args+=('--bootstrap-server[ REQUIRED: The server(s) to connect to.]:file:_files')
_kafka_consumer_groups_args+=('--by-duration[ Reset offsets to offset by duration from current timestamp. Format: ''PnDTnHnMnS'']:file:_files')
_kafka_consumer_groups_args+=('--command-config[ Property file containing configs to be passed to Admin Client and Consumer.]:file:_files')
_kafka_consumer_groups_args+=('--delete[ Pass in groups to delete topic partition offsets and ownership information over the entire consumer group. For instance --group g1 -- group g2]')
_kafka_consumer_groups_args+=('--delete-offsets[ Delete offsets of consumer group. Supports one consumer group at a time, and multiple topics.]')
_kafka_consumer_groups_args+=('--describe[ Describe consumer group and list offset lag (number of messages not yet processed) related to given group.]')
_kafka_consumer_groups_args+=('--dry-run[ Only show results without executing changes on Consumer Groups. Supported operations: reset-offsets.]')
_kafka_consumer_groups_args+=('--execute[ Execute operation. Supported operations: reset-offsets.]')
_kafka_consumer_groups_args+=('--export[ Export operation execution to a CSV file. Supported operations: reset- offsets.]')
_kafka_consumer_groups_args+=('--from-file[ Reset offsets to values defined in CSV file.]:file:_files')
_kafka_consumer_groups_args+=('--group[ The consumer group we wish to act on.]:file:_files')
_kafka_consumer_groups_args+=('--help[ Print usage information.]')
_kafka_consumer_groups_args+=('--list[ List all consumer groups.]')
_kafka_consumer_groups_args+=('--members[ Describe members of the group. This option may be used with ''--describe'' and ''--bootstrap-server'' options only. Example: --bootstrap-server localhost: 9092 --describe --group group1 -- members]')
_kafka_consumer_groups_args+=('--offsets[ Describe the group and list all topic partitions in the group along with their offset lag. This is the default sub-action of and may be used with ''--describe'' and ''-- bootstrap-server'' options only. Example: --bootstrap-server localhost: 9092 --describe --group group1 -- offsets]')
_kafka_consumer_groups_args+=('--reset-offsets[ Reset offsets of consumer group. Supports one consumer group at the time, and instances should be inactive Has 2 execution options: --dry-run (the default) to plan which offsets to reset, and --execute to update the offsets. Additionally, the -- export option is used to export the results to a CSV format. You must choose one of the following reset specifications: --to-datetime, --by-period, --to-earliest, --to- latest, --shift-by, --from-file, -- to-current. To define the scope use --all-topics or --topic. One scope must be specified unless you use ''--from- file''.]')
_kafka_consumer_groups_args+=('--shift-by[ Reset offsets shifting current offset by ''n'', where ''n'' can be positive or negative.]:file:_files')
_kafka_consumer_groups_args+=('--state[ When specified with ''--describe'', includes the state of the group. Example: --bootstrap-server localhost: 9092 --describe --group group1 -- state When specified with ''--list'', it displays the state of all groups. It can also be used to list groups with specific states. Example: --bootstrap-server localhost: 9092 --list --state stable,empty This option may be used with ''-- describe'', ''--list'' and ''--bootstrap- server'' options only.]:file:_files')
_kafka_consumer_groups_args+=('--timeout[ The timeout that can be set for some use cases. For example, it can be used when describing the group to specify the maximum amount of time in milliseconds to wait before the group stabilizes (when the group is just created, or is going through some changes). (default: 5000)]:file:_files')
_kafka_consumer_groups_args+=('--to-current[ Reset offsets to current offset.]')
_kafka_consumer_groups_args+=('--to-datetime[ Reset offsets to offset from datetime. Format: ''YYYY-MM-DDTHH:mm:SS.sss'']:file:_files')
_kafka_consumer_groups_args+=('--to-earliest[ Reset offsets to earliest offset.]')
_kafka_consumer_groups_args+=('--to-latest[ Reset offsets to latest offset.]')
_kafka_consumer_groups_args+=('--to-offset[ Reset offsets to a specific offset.]:file:_files')
_kafka_consumer_groups_args+=('--topic[ The topic whose consumer group information should be deleted or topic whose should be included in the reset offset process. In `reset- offsets` case, partitions can be specified using this format: `topic1: 0,1,2`, where 0,1,2 are the partition to be included in the process. Reset-offsets also supports multiple topic inputs.]:topic:_kafka-list-topic')
_kafka_consumer_groups_args+=('--verbose[ Provide additional information, if any, when describing the group. This option may be used with ''-- offsets''/''--members''/''--state'' and ''--bootstrap-server'' options only. Example: --bootstrap-server localhost: 9092 --describe --group group1 -- members --verbose]')
_kafka_consumer_groups_args+=('--version[ Display Kafka version.]')
compdef "_kafka-command kafka-consumer-groups" kafka-consumer-groups
declare -a _kafka_consumer_perf_test_args
_kafka_consumer_perf_test_args=()
_kafka_consumer_perf_test_args+=('--bootstrap-server[ REQUIRED unless --broker-list (deprecated) is specified. The server (s) to connect to.]:file:_files')
_kafka_consumer_perf_test_args+=('--broker-list[ DEPRECATED, use --bootstrap-server instead; ignored if --bootstrap- server is specified. The broker list string in the form HOST1:PORT1, HOST2:PORT2.]:file:_files')
_kafka_consumer_perf_test_args+=('--consumer.config[ Consumer config properties file.]:file:_files')
_kafka_consumer_perf_test_args+=('--date-format[ The date format to use for formatting the time field. See java.text. SimpleDateFormat for options. (default: yyyy-MM-dd HH:mm:ss:SSS)]:file:_files')
_kafka_consumer_perf_test_args+=('--fetch-size[ The amount of data to fetch in a single request. (default: 1048576)]:file:_files')
_kafka_consumer_perf_test_args+=('--from-latest[ If the consumer does not already have an established offset to consume from, start with the latest message present in the log rather than the earliest message.]')
_kafka_consumer_perf_test_args+=('--group[ The group id to consume on. (default: perf-consumer-34070)]:file:_files')
_kafka_consumer_perf_test_args+=('--help[ Print usage information.]')
_kafka_consumer_perf_test_args+=('--hide-header[ If set, skips printing the header for the stats]')
_kafka_consumer_perf_test_args+=('--messages[ REQUIRED: The number of messages to send or consume]:file:_files')
_kafka_consumer_perf_test_args+=('--num-fetch-threads[ DEPRECATED AND IGNORED: Number of fetcher threads. (default: 1)]:file:_files')
_kafka_consumer_perf_test_args+=('--print-metrics[ Print out the metrics.]')
_kafka_consumer_perf_test_args+=('--reporting-interval[ Interval in milliseconds at which to print progress info. (default: 5000)]:file:_files')
_kafka_consumer_perf_test_args+=('--show-detailed-stats[ If set, stats are reported for each reporting interval as configured by reporting-interval]')
_kafka_consumer_perf_test_args+=('--socket-buffer-size[ The size of the tcp RECV size. (default: 2097152)]:file:_files')
_kafka_consumer_perf_test_args+=('--threads[ DEPRECATED AND IGNORED: Number of processing threads. (default: 10)]:file:_files')
_kafka_consumer_perf_test_args+=('--timeout[ The maximum allowed time in milliseconds between returned records. (default: 10000)]:file:_files')
_kafka_consumer_perf_test_args+=('--topic[ REQUIRED: The topic to consume from.]:topic:_kafka-list-topic')
_kafka_consumer_perf_test_args+=('--version[ Display Kafka version.]')
compdef "_kafka-command kafka-consumer-perf-test" kafka-consumer-perf-test
declare -a _kafka_delegation_tokens_args
_kafka_delegation_tokens_args=()
_kafka_delegation_tokens_args+=('--bootstrap-server[ REQUIRED: server(s) to use for bootstrapping.]:file:_files')
_kafka_delegation_tokens_args+=('--command-config[ REQUIRED: A property file containing configs to be passed to Admin Client. Token management operations are allowed in secure mode only. This config file is used to pass security related configs.]:file:_files')
_kafka_delegation_tokens_args+=('--create[ Create a new delegation token. Use --renewer- principal option to pass renewers principals.]')
_kafka_delegation_tokens_args+=('--describe[ Describe delegation tokens for the given principals. Use --owner-principal to pass owner/renewer principals. If the --owner-principal option is not supplied, all the user-owned tokens and tokens where the user has Describe permission will be returned.]')
_kafka_delegation_tokens_args+=('--expire[ Expire delegation token. Use --expiry-time- period option to expire the token.]')
_kafka_delegation_tokens_args+=('--expiry-time-period[ Expiry time period in milliseconds. If the value is -1, then the token will get invalidated immediately.]:file:_files')
_kafka_delegation_tokens_args+=('--help[ Print usage information.]')
_kafka_delegation_tokens_args+=('--hmac[ HMAC of the delegation token]:file:_files')
_kafka_delegation_tokens_args+=('--max-life-time-period[ Max life period for the token in milliseconds. If the value is -1, then token max life time will default to a server side config value (delegation.token.max.lifetime.ms).]:file:_files')
_kafka_delegation_tokens_args+=('--owner-principal[ owner is a Kafka principal. It should be in principalType:name format.]:file:_files')
_kafka_delegation_tokens_args+=('--renew[ Renew delegation token. Use --renew-time-period option to set renew time period.]')
_kafka_delegation_tokens_args+=('--renew-time-period[ Renew time period in milliseconds. If the value is -1, then the renew time period will default to a server side config value (delegation. token.expiry.time.ms).]:file:_files')
_kafka_delegation_tokens_args+=('--renewer-principal[ renewer is a Kafka principal. It should be in principalType:name format.]:file:_files')
_kafka_delegation_tokens_args+=('--version[ Display Kafka version.]')
compdef "_kafka-command kafka-delegation-tokens" kafka-delegation-tokens
declare -a _kafka_topics_args
_kafka_topics_args=()
_kafka_topics_args+=('--alter[ Alter the number of partitions, replica assignment, and/or configuration for the topic.]')
_kafka_topics_args+=('--at-min-isr-partitions[ if set when describing topics, only show partitions whose isr count is equal to the configured minimum.]')
_kafka_topics_args+=('--bootstrap-server[ REQUIRED: The Kafka server to connect to.]:file:_files')
_kafka_topics_args+=('--command-config[ Property file containing configs to be passed to Admin Client. This is used only with --bootstrap-server option for describing and altering broker configs.]:file:_files')
_kafka_topics_args+=('--config[ A topic configuration override for the topic being created or altered. The following is a list of valid configurations: cleanup.policy compression.type confluent.append.record.interceptor. classes confluent.key.schema.validation confluent.key.subject.name.strategy confluent.placement.constraints confluent.prefer.tier.fetch.ms confluent.segment.speculative. prefetch.enable confluent.tier.cleaner.compact.min. efficiency confluent.tier.cleaner.compact. segment.min.bytes confluent.tier.cleaner.dual.compaction confluent.tier.cleaner.enable confluent.tier.cleaner.min.cleanable. ratio confluent.tier.enable confluent.tier.local.hotset.bytes confluent.tier.local.hotset.ms confluent.tier.segment.hotset.roll. min.bytes confluent.value.schema.validation confluent.value.subject.name.strategy delete.retention.ms file.delete.delay.ms flush.messages flush.ms follower.replication.throttled. replicas index.interval.bytes leader.replication.throttled.replicas local.retention.bytes local.retention.ms max.compaction.lag.ms max.message.bytes message.downconversion.enable message.format.version message.timestamp.difference.max.ms message.timestamp.type min.cleanable.dirty.ratio min.compaction.lag.ms min.insync.replicas preallocate remote.storage.enable retention.bytes retention.ms segment.bytes segment.index.bytes segment.jitter.ms segment.ms unclean.leader.election.enable See the Kafka documentation for full details on the topic configs. It is supported only in combination with -- create if --bootstrap-server option is used (the kafka-configs CLI supports altering topic configs with a --bootstrap-server option).]:file:_files')
_kafka_topics_args+=('--create[ Create a new topic.]')
_kafka_topics_args+=('--delete[ Delete a topic]')
_kafka_topics_args+=('--delete-config[ A topic configuration override to be removed for an existing topic (see the list of configurations under the --config option). Not supported with the --bootstrap-server option.]:file:_files')
_kafka_topics_args+=('--describe[ List details for the given topics.]')
_kafka_topics_args+=('--disable-rack-aware[ Disable rack aware replica assignment]')
_kafka_topics_args+=('--exclude-internal[ exclude internal topics when running list or describe command. The internal topics will be listed by default]')
_kafka_topics_args+=('--help[ Print usage information.]')
_kafka_topics_args+=('--if-exists[ if set when altering or deleting or describing topics, the action will only execute if the topic exists.]')
_kafka_topics_args+=('--if-not-exists[ if set when creating topics, the action will only execute if the topic does not already exist.]')
_kafka_topics_args+=('--invalid-replica-placement-partitions[ if set when describing topics, only show partitions whose placement doesn''t adhere to the replica placement constraints.]')
_kafka_topics_args+=('--list[ List all available topics.]')
_kafka_topics_args+=('--partitions[ The number of partitions for the topic being created or altered (WARNING: If partitions are increased for a topic that has a key, the partition logic or ordering of the messages will be affected). If not supplied for create, defaults to the cluster default.]:file:_files')
_kafka_topics_args+=('--replica-assignment[ A list of manual partition-to-broker assignments for the topic being created or altered.]:file:_files')
_kafka_topics_args+=('--replica-placement[ This configuration is a JSON object that controls the set of brokers (replicas) which will always be allowed to join the ISR, and the set of brokers (observers) which are not allowed to join the ISR. The format of JSON is: { "version": 1, "replicas": \[ { "count": 2, "constraints": {"rack": "east-1"} }, { "count": 1, "constraints": {"rack": "east-2"} } \], "observers":\[ { "count": 1, "constraints": {"rack": "west-1"} } \] }]:file:_files')
_kafka_topics_args+=('--replication-factor[ The replication factor for each partition in the topic being created. If not supplied, defaults to the cluster default.]:file:_files')
_kafka_topics_args+=('--topic[ The topic to create, alter, describe or delete. It also accepts a regular expression, except for --create option. Put topic name in double quotes and use the ''\'' prefix to escape regular expression symbols; e.g. "test\.topic".]:topic:_kafka-list-topic')
_kafka_topics_args+=('--topic-id[ The topic-id to describe. This is used only with --bootstrap-server option for describing topics.]:topic:_kafka-list-topic')
_kafka_topics_args+=('--topics-with-overrides[ if set when describing topics, only show topics that have overridden configs]')
_kafka_topics_args+=('--unavailable-partitions[ if set when describing topics, only show partitions whose leader is not available]')
_kafka_topics_args+=('--under-min-isr-partitions[ if set when describing topics, only show partitions whose isr count is less than the configured minimum.]')
_kafka_topics_args+=('--under-replicated-partitions[ if set when describing topics, only show under replicated partitions]')
_kafka_topics_args+=('--version[ Display Kafka version.]')
compdef "_kafka-command kafka-topics" kafka-topics
declare -a _kafka_producer_perf_test_args
_kafka_producer_perf_test_args=()
_kafka_producer_perf_test_args+=('--topic[ --topic TOPIC produce messages to this topic]')
_kafka_producer_perf_test_args+=('--num-records[ --num-records NUM-RECORDS number of messages to produce]')
_kafka_producer_perf_test_args+=('--payload-delimiter[ --payload-delimiter PAYLOAD-DELIMITER provides delimiter to be used when --payload-file is provided. Defaults to new line. Note that this parameter will be ignored if --payload-file is not provided. (default: )]')
_kafka_producer_perf_test_args+=('--throughput[ --throughput THROUGHPUT throttle maximum message throughput to *approximately* THROUGHPUT messages/sec. Set this to -1 to disable throttling.]')
_kafka_producer_perf_test_args+=('--producer-props[ --producer-props PROP-NAME=PROP-VALUE \[PROP-NAME=PROP-VALUE ...\] kafka producer related configuration properties like bootstrap.servers,client.id etc. These configs take precedence over those passed via --producer.config.]')
_kafka_producer_perf_test_args+=('--producer.config[ --producer.config CONFIG-FILE producer config properties file.]')
_kafka_producer_perf_test_args+=('--print-metrics[ --print-metrics print out metrics at the end of the test. (default: false)]')
_kafka_producer_perf_test_args+=('--transactional-id[ --transactional-id TRANSACTIONAL-ID The transactionalId to use if transaction-duration-ms is > 0. Useful when testing the performance of concurrent transactions. (default: performance-producer-default-transactional-id)]')
_kafka_producer_perf_test_args+=('--transaction-duration-ms[ --transaction-duration-ms TRANSACTION-DURATION The max age of each transaction. The commitTransaction will be called after this time has elapsed. Transactions are only enabled if this value is positive. (default: 0) Note that either --record-size or --payload-file must be specified, but not both.]')
_kafka_producer_perf_test_args+=('--record-size[ --record-size RECORD-SIZE message size in bytes. Note that you must provide exactly one of --record-size or --payload-file.]')
_kafka_producer_perf_test_args+=('--payload-file[ --payload-file PAYLOAD-FILE file to read the message payloads from. This works only for UTF-8 encoded text files. Payloads will be read from this file and a payload will be randomly selected when sending messages. Note that you must provide exactly one of --record-size or --payload-file.]')
compdef "_kafka-command kafka-producer-perf-test" kafka-producer-perf-test
declare -a _kafka_dump_log_args
_kafka_dump_log_args=()
_kafka_dump_log_args+=('--cluster-metadata-decoder[ if set, log data will be parsed as cluster metadata records.]')
_kafka_dump_log_args+=('--deep-iteration[ if set, uses deep instead of shallow iteration. Automatically set if print-data-log is enabled.]')
_kafka_dump_log_args+=('--files[ REQUIRED: The comma separated list of files to be dumped. The supported file types include log files, index files, time index files, transaction index files, producer snapshots and tier state files.]:file:_files')
_kafka_dump_log_args+=('--help[ Print usage information.]')
_kafka_dump_log_args+=('--index-sanity-check[ if set, just checks the index sanity without printing its content. This is the same check that is executed on broker startup to determine if an index needs rebuilding or not.]')
_kafka_dump_log_args+=('--key-decoder-class[ if set, used to deserialize the keys. This class should implement kafka.serializer.Decoder trait. Custom jar should be available in kafka/libs directory. (default: kafka.serializer.StringDecoder)]:file:_files')
_kafka_dump_log_args+=('--max-message-size[ Size of largest message. (default: 5242880)]:file:_files')
_kafka_dump_log_args+=('--offsets-decoder[ if set, log data will be parsed as offset data from the __consumer_offsets topic.]')
_kafka_dump_log_args+=('--print-data-log[ if set, print the message content when dumping data logs. Automatically set if any decoder option is specified.]')
_kafka_dump_log_args+=('--quotas-decoder[ if set, log data will be parsed as quotas data from the _confluent-quotas topic]')
_kafka_dump_log_args+=('--skip-record-metadata[ whether to skip printing metadata for each record.]')
_kafka_dump_log_args+=('--transaction-log-decoder[ if set, log data will be parsed as transaction metadata from the __transaction_state topic.]')
_kafka_dump_log_args+=('--value-decoder-class[ if set, used to deserialize the messages. This class should implement kafka.serializer.Decoder trait. Custom jar should be available in kafka/libs directory. (default: kafka.serializer.StringDecoder)]:file:_files')
_kafka_dump_log_args+=('--verify-index-only[ if set, just verify the index log without printing its content.]')
_kafka_dump_log_args+=('--version[ Display Kafka version.]')
compdef "_kafka-command kafka-dump-log" kafka-dump-log
declare -a _kafka_log_dirs_args
_kafka_log_dirs_args=()
_kafka_log_dirs_args+=('--bootstrap-server[ REQUIRED: the server(s) to use for bootstrapping]:file:_files')
_kafka_log_dirs_args+=('--broker-list[ The list of brokers to be queried in the form "0,1,2". All brokers in the cluster will be queried if no broker list is specified]:file:_files')
_kafka_log_dirs_args+=('--command-config[ Property file containing configs to be passed to Admin Client.]:file:_files')
_kafka_log_dirs_args+=('--describe[ Describe the specified log directories on the specified brokers.]')
_kafka_log_dirs_args+=('--help[ Print usage information.]')
_kafka_log_dirs_args+=('--topic-list[ The list of topics to be queried in the form "topic1,topic2,topic3". All topics will be queried if no topic list is specified (default: )]:topic:_kafka-list-topic')
_kafka_log_dirs_args+=('--version[ Display Kafka version.]')
compdef "_kafka-command kafka-log-dirs" kafka-log-dirs
declare -a _kafka_verifiable_consumer_args
_kafka_verifiable_consumer_args=()
_kafka_verifiable_consumer_args+=('--topic[ --topic TOPIC Consumes messages from this topic.]')
_kafka_verifiable_consumer_args+=('--group-id[ --group-id GROUP_ID The groupId shared among members of the consumer group]')
_kafka_verifiable_consumer_args+=('--group-instance-id[ --group-instance-id GROUP_INSTANCE_ID A unique identifier of the consumer instance]')
_kafka_verifiable_consumer_args+=('--max-messages[ --max-messages MAX-MESSAGES Consume this many messages. If -1 (the default), the consumer will consume until the process is killed externally (default: -1)]')
_kafka_verifiable_consumer_args+=('--session-timeout[ --session-timeout TIMEOUT_MS Set the consumer''s session timeout (default: 30000)]')
_kafka_verifiable_consumer_args+=('--verbose[ --verbose Enable to log individual consumed records (default: false)]')
_kafka_verifiable_consumer_args+=('--enable-autocommit[ --enable-autocommit Enable offset auto-commit on consumer (default: false)]')
_kafka_verifiable_consumer_args+=('--send-offset-for-times-data[ --send-offset-for-times-data Consumer sends offsetForTimes() information for all the partitions it has subscribed to. Use when version = DEV_BRANCH (default: false)]')
_kafka_verifiable_consumer_args+=('--reset-policy[ --reset-policy RESETPOLICY Set reset policy (must be either ''earliest'', ''latest'', or ''none'') (default: earliest)]')
_kafka_verifiable_consumer_args+=('--assignment-strategy[ --assignment-strategy ASSIGNMENTSTRATEGY Set assignment strategy (e.g. org.apache.kafka.clients.consumer.RoundRobinAssignor) (default: org.apache.kafka.clients.consumer.RangeAssignor)]')
_kafka_verifiable_consumer_args+=('--consumer.config[ --consumer.config CONFIG_FILE Consumer config properties file (config options shared with command line parameters will be overridden).]')
_kafka_verifiable_consumer_args+=('--bootstrap-server[ --bootstrap-server HOST1:PORT1\[,HOST2:PORT2\[...\]\] REQUIRED unless --broker-list (deprecated) is specified. The server(s) to connect to. Comma-separated list of Kafka brokers in the form HOST1:PORT1,HOST2:PORT2,...]')
_kafka_verifiable_consumer_args+=('--broker-list[ --broker-list HOST1:PORT1\[,HOST2:PORT2\[...\]\] DEPRECATED, use --bootstrap-server instead; ignored if --bootstrap-server is specified. Comma-separated list of Kafka brokers in the form HOST1:PORT1,HOST2:PORT2,...]')
compdef "_kafka-command kafka-verifiable-consumer" kafka-verifiable-consumer
declare -a _kafka_verifiable_producer_args
_kafka_verifiable_producer_args=()
_kafka_verifiable_producer_args+=('--topic[ --topic TOPIC Produce messages to this topic.]')
_kafka_verifiable_producer_args+=('--max-messages[ --max-messages MAX-MESSAGES Produce this many messages. If -1, produce messages until the process is killed externally. (default: -1)]')
_kafka_verifiable_producer_args+=('--throughput[ --throughput THROUGHPUT If set >= 0, throttle maximum message throughput to *approximately* THROUGHPUT messages/sec. (default: -1)]')
_kafka_verifiable_producer_args+=('--acks[ --acks ACKS Acks required on each produced message. See Kafka docs on acks for details. (default: -1)]')
_kafka_verifiable_producer_args+=('--producer.config[ --producer.config CONFIG_FILE Producer config properties file.]')
_kafka_verifiable_producer_args+=('--message-create-time[ --message-create-time CREATETIME Send messages with creation time starting at the arguments value, in milliseconds since epoch (default: -1)]')
_kafka_verifiable_producer_args+=('--value-prefix[ --value-prefix VALUE-PREFIX If specified, each produced value will have this prefix with a dot separator]')
_kafka_verifiable_producer_args+=('--repeating-keys[ --repeating-keys REPEATING-KEYS If specified, each produced record will have a key starting at 0, incremented by 1 up to the number specified (exclusive), then the key is set to 0 again.]')
_kafka_verifiable_producer_args+=('--bootstrap-server[ --bootstrap-server HOST1:PORT1\[,HOST2:PORT2\[...\]\] REQUIRED: The server(s) to connect to. Comma-separated list of Kafka brokers in the form HOST1:PORT1,HOST2:PORT2,...]')
_kafka_verifiable_producer_args+=('--broker-list[ --broker-list HOST1:PORT1\[,HOST2:PORT2\[...\]\] DEPRECATED, use --bootstrap-server instead; ignored if --bootstrap-server is specified. Comma-separated list of Kafka brokers in the form HOST1:PORT1,HOST2:PORT2,...]')
compdef "_kafka-command kafka-verifiable-producer" kafka-verifiable-producer
declare -a _kafka_streams_application_reset_args
_kafka_streams_application_reset_args=()
_kafka_streams_application_reset_args+=('--bootstrap-servers[ Comma-separated list of broker urls with format: HOST1:PORT1,HOST2:PORT2 (default: localhost:9092)]:file:_files')
_kafka_streams_application_reset_args+=('--by-duration[ Reset offsets to offset by duration from current timestamp. Format: ''PnDTnHnMnS'']:file:_files')
_kafka_streams_application_reset_args+=('--config-file[ Property file containing configs to be passed to admin clients and embedded consumer.]:file:_files')
_kafka_streams_application_reset_args+=('--dry-run[ Display the actions that would be performed without executing the reset commands.]')
_kafka_streams_application_reset_args+=('--force[ Force the removal of members of the consumer group (intended to remove stopped members if a long session timeout was used). Make sure to shut down all stream applications when this option is specified to avoid unexpected rebalances.]')
_kafka_streams_application_reset_args+=('--from-file[ Reset offsets to values defined in CSV file.]:file:_files')
_kafka_streams_application_reset_args+=('--help[ Print usage information.]')
_kafka_streams_application_reset_args+=('--input-topics[ Comma-separated list of user input topics. For these topics, the tool by default will reset the offset to the earliest available offset. Reset to other offset position by appending other reset offset option, ex: --input-topics foo --shift-by 5]:topic:_kafka-list-topic')
_kafka_streams_application_reset_args+=('--intermediate-topics[ Comma-separated list of intermediate user topics (topics that are input and output topics, e.g., used in the deprecated through() method). For these topics, the tool will skip to the end.]:topic:_kafka-list-topic')
_kafka_streams_application_reset_args+=('--internal-topics[ Comma-separated list of internal topics to delete. Must be a subset of the internal topics marked for deletion by the default behaviour (do a dry-run without this option to view these topics).]:topic:_kafka-list-topic')
_kafka_streams_application_reset_args+=('--shift-by[ Reset offsets shifting current offset by ''n'', where ''n'' can be positive or negative]:file:_files')
_kafka_streams_application_reset_args+=('--to-datetime[ Reset offsets to offset from datetime. Format: ''YYYY-MM-DDTHH:mm:SS.sss'']:file:_files')
_kafka_streams_application_reset_args+=('--to-earliest[ Reset offsets to earliest offset.]')
_kafka_streams_application_reset_args+=('--to-latest[ Reset offsets to latest offset.]')
_kafka_streams_application_reset_args+=('--to-offset[ Reset offsets to a specific offset.]:file:_files')
_kafka_streams_application_reset_args+=('--version[ Print version information and exit.]')
compdef "_kafka-command kafka-streams-application-reset" kafka-streams-application-reset
declare -a _kafka_mirror_maker_args
_kafka_mirror_maker_args=()
_kafka_mirror_maker_args+=('--abort.on.send.failure[ Configure the mirror maker to exit on a failed send. (default: true)]:file:_files')
_kafka_mirror_maker_args+=('--consumer.config[ Embedded consumer config for consuming from the source cluster.]:file:_files')
_kafka_mirror_maker_args+=('--consumer.rebalance.listener[ The consumer rebalance listener to use for mirror maker consumer.]:file:_files')
_kafka_mirror_maker_args+=('--help[ Print usage information.]')
_kafka_mirror_maker_args+=('--include[ List of included topics to mirror.]:file:_files')
_kafka_mirror_maker_args+=('--message.handler[ Message handler which will process every record in-between consumer and producer.]:file:_files')
_kafka_mirror_maker_args+=('--message.handler.args[ Arguments used by custom message handler for mirror maker.]:file:_files')
_kafka_mirror_maker_args+=('--new.consumer[ DEPRECATED Use new consumer in mirror maker (this is the default so this option will be removed in a future version).]')
_kafka_mirror_maker_args+=('--num.streams[ Number of consumption streams. (default: 1)]:file:_files')
_kafka_mirror_maker_args+=('--offset.commit.interval.ms[ Offset commit interval in ms. (default: 60000)]:file:_files')
_kafka_mirror_maker_args+=('--producer.config[ Embedded producer config.]:file:_files')
_kafka_mirror_maker_args+=('--rebalance.listener.args[ Arguments used by custom rebalance listener for mirror maker consumer.]:file:_files')
_kafka_mirror_maker_args+=('--version[ Display Kafka version.]')
_kafka_mirror_maker_args+=('--whitelist[ DEPRECATED, use --include instead; ignored if --include specified. List of included topics to mirror.]:file:_files')
compdef "_kafka-command kafka-mirror-maker" kafka-mirror-maker
declare -a _kafka_delete_records_args
_kafka_delete_records_args=()
_kafka_delete_records_args+=('--bootstrap-server[ REQUIRED: The server to connect to.]:file:_files')
_kafka_delete_records_args+=('--command-config[ A property file containing configs to be passed to Admin Client.]:file:_files')
_kafka_delete_records_args+=('--help[ Print usage information.]')
_kafka_delete_records_args+=('--offset-json-file[ REQUIRED: The JSON file with offset per partition. The format to use is: {"partitions": \[{"topic": "foo", "partition": 1, "offset": 1}\], "version":1 }]:file:_files')
_kafka_delete_records_args+=('--version[ Display Kafka version.]')
compdef "_kafka-command kafka-delete-records" kafka-delete-records
declare -a _replicator_args
_replicator_args=()
_replicator_args+=('--blacklist[ --blacklist <Topic Blacklist> A comma-separated list of topics that should not be replicated, even if they are included in the whitelist or matched by the regular expression.]')
_replicator_args+=('--cluster.id[ --cluster.id <Replicator Cluster Id> Specifies the unique identifier for the Replicator cluster.]')
_replicator_args+=('--cluster.threads[ --cluster.threads <Total Replicator threads> The total number of threads across all workers in the Replicator cluster. If this command starts another Replicator worker in an existing cluster, this can be used to change the number of threads in the whole cluster.]')
_replicator_args+=('--confluent.license[ --confluent.license <Confluent License Key> Your Confluent license key that enables you to use Replicator. Without the license key, you can use Replicator for a 30-day trial period. If you are a subscriber, please contact Confluent Support for more information.]')
_replicator_args+=('--consumer.config[ --consumer.config <consumer.properties> Specifies the location of the file that contains the configuration settings for the consumer reading from the origin cluster.]')
_replicator_args+=('--consumer.monitoring.config[ --consumer.monitoring.config <consumer-monitoring.properties> Specifies the location of the file that contains the producer settings for the Kafka cluster where monitoring information about the Replicator consumer is to be sent. This must be specified if monitoring is to be enabled, but may point to a different Kafka cluster than the origin or destination clusters. Use the same file as --producer.config to write metrics to the destination cluster.]')
_replicator_args+=('--producer.config[ --producer.config <producer.properties> Specifies the location of the file that contains the configuration settings for the producer writing to the destination cluster.]')
_replicator_args+=('--producer.monitoring.config[ --producer.monitoring.config <producer-monitoring.properties> Specifies the location of the file that contains the producer settings for the Kafka cluster where monitoring information about the Replicator producer is to be sent. This must be specified if monitoring is to be enabled, but may point to a different Kafka cluster than the origin or destination clusters. Use the same file as --producer.config to write metrics to the destination cluster.]')
_replicator_args+=('--replication.config[ --replication.config <replication.properties> Specifies the location of the file that contains the configuration settings for replication. When used, any property in this file can be overridden via a command line parameter. When this is not supplied, all of the properties defining how topics are to be replicated should be specified on the command line.]')
_replicator_args+=('--topic.auto.create[ --topic.auto.create <Auto-create Topics on Destination> Whether to automatically create topics in the destination cluster if required.]')
_replicator_args+=('--topic.config.sync[ --topic.config.sync <Sync Topic Configs> Whether to periodically sync topic configuration to the destination cluster.]')
_replicator_args+=('--topic.config.sync.interval.ms[ --topic.config.sync.interval.ms <Topic Config Sync Interval (ms)> How often to check for configuration changes when ''topic.config.sync'' is enabled.]')
_replicator_args+=('--topic.create.backoff.ms[ --topic.create.backoff.ms <Topic Creation Backoff (ms)> Time to wait before retrying auto topic creation or expansion.]')
_replicator_args+=('--topic.poll.interval.ms[ --topic.poll.interval.ms <Topic Config Sync Interval (ms)> Specifies how frequently to poll the source cluster for new topics matching the whitelist or regular expression. Can also be read from the file given by --replication.config.]')
_replicator_args+=('--topic.preserve.partitions[ --topic.preserve.partitions <Auto-create Topics on Destination> Whether to automatically increase the number of partitions in the destination cluster to match the source cluster and ensure that messages replicated from the source cluster use the same partition in the destination cluster.]')
_replicator_args+=('--topic.regex[ --topic.regex <Regular Expression to Match Topics for Replication> A regular expression that matches the names of the topics to be replicated. Any topic that matches this expression (or is listed in the whitelist) and not in the blacklist will be replicated.]')
_replicator_args+=('--topic.rename.format[ --topic.rename.format <Rename Format> A format string for the topic name in the destination cluster, which may contain ${topic} as a placeholder for the originating topic name. For example, ${topic}_dc1 for the topic ''orders'' will map to the destination topic name ''orders_dc1.'' Can be placed inside the file specified by --replication.config.]')
_replicator_args+=('--topic.timestamp.type[ --topic.timestamp.type <Topic Timestamp Type> The timestamp type for the topics in the destination cluster.]')
_replicator_args+=('--whitelist[ --whitelist <Topic Whitelist> A comma-separated list of the names of topics that should be replicated. Any topic that is in this list and not in the blacklist will be replicated.]')
compdef "_kafka-command replicator" replicator
declare -a _kafka_reassign_partitions_args
_kafka_reassign_partitions_args=()
_kafka_reassign_partitions_args+=('--additional[ Execute this reassignment in addition to any other ongoing ones. This option can also be used to change the throttle of an ongoing reassignment.]')
_kafka_reassign_partitions_args+=('--bootstrap-server[ REQUIRED: the server(s) to use for bootstrapping.]:file:_files')
_kafka_reassign_partitions_args+=('--broker-list[ The list of brokers to which the partitions need to be reassigned in the form "0,1,2". This is required if --topics-to-move-json-file is used to generate reassignment configuration]:file:_files')
_kafka_reassign_partitions_args+=('--cancel[ Cancel an active reassignment.]')
_kafka_reassign_partitions_args+=('--command-config[ Property file containing configs to be passed to Admin Client.]:file:_files')
_kafka_reassign_partitions_args+=('--disable-rack-aware[ Disable rack aware replica assignment]')
_kafka_reassign_partitions_args+=('--execute[ Kick off the reassignment as specified by the --reassignment-json-file option.]')
_kafka_reassign_partitions_args+=('--generate[ Generate a candidate partition reassignment configuration. Note that this only generates a candidate assignment, it does not execute it.]')
_kafka_reassign_partitions_args+=('--help[ Print usage information.]')
_kafka_reassign_partitions_args+=('--list[ List all active partition reassignments.]')
_kafka_reassign_partitions_args+=('--preserve-throttles[ Do not modify broker or topic throttles.]')
_kafka_reassign_partitions_args+=('--reassignment-json-file[ The JSON file with the partition reassignment configuration. The format to use is - {"partitions": \[{"topic": "foo", "partition": 1, "replicas": \[1,2,3,4\], "observers":\[3,4\], "log_dirs": \["dir1","dir2","dir3","dir4"\] }\], "version":1 } Note that "log_dirs" is optional. When it is specified, its length must equal the length of the replicas list. The value in this list can be either "any" or the absolute path of the log directory on the broker. If an absolute log directory path is specified, the replica will be moved to the specified log directory on the broker. Note that "observers" is optional. When it is specified it must be a suffix of the replicas list.]:file:_files')
_kafka_reassign_partitions_args+=('--replica-alter-log-dirs-throttle[ The movement of replicas between log directories on the same broker will be throttled to this value (bytes/sec). This option can be included with --execute when a reassignment is started, and it can be altered by resubmitting the current reassignment along with the --additional flag. The throttle rate should be at least 1 KB/s. (default: -1)]')
_kafka_reassign_partitions_args+=('--throttle[ The movement of partitions between brokers will be throttled to this value (bytes/sec). This option can be included with --execute when a reassignment is started, and it can be altered by resubmitting the current reassignment along with the --additional flag. The throttle rate should be at least 1 KB/s. (default: -1)]:file:_files')
_kafka_reassign_partitions_args+=('--timeout[ The maximum time in ms to wait for log directory replica assignment to begin. (default: 10000)]:file:_files')
_kafka_reassign_partitions_args+=('--topics-to-move-json-file[ Generate a reassignment configuration to move the partitions of the specified topics to the list of brokers specified by the --broker-list option. The format to use is - {"topics": \[{"topic": "foo"},{"topic": "foo1"}\], "version":1 }]:topic:_kafka-list-topic')
_kafka_reassign_partitions_args+=('--verify[ Verify if the reassignment completed as specified by the --reassignment-json-file option. If there is a throttle engaged for the replicas specified, and the rebalance has completed, the throttle will be removed.]')
_kafka_reassign_partitions_args+=('--version[ Display Kafka version.]')
compdef "_kafka-command kafka-reassign-partitions" kafka-reassign-partitions