2 files changed: 12 additions, 1 deletion
Directory in the file tree: external/kafka-0-9/src/main/scala/org/apache/spark/streaming/kafka09

First changed file, a Maven pom.xml (profiles section):

      </dependency>
    </dependencies>
  </profile>
+ <profile>
+   <id>include-kafka-09</id>
+   <dependencies>
+     <dependency>
+       <groupId>org.apache.spark</groupId>
+       <artifactId>spark-streaming-kafka-0-9_${scala.binary.version}</artifactId>
+       <version>${project.version}</version>
+     </dependency>
+   </dependencies>
+ </profile>
  <profile>
    <id>spark-ganglia-lgpl</id>
    <dependencies>
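The new profile only declares a dependency on the spark-streaming-kafka-0-9 connector at the project's own version. When the profile is activated with Maven's standard profile flag (e.g. -Pinclude-kafka-09 on the build command line), the artifact is pulled into the enclosing module's build, mirroring how the other optional profiles such as spark-ganglia-lgpl are wired in; no other changes to the module are needed.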
Second changed file, KafkaRDD under the kafka09 directory listed above:

@@ -65,7 +65,8 @@ private[spark] class KafkaRDD[K, V](
        " must be set to false for executor kafka params, else offsets may commit before processing")

    // TODO is it necessary to have separate configs for initial poll time vs ongoing poll time?
-   private val pollTimeout = conf.getLong("spark.streaming.kafka.consumer.poll.ms", 512)
+   private val pollTimeout = conf.getLong("spark.streaming.kafka.consumer.poll.ms",
+     conf.getTimeAsMs("spark.network.timeout", "120s"))
    private val cacheInitialCapacity =
      conf.getInt("spark.streaming.kafka.consumer.cache.initialCapacity", 16)
    private val cacheMaxCapacity =
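The behavioral change is in the poll timeout default: instead of a hard-coded 512 ms, the consumer now falls back to spark.network.timeout (itself defaulting to 120s) when spark.streaming.kafka.consumer.poll.ms is not set. A minimal sketch of how that fallback chain resolves on a bare SparkConf, using the same lookup expression as the patched line (the wrapping object and println are purely illustrative):

import org.apache.spark.SparkConf

// Sketch only: demonstrates the lookup chain, not the KafkaRDD internals.
object PollTimeoutDefault {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf(false) // empty conf, no values loaded from system properties

    // Explicit poll timeout wins if present; otherwise spark.network.timeout;
    // otherwise 120s (120000 ms) rather than the previous 512 ms.
    val pollTimeout = conf.getLong("spark.streaming.kafka.consumer.poll.ms",
      conf.getTimeAsMs("spark.network.timeout", "120s"))
    println(s"effective poll timeout: $pollTimeout ms") // prints 120000 here

    // Jobs that depended on the old short default can pin it back explicitly:
    conf.set("spark.streaming.kafka.consumer.poll.ms", "512")
  }
}

Tying the default to spark.network.timeout makes the consumer poll at least as patient as Spark's other network operations, which is presumably the motivation for dropping the short 512 ms default; an explicit spark.streaming.kafka.consumer.poll.ms setting still overrides it.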