@@ -247,9 +247,11 @@ class SparkContext(config: SparkConf) extends Logging with ExecutorAllocationCli
   private[spark] def eventLogDir: Option[URI] = _eventLogDir
   private[spark] def eventLogCodec: Option[String] = _eventLogCodec
 
-  // Generate the random name for a temp folder in Tachyon
+  // Generate the random name for a temp folder in external block store.
   // Add a timestamp as the suffix here to make it more safe
-  val tachyonFolderName = "spark-" + randomUUID.toString()
+  val externalBlockStoreFolderName = "spark-" + randomUUID.toString()
+  @deprecated("Use externalBlockStoreFolderName instead.", "1.4.0")
+  val tachyonFolderName = externalBlockStoreFolderName
 
   def isLocal: Boolean = (master == "local" || master.startsWith("local["))
 
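Note on the hunk above: the rename stays source-compatible because the old name is kept as a deprecated alias, so existing callers keep compiling but get a warning steering them to the new val. A minimal standalone sketch of the same idiom (the enclosing object name is illustrative, not from the patch):

```scala
import java.util.UUID.randomUUID

object FolderNames {
  // New canonical name for the temp-folder prefix.
  val externalBlockStoreFolderName: String = "spark-" + randomUUID.toString

  // Old name kept as an alias; scalac emits a deprecation warning
  // at every use site that still references it.
  @deprecated("Use externalBlockStoreFolderName instead.", "1.4.0")
  val tachyonFolderName: String = externalBlockStoreFolderName
}
```

Both vals hold the same string, so behavior is unchanged for code still using the old name.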
@@ -386,7 +388,7 @@ class SparkContext(config: SparkConf) extends Logging with ExecutorAllocationCli
       }
     }
 
-    _conf.set("spark.tachyonStore.folderName", tachyonFolderName)
+    _conf.set("spark.externalBlockStore.folderName", externalBlockStoreFolderName)
 
     if (master == "yarn-client") System.setProperty("SPARK_YARN_MODE", "true")
 
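With the key renamed, consumers should read spark.externalBlockStore.folderName. A hedged sketch of a lookup that also tolerates configs written against the old key (the fallback helper is illustrative, not part of this patch):

```scala
import org.apache.spark.SparkConf

// Prefer the new key; fall back to the legacy Tachyon-era key.
def blockStoreFolderName(conf: SparkConf): Option[String] =
  conf.getOption("spark.externalBlockStore.folderName")
    .orElse(conf.getOption("spark.tachyonStore.folderName"))
```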
@@ -555,7 +557,7 @@ class SparkContext(config: SparkConf) extends Logging with ExecutorAllocationCli
           SparkEnv.executorActorSystemName,
           RpcAddress(host, port),
           ExecutorEndpoint.EXECUTOR_ENDPOINT_NAME)
-        Some(endpointRef.askWithReply[Array[ThreadStackTrace]](TriggerThreadDump))
+        Some(endpointRef.askWithRetry[Array[ThreadStackTrace]](TriggerThreadDump))
       }
     } catch {
       case e: Exception =>
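askWithRetry is the renamed askWithReply: a blocking ask that retransmits on failure instead of giving up on the first lost reply. A simplified model of those semantics, not the actual RpcEndpointRef implementation:

```scala
import scala.concurrent.{Await, Future}
import scala.concurrent.duration._
import scala.util.{Failure, Success, Try}

// Simplified ask-with-retry: await a typed reply, retrying a
// bounded number of times before giving up.
def askWithRetrySketch[T](send: () => Future[T],
                          maxRetries: Int = 3,
                          timeout: FiniteDuration = 30.seconds): T = {
  var lastError: Throwable = null
  for (_ <- 1 to maxRetries) {
    Try(Await.result(send(), timeout)) match {
      case Success(reply) => return reply
      case Failure(e) => lastError = e
    }
  }
  throw new RuntimeException(s"No reply after $maxRetries attempts", lastError)
}
```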
@@ -713,7 +715,9 @@ class SparkContext(config: SparkConf) extends Logging with ExecutorAllocationCli
     RDD[(String, String)] = {
     assertNotStopped()
     val job = new NewHadoopJob(hadoopConfiguration)
-    NewFileInputFormat.addInputPath(job, new Path(path))
+    // Use setInputPaths so that wholeTextFiles aligns with hadoopFile/textFile in taking
+    // comma separated files as input. (see SPARK-7155)
+    NewFileInputFormat.setInputPaths(job, path)
     val updateConf = job.getConfiguration
     new WholeTextFileRDD(
       this,
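The behavioral difference: FileInputFormat.setInputPaths(Job, String) splits its argument on commas, whereas addInputPath(job, new Path(path)) treated the whole string as one literal path. The caller-facing effect, with illustrative file names and assuming an existing SparkContext sc:

```scala
// Each comma-separated entry is now a separate input, matching
// the existing behavior of sc.textFile / sc.hadoopFile.
val pages = sc.wholeTextFiles("/data/a.txt,/data/b.txt")
pages.keys.collect()  // one entry per listed file
```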
@@ -759,7 +763,9 @@ class SparkContext(config: SparkConf) extends Logging with ExecutorAllocationCli
     RDD[(String, PortableDataStream)] = {
     assertNotStopped()
     val job = new NewHadoopJob(hadoopConfiguration)
-    NewFileInputFormat.addInputPath(job, new Path(path))
+    // Use setInputPaths so that binaryFiles aligns with hadoopFile/textFile in taking
+    // comma separated files as input. (see SPARK-7155)
+    NewFileInputFormat.setInputPaths(job, path)
     val updateConf = job.getConfiguration
     new BinaryFileRDD(
       this,
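The same comma-separated contract now holds for binaryFiles; a short usage sketch under the same assumptions (existing sc, illustrative paths):

```scala
// One (path, PortableDataStream) pair per listed file.
val streams = sc.binaryFiles("/data/img1.png,/data/img2.png")
streams.mapValues(_.toArray().length).collect()
```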
@@ -935,7 +941,9 @@ class SparkContext(config: SparkConf) extends Logging with ExecutorAllocationCli
     // The call to new NewHadoopJob automatically adds security credentials to conf,
     // so we don't need to explicitly add them ourselves
     val job = new NewHadoopJob(conf)
-    NewFileInputFormat.addInputPath(job, new Path(path))
+    // Use setInputPaths so that newAPIHadoopFile aligns with hadoopFile/textFile in taking
+    // comma separated files as input. (see SPARK-7155)
+    NewFileInputFormat.setInputPaths(job, path)
     val updatedConf = job.getConfiguration
     new NewHadoopRDD(this, fClass, kClass, vClass, updatedConf).setName(path)
   }
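And likewise for newAPIHadoopFile; illustrative usage assuming an existing sc:

```scala
import org.apache.hadoop.io.{LongWritable, Text}
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat

// Both files are read through the same new-API input format.
val records = sc.newAPIHadoopFile(
  "/data/part1.txt,/data/part2.txt",
  classOf[TextInputFormat],
  classOf[LongWritable],
  classOf[Text])
```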