
Commit cd7a465

Code review feedback
1 parent 2f706f1 commit cd7a465

15 files changed: +50 -13 lines changed


core/src/main/scala/org/apache/spark/Aggregator.scala

Lines changed: 1 addition & 1 deletion
@@ -21,13 +21,13 @@ import org.apache.spark.util.collection.{AppendOnlyMap, ExternalAppendOnlyMap}
 
 /**
  * <span class="badge badge-red">DEVELOPER API - UNSTABLE</span>
- *
  * A set of functions used to aggregate data.
  *
  * @param createCombiner function to create the initial value of the aggregation.
  * @param mergeValue function to merge a new value into the aggregation result.
  * @param mergeCombiners function to merge outputs from multiple mergeValue function.
  */
+
 case class Aggregator[K, V, C] (
     createCombiner: V => C,
     mergeValue: (C, V) => C,
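
A minimal sketch of how the three aggregation functions documented above fit together, using a simple running-sum combiner (C = Int). An Aggregator is normally constructed internally (for example by combineByKey); building one directly here is only for illustration.

// Illustrative only: createCombiner seeds the per-key state from the first value,
// mergeValue folds further values into it, and mergeCombiners merges the partial
// results produced on different partitions.
val sumAggregator = org.apache.spark.Aggregator[String, Int, Int](
  createCombiner = (v: Int) => v,
  mergeValue = (sum: Int, v: Int) => sum + v,
  mergeCombiners = (s1: Int, s2: Int) => s1 + s2)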

core/src/main/scala/org/apache/spark/FutureAction.scala

Lines changed: 2 additions & 0 deletions
@@ -86,6 +86,8 @@ trait FutureAction[T] extends Future[T] {
 
 
 /**
+ * <span class="badge badge-red">EXPERIMENTAL API</span>
+ *
  * A [[FutureAction]] holding the result of an action that triggers a single job. Examples include
  * count, collect, reduce.
  */
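
Since FutureAction extends scala.concurrent.Future, a job submitted asynchronously can be awaited or composed with the usual Future combinators. A sketch, assuming an existing SparkContext named sc and that the async RDD actions implicit (rddToAsyncRDDActions) is available via SparkContext._ in this version:

import scala.concurrent.Await
import scala.concurrent.duration.Duration
import org.apache.spark.SparkContext._   // assumed: brings countAsync into scope

val rdd = sc.parallelize(1 to 1000000)
val future = rdd.countAsync()            // a FutureAction[Long] backed by a single job
val total = Await.result(future, Duration.Inf)
// future.cancel() would instead abort the underlying job.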

core/src/main/scala/org/apache/spark/InterruptibleIterator.scala

Lines changed: 1 addition & 1 deletion
@@ -21,7 +21,7 @@ package org.apache.spark
  * An iterator that wraps around an existing iterator to provide task killing functionality.
  * It works by checking the interrupted flag in [[TaskContext]].
  */
-class InterruptibleIterator[+T](val context: TaskContext, val delegate: Iterator[T])
+private[spark] class InterruptibleIterator[+T](val context: TaskContext, val delegate: Iterator[T])
   extends Iterator[T] {
 
   def hasNext: Boolean = !context.interrupted && delegate.hasNext

core/src/main/scala/org/apache/spark/SparkContext.scala

Lines changed: 10 additions & 1 deletion
@@ -184,7 +184,7 @@ class SparkContext(
     jars.foreach(addJar)
   }
 
-  def warnSparkMem(value: String): String = {
+  private def warnSparkMem(value: String): String = {
     logWarning("Using SPARK_MEM to set amount of memory to use per executor process is " +
       "deprecated, please use spark.executor.memory instead.")
     value
@@ -665,6 +665,11 @@
     postEnvironmentUpdate()
   }
 
+  /**
+   * <span class="badge badge-red">DEVELOPER API - UNSTABLE</span>
+   *
+   * Register a listener to receive up-calls from events that happen during execution.
+   */
   def addSparkListener(listener: SparkListener) {
     listenerBus.addListener(listener)
   }
@@ -974,6 +979,8 @@
   }
 
   /**
+   * <span class="badge badge-red">DEVELOPER API - UNSTABLE</span>
+   *
    * Run a job that can return approximate results.
    */
   def runApproximateJob[T, U, R](
@@ -991,6 +998,8 @@
   }
 
   /**
+   * <span class="badge badge-red">EXPERIMENTAL API</span>
+   *
    * Submit a job for execution and return a FutureJob holding the result.
    */
   def submitJob[T, U, R](
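
A sketch of submitJob, assuming an existing SparkContext named sc; the parameter order shown here (RDD, per-partition function, partition ids, per-partition result handler, final result expression) reflects this version's signature and should be checked against the actual source:

val data = sc.parallelize(1 to 100, 4)
val partSums = new Array[Int](4)
val future = sc.submitJob(
  data,
  (it: Iterator[Int]) => it.sum,                       // run on each partition
  Seq(0, 1, 2, 3),                                     // partitions to compute
  (index: Int, sum: Int) => partSums(index) = sum,     // collect per-partition results
  partSums.sum)                                        // assembled once the job completes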

core/src/main/scala/org/apache/spark/broadcast/BroadcastFactory.scala

Lines changed: 2 additions & 0 deletions
@@ -21,6 +21,8 @@ import org.apache.spark.SecurityManager
 import org.apache.spark.SparkConf
 
 /**
+ * <span class="badge badge-red">DEVELOPER API - UNSTABLE</span>
+ *
  * An interface for all the broadcast implementations in Spark (to allow
  * multiple broadcast implementations). SparkContext uses a user-specified
  * BroadcastFactory implementation to instantiate a particular broadcast for the
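
Users normally select a BroadcastFactory through configuration rather than instantiating one directly. A sketch, assuming the spark.broadcast.factory key and the TorrentBroadcastFactory class name used around this version:

import org.apache.spark.{SparkConf, SparkContext}

val conf = new SparkConf()
  .setAppName("broadcast-factory-example")
  .set("spark.broadcast.factory", "org.apache.spark.broadcast.TorrentBroadcastFactory")
val sc = new SparkContext(conf)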

core/src/main/scala/org/apache/spark/package.scala

Lines changed: 9 additions & 0 deletions
@@ -32,6 +32,15 @@ package org.apache
  *
  * Java programmers should reference the [[spark.api.java]] package
  * for Spark programming APIs in Java.
+ *
+ * Classes and methods marked with <span class="badge badge-red">EXPERIMENTAL API</span> are
+ * user-facing features which have not been officially adopted by the Spark project. These are
+ * subject to change or removal in minor releases.
+ *
+ * Classes and methods marked with <span class="badge badge-red">DEVELOPER API - UNSTABLE</span>
+ * are intended for advanced users who want to extend Spark through lower-level interfaces. These
+ * are subject to change or removal in minor releases.
+ *
  */
 package object spark {
   // For package docs only

core/src/main/scala/org/apache/spark/rdd/CoGroupedRDD.scala

Lines changed: 6 additions & 2 deletions
@@ -51,13 +51,17 @@ private[spark] class CoGroupPartition(idx: Int, val deps: Array[CoGroupSplitDep]
 }
 
 /**
+ * <span class="badge badge-red">DEVELOPER API - UNSTABLE</span>
+ *
  * A RDD that cogroups its parents. For each key k in parent RDDs, the resulting RDD contains a
  * tuple with the list of values for that key.
  *
+ * Note: This is an internal API. We recommend users use RDD.coGroup(...) instead of
+ * instantiating this directly.
+
  * @param rdds parent RDDs.
- * @param part partitioner used to partition the shuffle output.
+ * @param part partitioner used to partition the shuffle output
  */
-private[spark]
 class CoGroupedRDD[K](@transient var rdds: Seq[RDD[_ <: Product2[K, _]]], part: Partitioner)
   extends RDD[(K, Seq[Seq[_]])](rdds.head.context, Nil) {
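
The recommended alternative to instantiating CoGroupedRDD is cogroup on a pair RDD. A sketch, assuming an existing SparkContext named sc and the PairRDDFunctions implicit from SparkContext._:

import org.apache.spark.SparkContext._   // assumed: implicit conversion to PairRDDFunctions

val orders = sc.parallelize(Seq(("alice", 1), ("bob", 2), ("alice", 3)))
val visits = sc.parallelize(Seq(("alice", "home"), ("carol", "search")))
// Groups the values of both RDDs by key, instead of constructing a CoGroupedRDD by hand.
val grouped = orders.cogroup(visits)
grouped.collect().foreach(println)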

core/src/main/scala/org/apache/spark/rdd/EmptyRDD.scala

Lines changed: 1 addition & 1 deletion
@@ -22,7 +22,7 @@ import scala.reflect.ClassTag
 import org.apache.spark.{Partition, SparkContext, TaskContext}
 
 /**
- * An RDD that is empty, i.e. has no element in it.
+ * An RDD that has no partitions and no elements.
  */
 private[spark] class EmptyRDD[T: ClassTag](sc: SparkContext) extends RDD[T](sc, Nil) {

core/src/main/scala/org/apache/spark/rdd/RDD.scala

Lines changed: 10 additions & 2 deletions
@@ -513,6 +513,8 @@ abstract class RDD[T: ClassTag](
   }
 
   /**
+   * <span class="badge badge-red">DEVELOPER API - UNSTABLE</span>
+   *
    * Return a new RDD by applying a function to each partition of this RDD. This is a variant of
    * mapPartitions that also passes the TaskContext into the closure.
    */
@@ -775,7 +777,9 @@
   def count(): Long = sc.runJob(this, Utils.getIteratorSize _).sum
 
   /**
-   * (Experimental) Approximate version of count() that returns a potentially incomplete result
+   * <span class="badge badge-red">EXPERIMENTAL API</span>
+   *
+   * Approximate version of count() that returns a potentially incomplete result
    * within a timeout, even if not all tasks have finished.
    */
   def countApprox(timeout: Long, confidence: Double = 0.95): PartialResult[BoundedDouble] = {
@@ -821,7 +825,9 @@
   }
 
   /**
-   * (Experimental) Approximate version of countByValue().
+   * <span class="badge badge-red">EXPERIMENTAL API</span>
+   *
+   * Approximate version of countByValue().
    */
   def countByValueApprox(
       timeout: Long,
@@ -843,6 +849,8 @@
   }
 
   /**
+   * <span class="badge badge-red">EXPERIMENTAL API</span>
+   *
    * Return approximate number of distinct elements in the RDD.
    *
    * The accuracy of approximation can be controlled through the relative standard deviation
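
A sketch of countApprox, assuming an existing SparkContext named sc and the PartialResult/BoundedDouble API of this version (initialValue holds the current estimate once the timeout expires; mean, low and high bound it):

val big = sc.parallelize(1 to 10000000, 100)
// Wait at most 2 seconds; the estimate may be based on only the finished tasks.
val partial = big.countApprox(timeout = 2000L, confidence = 0.95)
val estimate = partial.initialValue
println(s"count ~= ${estimate.mean} (between ${estimate.low} and ${estimate.high})")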

core/src/main/scala/org/apache/spark/scheduler/JobLogger.scala

Lines changed: 2 additions & 0 deletions
@@ -28,6 +28,8 @@ import org.apache.spark._
 import org.apache.spark.executor.TaskMetrics
 
 /**
+ * <span class="badge badge-red">DEVELOPER API - UNSTABLE</span>
+ *
  * A logger class to record runtime information for jobs in Spark. This class outputs one log file
  * for each Spark job, containing tasks start/stop and shuffle information. JobLogger is a subclass
  * of SparkListener, use addSparkListener to add JobLogger to a SparkContext after the SparkContext
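
Since JobLogger is itself a SparkListener, it is registered through the addSparkListener call documented above. A sketch, assuming an existing SparkContext named sc and the no-argument JobLogger constructor available in this version:

import org.apache.spark.scheduler.JobLogger

sc.addSparkListener(new JobLogger())   // writes one log file per job as listener events arrive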
