@@ -35,6 +35,7 @@ import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat, Job => NewHadoopJob}
 import org.apache.hadoop.mapreduce.lib.input.{FileInputFormat => NewFileInputFormat}
 import org.apache.mesos.MesosNativeLibrary
 
+import org.apache.spark.annotations.{DeveloperApi, Experimental}
 import org.apache.spark.broadcast.Broadcast
 import org.apache.spark.deploy.{LocalSparkCluster, SparkHadoopUtil}
 import org.apache.spark.partial.{ApproximateEvaluator, PartialResult}
@@ -48,28 +49,31 @@ import org.apache.spark.ui.SparkUI
 import org.apache.spark.util.{ClosureCleaner, MetadataCleaner, MetadataCleanerType, TimeStampedHashMap, Utils}
 
 /**
+ * :: DeveloperApi ::
  * Main entry point for Spark functionality. A SparkContext represents the connection to a Spark
  * cluster, and can be used to create RDDs, accumulators and broadcast variables on that cluster.
  *
  * @param config a Spark Config object describing the application configuration. Any settings in
  * this config override the default configs as well as system properties.
  */
-class SparkContext(config: SparkConf)
-  extends Logging {
+
+@DeveloperApi
+class SparkContext(config: SparkConf) extends Logging {
 
   // This is used only by YARN for now, but should be relevant to other cluster types (Mesos,
   // etc) too. This is typically generated from InputFormatInfo.computePreferredLocations. It
   // contains a map from hostname to a list of input format splits on the host.
   private[spark] var preferredNodeLocationData: Map[String, Set[SplitInfo]] = Map()
 
   /**
-   * <span class="developer badge">Developer API</span>
+   * :: DeveloperApi ::
    * Alternative constructor for setting preferred locations where Spark will create executors.
    *
    * @param preferredNodeLocationData used in YARN mode to select nodes to launch containers on. Can
    * be generated using [[org.apache.spark.scheduler.InputFormatInfo.computePreferredLocations]]
    * from a list of input files or InputFormats for the application.
    */
+  @DeveloperApi
   def this(config: SparkConf, preferredNodeLocationData: Map[String, Set[SplitInfo]]) = {
     this(config)
     this.preferredNodeLocationData = preferredNodeLocationData
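
For context on the constructor annotated above, here is a minimal sketch of how a YARN
application might use it. The object name and app name are placeholders, and the map is left
empty; in a real job it would come from InputFormatInfo.computePreferredLocations, as the
scaladoc notes.

    import org.apache.spark.{SparkConf, SparkContext}
    import org.apache.spark.scheduler.SplitInfo

    object PreferredLocationsSketch {
      def main(args: Array[String]): Unit = {
        val conf = new SparkConf().setAppName("preferred-locations-sketch")

        // Hostname -> input splits held on that host; YARN consults this map
        // when deciding where to request containers. Empty as a placeholder.
        val locations: Map[String, Set[SplitInfo]] = Map.empty

        val sc = new SparkContext(conf, locations)
        try {
          // ... create RDDs against the data whose splits were described above ...
        } finally {
          sc.stop()
        }
      }
    }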
@@ -714,9 +718,10 @@ class SparkContext(config: SparkConf)
   }
 
   /**
-   * <span class="developer badge">Developer API</span>
+   * :: DeveloperApi ::
    * Register a listener to receive up-calls from events that happen during execution.
    */
+  @DeveloperApi
   def addSparkListener(listener: SparkListener) {
     listenerBus.addListener(listener)
   }
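
A usage note for addSparkListener: the sketch below registers a listener that only logs job
boundaries, assuming the 1.0-era event classes SparkListenerJobStart and SparkListenerJobEnd;
the println bodies are illustrative.

    import org.apache.spark.scheduler.{SparkListener, SparkListenerJobEnd, SparkListenerJobStart}

    // Every up-call not overridden keeps SparkListener's no-op default.
    class JobLogListener extends SparkListener {
      override def onJobStart(jobStart: SparkListenerJobStart) {
        println(s"Job ${jobStart.jobId} started")
      }
      override def onJobEnd(jobEnd: SparkListenerJobEnd) {
        println(s"Job ${jobEnd.jobId} ended with ${jobEnd.jobResult}")
      }
    }

    // Registered through the method annotated above:
    //   sc.addSparkListener(new JobLogListener)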
@@ -1026,9 +1031,10 @@ class SparkContext(config: SparkConf)
   }
 
   /**
-   * <span class="developer badge">Developer API</span>
+   * :: DeveloperApi ::
    * Run a job that can return approximate results.
    */
+  @DeveloperApi
   def runApproximateJob[T, U, R](
       rdd: RDD[T],
       func: (TaskContext, Iterator[T]) => U,
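
runApproximateJob is normally reached through public RDD methods rather than called directly,
since the ApproximateEvaluator implementations are package-private. A sketch via countApprox,
which delegates to it (the local master and data sizes are illustrative):

    import org.apache.spark.{SparkConf, SparkContext}

    val sc = new SparkContext(new SparkConf().setMaster("local[2]").setAppName("approx-sketch"))
    val nums = sc.parallelize(1 to 1000000, 8)

    // After 500 ms we get whichever bound is available; the PartialResult
    // keeps tightening as more tasks finish.
    val partial = nums.countApprox(timeout = 500, confidence = 0.95)
    println(partial.initialValue) // BoundedDouble: a mean with a confidence interval

    sc.stop()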
@@ -1044,9 +1050,9 @@ class SparkContext(config: SparkConf)
   }
 
   /**
-   * <span class="experimental badge">Experimental</span>
    * Submit a job for execution and return a FutureJob holding the result.
    */
+  @Experimental
   def submitJob[T, U, R](
       rdd: RDD[T],
       processPartition: Iterator[T] => U,
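
Finally, a sketch of the newly @Experimental submitJob, summing each partition asynchronously.
The returned value is a FutureAction (SimpleFutureAction in the shipped API), which implements
scala.concurrent.Future; the local master and data are illustrative.

    import scala.concurrent.Await
    import scala.concurrent.duration._
    import org.apache.spark.{SparkConf, SparkContext}

    val sc = new SparkContext(new SparkConf().setMaster("local[2]").setAppName("submit-job-sketch"))
    val data = sc.parallelize(1 to 100, 4)

    val sums = new Array[Int](data.partitions.length)
    val future = sc.submitJob(
      data,
      (it: Iterator[Int]) => it.sum,         // processPartition: runs on each partition
      0 until data.partitions.length,        // which partitions to compute
      (i: Int, s: Int) => sums(i) = s,       // resultHandler: collect per-partition sums
      sums.sum                               // resultFunc: evaluated once all partitions finish
    )

    println(Await.result(future, 30.seconds)) // 5050
    sc.stop()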