@@ -15,32 +15,31 @@
  * limitations under the License.
  */

-package spark.bagel
+package org.apache.spark.bagel

-import spark._
-import spark.SparkContext._
+import org.apache.spark._
+import org.apache.spark.SparkContext._

-import scala.collection.mutable.ArrayBuffer
-import storage.StorageLevel
+import org.apache.spark.storage.StorageLevel

 object Bagel extends Logging {
   val DEFAULT_STORAGE_LEVEL = StorageLevel.MEMORY_AND_DISK

   /**
    * Runs a Bagel program.
-   * @param sc [[spark.SparkContext]] to use for the program.
+   * @param sc [[org.apache.spark.SparkContext]] to use for the program.
    * @param vertices vertices of the graph represented as an RDD of (Key, Vertex) pairs. Often the Key will be
    *                 the vertex id.
    * @param messages initial set of messages represented as an RDD of (Key, Message) pairs. Often this will be an
    *                 empty array, i.e. sc.parallelize(Array[K, Message]()).
-   * @param combiner [[spark.bagel.Combiner]] combines multiple individual messages to a given vertex into one
+   * @param combiner [[org.apache.spark.bagel.Combiner]] combines multiple individual messages to a given vertex into one
    *                 message before sending (which often involves network I/O).
-   * @param aggregator [[spark.bagel.Aggregator]] performs a reduce across all vertices after each superstep,
+   * @param aggregator [[org.apache.spark.bagel.Aggregator]] performs a reduce across all vertices after each superstep,
    *                   and provides the result to each vertex in the next superstep.
-   * @param partitioner [[spark.Partitioner]] partitions values by key
+   * @param partitioner [[org.apache.spark.Partitioner]] partitions values by key
    * @param numPartitions number of partitions across which to split the graph.
    *                      Default is the default parallelism of the SparkContext
-   * @param storageLevel [[spark.storage.StorageLevel]] to use for caching of intermediate RDDs in each superstep.
+   * @param storageLevel [[org.apache.spark.storage.StorageLevel]] to use for caching of intermediate RDDs in each superstep.
    *                     Defaults to caching in memory.
    * @param compute function that takes a Vertex, optional set of (possibly combined) messages to the Vertex,
    *                optional Aggregator and the current superstep,
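The `@param combiner` doc above is easiest to see with a concrete instance. Below is a minimal sketch under the renamed packages, assuming hypothetical `PRVertex` and `PRMessage` types (illustrations only, not part of this commit or the Bagel API): a combiner that sums message values so each target vertex receives a single `Double` instead of an array of messages.

```scala
import org.apache.spark.bagel._

// Hypothetical vertex and message types for illustration; PRVertex and
// PRMessage are assumptions for this sketch, not part of the Bagel API.
class PRVertex(val value: Double, val outEdges: Array[String],
               val active: Boolean = true) extends Vertex with Serializable

class PRMessage(val targetId: String, val value: Double)
  extends Message[String] with Serializable

// A minimal Combiner sketch: it collapses all messages bound for one vertex
// into a single Double, so only one value is shuffled per target vertex.
class SumCombiner extends Combiner[PRMessage, Double] with Serializable {
  def createCombiner(msg: PRMessage): Double = msg.value
  def mergeMsg(combiner: Double, msg: PRMessage): Double = combiner + msg.value
  def mergeCombiners(a: Double, b: Double): Double = a + b
}
```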
@@ -98,7 +97,7 @@ object Bagel extends Logging {
     verts
   }

-  /** Runs a Bagel program with no [[spark.bagel.Aggregator]] and the default storage level */
+  /** Runs a Bagel program with no [[org.apache.spark.bagel.Aggregator]] and the default storage level */
   def run[K: Manifest, V <: Vertex: Manifest, M <: Message[K]: Manifest, C: Manifest](
     sc: SparkContext,
     vertices: RDD[(K, V)],
@@ -110,7 +109,7 @@ object Bagel extends Logging {
     compute: (V, Option[C], Int) => (V, Array[M])
   ): RDD[(K, V)] = run(sc, vertices, messages, combiner, numPartitions, DEFAULT_STORAGE_LEVEL)(compute)

-  /** Runs a Bagel program with no [[spark.bagel.Aggregator]] */
+  /** Runs a Bagel program with no [[org.apache.spark.bagel.Aggregator]] */
   def run[K: Manifest, V <: Vertex: Manifest, M <: Message[K]: Manifest, C: Manifest](
     sc: SparkContext,
     vertices: RDD[(K, V)],
@@ -128,7 +127,7 @@ object Bagel extends Logging {
   }

   /**
-   * Runs a Bagel program with no [[spark.bagel.Aggregator]], default [[spark.HashPartitioner]]
+   * Runs a Bagel program with no [[org.apache.spark.bagel.Aggregator]], default [[org.apache.spark.HashPartitioner]]
    * and default storage level
    */
   def run[K: Manifest, V <: Vertex: Manifest, M <: Message[K]: Manifest, C: Manifest](
@@ -141,7 +140,7 @@ object Bagel extends Logging {
     compute: (V, Option[C], Int) => (V, Array[M])
   ): RDD[(K, V)] = run(sc, vertices, messages, combiner, numPartitions, DEFAULT_STORAGE_LEVEL)(compute)

-  /** Runs a Bagel program with no [[spark.bagel.Aggregator]] and the default [[spark.HashPartitioner]]*/
+  /** Runs a Bagel program with no [[org.apache.spark.bagel.Aggregator]] and the default [[org.apache.spark.HashPartitioner]]*/
   def run[K: Manifest, V <: Vertex: Manifest, M <: Message[K]: Manifest, C: Manifest](
     sc: SparkContext,
     vertices: RDD[(K, V)],
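Wiring a combiner into one of these no-Aggregator overloads changes the shape of `compute`: it receives the combined `Option[Double]` rather than an `Option[Array[PRMessage]]`. A hedged call sketch, reusing the hypothetical types from the earlier sketch and assuming `sc`, `verts` and `msgs` are set up as in the end-to-end example at the bottom:

```scala
// Sketch only: SumCombiner, PRVertex and PRMessage are the hypothetical
// types defined earlier; sc, verts and msgs as in the final example below.
val ranks = Bagel.run(sc, verts, msgs, new SumCombiner, 2) {
  (self: PRVertex, sum: Option[Double], superstep: Int) =>
    val newValue = 0.15 + 0.85 * sum.getOrElse(0.0)
    val halt = superstep >= 10
    val outbox =
      if (halt) Array[PRMessage]()
      else self.outEdges.map(d => new PRMessage(d, newValue / self.outEdges.size))
    (new PRVertex(newValue, self.outEdges, !halt), outbox)
}
```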
@@ -159,8 +158,8 @@ object Bagel extends Logging {
   }

   /**
-   * Runs a Bagel program with no [[spark.bagel.Aggregator]], default [[spark.HashPartitioner]],
-   * [[spark.bagel.DefaultCombiner]] and the default storage level
+   * Runs a Bagel program with no [[org.apache.spark.bagel.Aggregator]], default [[org.apache.spark.HashPartitioner]],
+   * [[org.apache.spark.bagel.DefaultCombiner]] and the default storage level
    */
   def run[K: Manifest, V <: Vertex: Manifest, M <: Message[K]: Manifest](
     sc: SparkContext,
@@ -172,8 +171,8 @@ object Bagel extends Logging {
   ): RDD[(K, V)] = run(sc, vertices, messages, numPartitions, DEFAULT_STORAGE_LEVEL)(compute)

   /**
-   * Runs a Bagel program with no [[spark.bagel.Aggregator]], the default [[spark.HashPartitioner]]
-   * and [[spark.bagel.DefaultCombiner]]
+   * Runs a Bagel program with no [[org.apache.spark.bagel.Aggregator]], the default [[org.apache.spark.HashPartitioner]]
+   * and [[org.apache.spark.bagel.DefaultCombiner]]
    */
   def run[K: Manifest, V <: Vertex: Manifest, M <: Message[K]: Manifest](
     sc: SparkContext,
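For completeness, a minimal end-to-end sketch of the DefaultCombiner overload that the last two docstrings describe: `compute` then receives all messages for a vertex as `Option[Array[PRMessage]]`. Everything beyond the imports is an assumption for illustration (a toy two-vertex graph, a PageRank-like update, a ten-superstep cap), not part of this commit:

```scala
import org.apache.spark._
import org.apache.spark.bagel._

// End-to-end sketch of a Bagel superstep loop under the renamed packages.
// PRVertex and PRMessage are the hypothetical types from the earlier sketch.
object BagelSketch {
  def main(args: Array[String]) {
    val sc = new SparkContext("local", "BagelSketch")

    // Two vertices pointing at each other, each starting with rank 1.0.
    val verts = sc.parallelize(Seq(
      ("a", new PRVertex(1.0, Array("b"))),
      ("b", new PRVertex(1.0, Array("a")))))
    // No messages in flight before the first superstep.
    val msgs = sc.parallelize(Array[(String, PRMessage)]())

    // DefaultCombiner overload: compute receives Option[Array[PRMessage]].
    val result = Bagel.run(sc, verts, msgs, 2) {
      (self: PRVertex, inbox: Option[Array[PRMessage]], superstep: Int) =>
        val sum = inbox.map(_.map(_.value).sum).getOrElse(0.0)
        val newValue = 0.15 + 0.85 * sum
        val halt = superstep >= 10
        val outbox =
          if (halt) Array[PRMessage]()
          else self.outEdges.map(d => new PRMessage(d, newValue / self.outEdges.size))
        // Bagel stops once every vertex is inactive and no messages remain.
        (new PRVertex(newValue, self.outEdges, !halt), outbox)
    }

    result.collect().foreach { case (id, v) => println(id + ": " + v.value) }
    sc.stop()
  }
}
```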