dev/mima (2 changes: 1 addition & 1 deletion)
@@ -38,7 +38,7 @@ generate_mima_ignore() {
 # it did not process the new classes (which are in assembly jar).
 generate_mima_ignore
 
-export SPARK_CLASSPATH="`find lib_managed \( -name '*spark*jar' -a -type f \) | tr "\\n" ":"`"
+export SPARK_CLASSPATH="$(build/sbt "export oldDeps/fullClasspath" | tail -n1)"
 echo "SPARK_CLASSPATH=$SPARK_CLASSPATH"
 
 generate_mima_ignore
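
The replaced line in dev/mima globbed Spark jars out of lib_managed, a directory that only existed because the build set retrieveManaged := true. The new line instead asks sbt for the resolved classpath of the oldDeps project: sbt's "export" command prints a task's value in shell-consumable form, and tail -n1 keeps only that final line, discarding sbt's preceding log output. As a rough sketch, the exported fullClasspath amounts to the resolved jars joined with the platform path separator (an illustration of the expected shape, not sbt's implementation):

    import java.io.File

    // Join resolved dependency jars into a single classpath string,
    // e.g. "/path/a.jar:/path/b.jar" on Unix-like systems.
    def classpathString(jars: Seq[File]): String =
      jars.map(_.getAbsolutePath).mkString(File.pathSeparator)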
project/SparkBuild.scala (22 changes: 17 additions & 5 deletions)
@@ -16,6 +16,7 @@
  */
 
 import java.io._
+import java.nio.file.Files
 
 import scala.util.Properties
 import scala.collection.JavaConverters._
@@ -135,8 +136,6 @@ object SparkBuild extends PomBuild {
       .orElse(sys.props.get("java.home").map { p => new File(p).getParentFile().getAbsolutePath() })
       .map(file),
     incOptions := incOptions.value.withNameHashing(true),
-    retrieveManaged := true,
-    retrievePattern := "[type]s/[artifact](-[revision])(-[classifier]).[ext]",
     publishMavenStyle := true,
     unidocGenjavadocVersion := "0.9-spark0",
 
@@ -326,8 +325,6 @@ object OldDeps {
   def oldDepsSettings() = Defaults.coreDefaultSettings ++ Seq(
     name := "old-deps",
     scalaVersion := "2.10.5",
-    retrieveManaged := true,
-    retrievePattern := "[type]s/[artifact](-[revision])(-[classifier]).[ext]",
     libraryDependencies := Seq("spark-streaming-mqtt", "spark-streaming-zeromq",
       "spark-streaming-flume", "spark-streaming-kafka", "spark-streaming-twitter",
       "spark-streaming", "spark-mllib", "spark-bagel", "spark-graphx",
@@ -404,6 +401,8 @@ object Assembly {
 
   val hadoopVersion = taskKey[String]("The version of hadoop that spark is compiled against.")
 
+  val deployDatanucleusJars = taskKey[Unit]("Deploy datanucleus jars to the spark/lib_managed/jars directory")
+
   lazy val settings = assemblySettings ++ Seq(
     test in assembly := {},
     hadoopVersion := {
@@ -429,7 +428,20 @@
       case m if m.toLowerCase.startsWith("meta-inf/services/") => MergeStrategy.filterDistinctLines
       case "reference.conf" => MergeStrategy.concat
       case _ => MergeStrategy.first
-    }
+    },
+    deployDatanucleusJars := {
+      val jars: Seq[File] = (fullClasspath in assembly).value.map(_.data)
+        .filter(_.getPath.contains("org.datanucleus"))
+      val libManagedJars = new File(BuildCommons.sparkHome, "lib_managed/jars")
+      libManagedJars.mkdirs()
+      jars.foreach { jar =>
+        val dest = new File(libManagedJars, jar.getName)
+        if (!dest.exists()) {
+          Files.copy(jar.toPath, dest.toPath)
+        }
+      }
+    },
+    assembly <<= assembly.dependsOn(deployDatanucleusJars)
   )
 }

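With retrieveManaged gone, nothing copies dependencies into lib_managed any longer, yet the datanucleus jars still have to land in lib_managed/jars (per the task description above, presumably because the Hive-related launch scripts look for them there). The new deployDatanucleusJars task therefore copies them out of the assembly classpath, keying on the org.datanucleus segment that Ivy cache paths contain. A standalone sketch of the same copy-if-absent logic, with hypothetical names (deployJars, classpath, destDir are illustrative, not from the build):

    import java.io.File
    import java.nio.file.Files

    // Copy any datanucleus jars found on a resolved classpath into destDir,
    // skipping jars that are already present.
    def deployJars(classpath: Seq[File], destDir: File): Unit = {
      val datanucleusJars = classpath.filter(_.getPath.contains("org.datanucleus"))
      destDir.mkdirs()
      datanucleusJars.foreach { jar =>
        val dest = new File(destDir, jar.getName)
        // Files.copy without REPLACE_EXISTING throws FileAlreadyExistsException,
        // so only copy when the target does not exist yet.
        if (!dest.exists()) {
          Files.copy(jar.toPath, dest.toPath)
        }
      }
    }

The exists() guard is what lets repeated assembly builds succeed: a second run would otherwise fail on the jars deployed by the first.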