2 changes: 1 addition & 1 deletion dev/create-release/release-build.sh
@@ -114,7 +114,7 @@ PUBLISH_SCALA_2_10=0
 SCALA_2_10_PROFILES="-Pscala-2.10"
 SCALA_2_11_PROFILES=
 if [[ $SPARK_VERSION > "2.3" ]]; then
-  BASE_PROFILES="$BASE_PROFILES -Pkubernetes -Pflume"
+  BASE_PROFILES="$BASE_PROFILES -Pkubernetes"
   SCALA_2_11_PROFILES="-Pkafka-0-8"
 else
   PUBLISH_SCALA_2_10=1
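One note on the guard in this hunk: inside `[[ ... ]]`, bash's `>` is a lexicographic string comparison, not a numeric version check. A minimal sketch with a hypothetical version value:

    # String order, not version order: "2.4.0" > "2.3" holds, but a
    # hypothetical "2.10.0" would sort before "2.3" since '1' < '3'.
    SPARK_VERSION="2.4.0"
    if [[ $SPARK_VERSION > "2.3" ]]; then
      BASE_PROFILES="$BASE_PROFILES -Pkubernetes"   # post-2.3 profile set
    fi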
2 changes: 1 addition & 1 deletion dev/mima
@@ -24,7 +24,7 @@ set -e
 FWDIR="$(cd "`dirname "$0"`"/..; pwd)"
 cd "$FWDIR"
 
-SPARK_PROFILES="-Pmesos -Pkafka-0-8 -Pkubernetes -Pyarn -Pflume -Pspark-ganglia-lgpl -Pkinesis-asl -Phive-thriftserver -Phive"
+SPARK_PROFILES="-Pmesos -Pkafka-0-8 -Pkubernetes -Pyarn -Pspark-ganglia-lgpl -Pkinesis-asl -Phive-thriftserver -Phive"
 TOOLS_CLASSPATH="$(build/sbt -DcopyDependencies=false "export tools/fullClasspath" | tail -n1)"
 OLD_DEPS_CLASSPATH="$(build/sbt -DcopyDependencies=false $SPARK_PROFILES "export oldDeps/fullClasspath" | tail -n1)"
 
1 change: 0 additions & 1 deletion dev/run-tests.py
@@ -333,7 +333,6 @@ def build_spark_sbt(hadoop_version):
     build_profiles = get_hadoop_profiles(hadoop_version) + modules.root.build_profile_flags
     sbt_goals = ["test:package",  # Build test jars as some tests depend on them
                  "streaming-kafka-0-8-assembly/assembly",
-                 "streaming-flume-assembly/assembly",
                  "streaming-kinesis-asl-assembly/assembly"]
     profiles_and_goals = build_profiles + sbt_goals
 
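With the flume assembly goal dropped, the remaining streaming assemblies can still be built by hand; a hedged sketch of the equivalent manual sbt invocation, using the same profile flags these dev scripts pass elsewhere:

    # Build the two assemblies run-tests.py still requests, with the
    # profiles that enable their modules.
    build/sbt -Pkafka-0-8 -Pkinesis-asl \
      streaming-kafka-0-8-assembly/assembly \
      streaming-kinesis-asl-assembly/assembly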
1 change: 0 additions & 1 deletion dev/sbt-checkstyle
@@ -26,7 +26,6 @@ ERRORS=$(echo -e "q\n" \
         -Pkafka-0-8 \
         -Pkubernetes \
         -Pyarn \
-        -Pflume \
         -Phive \
         -Phive-thriftserver \
         checkstyle test:checkstyle \
1 change: 0 additions & 1 deletion dev/scalastyle
@@ -26,7 +26,6 @@ ERRORS=$(echo -e "q\n" \
         -Pkafka-0-8 \
         -Pkubernetes \
         -Pyarn \
-        -Pflume \
         -Phive \
         -Phive-thriftserver \
         -Pspark-ganglia-lgpl \
52 changes: 0 additions & 52 deletions dev/sparktestsupport/modules.py
@@ -283,56 +283,6 @@ def __hash__(self):
     ]
 )
 
-streaming_flume_sink = Module(
-    name="streaming-flume-sink",
-    dependencies=[streaming],
-    source_file_regexes=[
-        "external/flume-sink",
-    ],
-    build_profile_flags=[
-        "-Pflume",
-    ],
-    environ={
-        "ENABLE_FLUME_TESTS": "1"
-    },
-    sbt_test_goals=[
-        "streaming-flume-sink/test",
-    ]
-)
-
-
-streaming_flume = Module(
-    name="streaming-flume",
-    dependencies=[streaming],
-    source_file_regexes=[
-        "external/flume",
-    ],
-    build_profile_flags=[
-        "-Pflume",
-    ],
-    environ={
-        "ENABLE_FLUME_TESTS": "1"
-    },
-    sbt_test_goals=[
-        "streaming-flume/test",
-    ]
-)
-
-
-streaming_flume_assembly = Module(
-    name="streaming-flume-assembly",
-    dependencies=[streaming_flume, streaming_flume_sink],
-    source_file_regexes=[
-        "external/flume-assembly",
-    ],
-    build_profile_flags=[
-        "-Pflume",
-    ],
-    environ={
-        "ENABLE_FLUME_TESTS": "1"
-    }
-)
 
 
 mllib_local = Module(
     name="mllib-local",
@@ -425,14 +375,12 @@ def __hash__(self):
         pyspark_core,
         streaming,
         streaming_kafka,
-        streaming_flume_assembly,
         streaming_kinesis_asl
     ],
     source_file_regexes=[
         "python/pyspark/streaming"
     ],
     environ={
-        "ENABLE_FLUME_TESTS": "1",
         "ENABLE_KAFKA_0_8_TESTS": "1"
     },
     python_test_goals=[
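With the three flume Module definitions and the ENABLE_FLUME_TESTS flag gone, only the Kafka flag remains for the Python streaming tests. A hedged sketch of running that module locally (env var name per this file; the --modules flag is assumed from the Python test runner):

    # Run only the pyspark-streaming module, with Kafka 0.8 tests enabled.
    ENABLE_KAFKA_0_8_TESTS=1 python/run-tests --modules=pyspark-streaming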
2 changes: 1 addition & 1 deletion dev/test-dependencies.sh
@@ -29,7 +29,7 @@ export LC_ALL=C
 # TODO: This would be much nicer to do in SBT, once SBT supports Maven-style resolution.
 
 # NOTE: These should match those in the release publishing script
-HADOOP2_MODULE_PROFILES="-Phive-thriftserver -Pmesos -Pkafka-0-8 -Pkubernetes -Pyarn -Pflume -Phive"
+HADOOP2_MODULE_PROFILES="-Phive-thriftserver -Pmesos -Pkafka-0-8 -Pkubernetes -Pyarn -Phive"
 MVN="build/mvn"
 HADOOP_PROFILES=(
     hadoop-2.7
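Further down this script the trimmed profile string is combined with each Hadoop profile and handed to Maven; a rough sketch of the shape of that call (exact goals and flags in the script may differ):

    # One dependency resolution per Hadoop profile, sharing the module profiles.
    for HADOOP_PROFILE in "${HADOOP_PROFILES[@]}"; do
      $MVN $HADOOP2_MODULE_PROFILES -P"$HADOOP_PROFILE" dependency:build-classpath
    done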
7 changes: 0 additions & 7 deletions docs/building-spark.md
@@ -99,13 +99,6 @@ Note: Kafka 0.8 support is deprecated as of Spark 2.3.0.

 Kafka 0.10 support is still automatically built.
 
-## Building with Flume support
-
-Apache Flume support must be explicitly enabled with the `flume` profile.
-Note: Flume support is deprecated as of Spark 2.3.0.
-
-    ./build/mvn -Pflume -DskipTests clean package
-
 ## Building submodules individually
 
 It's possible to build Spark submodules using the `mvn -pl` option.
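The hunk ends at the "Building submodules individually" heading it leaves in place; for reference, the `mvn -pl` pattern that section describes looks like this (module coordinate chosen as an example):

    # Build just the spark-streaming submodule, resolving the rest of the
    # project from previously installed artifacts.
    ./build/mvn -pl :spark-streaming_2.11 clean install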
2 changes: 1 addition & 1 deletion docs/streaming-custom-receivers.md
@@ -4,7 +4,7 @@ title: Spark Streaming Custom Receivers
 ---
 
 Spark Streaming can receive streaming data from any arbitrary data source beyond
-the ones for which it has built-in support (that is, beyond Flume, Kafka, Kinesis, files, sockets, etc.).
+the ones for which it has built-in support (that is, beyond Kafka, Kinesis, files, sockets, etc.).
 This requires the developer to implement a *receiver* that is customized for receiving data from
 the concerned data source. This guide walks through the process of implementing a custom receiver
 and using it in a Spark Streaming application. Note that custom receivers can be implemented
169 changes: 0 additions & 169 deletions docs/streaming-flume-integration.md

This file was deleted.
