
Commit 82bc5d9

Merge branch 'master' of git://git.apache.org/spark into fix-typo-in-javautils
2 parents: 99f6f63 + 29fabb1

14 files changed, +45 -32 lines changed


core/pom.xml

Lines changed: 2 additions & 2 deletions
@@ -352,9 +352,9 @@
         </execution>
       </executions>
       <configuration>
-        <tasks>
+        <target>
           <unzip src="../python/lib/py4j-0.8.2.1-src.zip" dest="../python/build" />
-        </tasks>
+        </target>
       </configuration>
     </plugin>
     <plugin>

core/src/main/scala/org/apache/spark/scheduler/TaskResultGetter.scala

Lines changed: 1 addition & 0 deletions
@@ -19,6 +19,7 @@ package org.apache.spark.scheduler
 
 import java.nio.ByteBuffer
 
+import scala.language.existentials
 import scala.util.control.NonFatal
 
 import org.apache.spark._
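
Editor's note: scala.language.existentials gates existential types behind a feature import. A minimal sketch (illustrative names, not from TaskResultGetter) of the destructuring pattern that makes the compiler infer an existential type and, under -feature, can draw the warning this import silences:

import scala.language.existentials

object ExistentialsSketch {
  def main(args: Array[String]): Unit = {
    val value: AnyRef = "hello"
    // Destructuring a tuple whose first element has a wildcard type makes the
    // compiler infer an existential such as
    //   (Class[_$1], Int) forSome { type _$1 }
    // Under -feature that inference can draw an "inferred existential type"
    // warning unless scala.language.existentials is imported (as above).
    val (clazz, length) = (value.getClass, value.toString.length)
    println(s"${clazz.getName} / $length")   // java.lang.String / 5
  }
}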

core/src/test/java/org/apache/spark/JavaAPISuite.java

Lines changed: 3 additions & 1 deletion
@@ -184,6 +184,7 @@ public void sortByKey() {
     Assert.assertEquals(new Tuple2<Integer, Integer>(3, 2), sortedPairs.get(2));
   }
 
+  @SuppressWarnings("unchecked")
   @Test
   public void repartitionAndSortWithinPartitions() {
     List<Tuple2<Integer, Integer>> pairs = new ArrayList<Tuple2<Integer, Integer>>();
@@ -491,6 +492,7 @@ public Integer call(Integer a, Integer b) {
     Assert.assertEquals(33, sum);
   }
 
+  @SuppressWarnings("unchecked")
   @Test
   public void aggregateByKey() {
     JavaPairRDD<Integer, Integer> pairs = sc.parallelizePairs(
@@ -1556,7 +1558,7 @@ static class Class2 {}
   @Test
   public void testRegisterKryoClasses() {
     SparkConf conf = new SparkConf();
-    conf.registerKryoClasses(new Class[]{ Class1.class, Class2.class });
+    conf.registerKryoClasses(new Class<?>[]{ Class1.class, Class2.class });
     Assert.assertEquals(
       Class1.class.getName() + "," + Class2.class.getName(),
       conf.get("spark.kryo.classesToRegister"));

core/src/test/scala/org/apache/spark/metrics/InputOutputMetricsSuite.scala

Lines changed: 2 additions & 2 deletions
@@ -24,14 +24,14 @@ import org.apache.spark.deploy.SparkHadoopUtil
 import org.apache.spark.scheduler.{SparkListenerTaskEnd, SparkListener}
 
 import org.scalatest.FunSuite
-import org.scalatest.matchers.ShouldMatchers
+import org.scalatest.Matchers
 
 import org.apache.hadoop.conf.Configuration
 import org.apache.hadoop.fs.{Path, FileSystem}
 
 import scala.collection.mutable.ArrayBuffer
 
-class InputOutputMetricsSuite extends FunSuite with SharedSparkContext with ShouldMatchers {
+class InputOutputMetricsSuite extends FunSuite with SharedSparkContext with Matchers {
   test("input metrics when reading text file with single split") {
     val file = new File(getClass.getSimpleName + ".txt")
     val pw = new PrintWriter(new FileWriter(file))
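
Editor's note: in ScalaTest 2.x, org.scalatest.matchers.ShouldMatchers was deprecated in favour of org.scalatest.Matchers, which provides the same should-style syntax, so swapping the trait is the only change needed. A small, self-contained sketch (suite and value names are illustrative, not from this file):

import org.scalatest.{FunSuite, Matchers}

// Mixing in Matchers keeps existing `should` assertions compiling against
// ScalaTest 2.x without the deprecated ShouldMatchers trait.
class MatchersSketch extends FunSuite with Matchers {
  test("should-style assertions") {
    val bytesRead = 1024L
    bytesRead should be > 0L
    Seq(1, 2, 3) should contain (2)
  }
}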

core/src/test/scala/org/apache/spark/scheduler/DAGSchedulerSuite.scala

Lines changed: 1 addition & 1 deletion
@@ -739,7 +739,7 @@ class DAGSchedulerSuite extends TestKit(ActorSystem("DAGSchedulerSuite")) with F
 
   test("accumulator not calculated for resubmitted result stage") {
     //just for register
-    val accum = new Accumulator[Int](0, SparkContext.IntAccumulatorParam)
+    val accum = new Accumulator[Int](0, AccumulatorParam.IntAccumulatorParam)
     val finalRdd = new MyRDD(sc, 1, Nil)
     submit(finalRdd, Array(0))
     completeWithAccumulator(accum.id, taskSets(0), Seq((Success, 42)))
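
Editor's note: the test now references IntAccumulatorParam on the AccumulatorParam companion object instead of the older alias on SparkContext. A rough sketch of the same object used through the public Spark 1.x API (app name, master, and numbers are illustrative; assumes Spark 1.x on the classpath):

import org.apache.spark.{AccumulatorParam, SparkConf, SparkContext}

object AccumulatorSketch {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setMaster("local").setAppName("acc-sketch"))
    // Pass IntAccumulatorParam explicitly, mirroring the updated test;
    // normally the implicit parameter would be resolved automatically.
    val acc = sc.accumulator(0)(AccumulatorParam.IntAccumulatorParam)
    sc.parallelize(1 to 10).foreach(x => acc += x)
    println(acc.value)  // 55
    sc.stop()
  }
}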

mllib/src/test/java/org/apache/spark/mllib/feature/JavaTfIdfSuite.java

Lines changed: 2 additions & 0 deletions
@@ -49,6 +49,7 @@ public void tearDown() {
   public void tfIdf() {
     // The tests are to check Java compatibility.
     HashingTF tf = new HashingTF();
+    @SuppressWarnings("unchecked")
     JavaRDD<ArrayList<String>> documents = sc.parallelize(Lists.newArrayList(
       Lists.newArrayList("this is a sentence".split(" ")),
       Lists.newArrayList("this is another sentence".split(" ")),
@@ -68,6 +69,7 @@ public void tfIdf() {
   public void tfIdfMinimumDocumentFrequency() {
     // The tests are to check Java compatibility.
     HashingTF tf = new HashingTF();
+    @SuppressWarnings("unchecked")
     JavaRDD<ArrayList<String>> documents = sc.parallelize(Lists.newArrayList(
       Lists.newArrayList("this is a sentence".split(" ")),
       Lists.newArrayList("this is another sentence".split(" ")),

sql/core/src/main/java/org/apache/spark/sql/api/java/UserDefinedType.java

Lines changed: 1 addition & 0 deletions
@@ -35,6 +35,7 @@ protected UserDefinedType() { }
   public boolean equals(Object o) {
     if (this == o) return true;
     if (o == null || getClass() != o.getClass()) return false;
+    @SuppressWarnings("unchecked")
     UserDefinedType<UserType> that = (UserDefinedType<UserType>) o;
     return this.sqlType().equals(that.sqlType());
   }

sql/core/src/main/scala/org/apache/spark/sql/parquet/ParquetTypes.scala

Lines changed: 2 additions & 2 deletions
@@ -24,8 +24,8 @@ import scala.util.Try
 import org.apache.hadoop.conf.Configuration
 import org.apache.hadoop.fs.{FileSystem, Path}
 import org.apache.hadoop.mapreduce.Job
-import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter
 
+import parquet.format.converter.ParquetMetadataConverter
 import parquet.hadoop.{ParquetFileReader, Footer, ParquetFileWriter}
 import parquet.hadoop.metadata.{ParquetMetadata, FileMetaData}
 import parquet.hadoop.util.ContextUtil
@@ -458,7 +458,7 @@ private[parquet] object ParquetTypesConverter extends Logging {
       // ... and fallback to "_metadata" if no such file exists (which implies the Parquet file is
       // empty, thus normally the "_metadata" file is expected to be fairly small).
       .orElse(children.find(_.getPath.getName == ParquetFileWriter.PARQUET_METADATA_FILE))
-      .map(ParquetFileReader.readFooter(conf, _))
+      .map(ParquetFileReader.readFooter(conf, _, ParquetMetadataConverter.NO_FILTER))
       .getOrElse(
         throw new IllegalArgumentException(s"Could not find Parquet metadata at path $path"))
   }
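
Editor's note: the extra ParquetMetadataConverter.NO_FILTER argument selects the readFooter overload that takes an explicit metadata filter; NO_FILTER means the complete footer (all row-group metadata) is read. A standalone sketch of the three-argument call (the file path is illustrative; assumes parquet-hadoop and hadoop-common on the classpath):

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import parquet.format.converter.ParquetMetadataConverter
import parquet.hadoop.ParquetFileReader

object FooterSketch {
  def main(args: Array[String]): Unit = {
    val conf = new Configuration()
    // NO_FILTER: keep metadata for every row group instead of skipping any.
    val footer = ParquetFileReader.readFooter(
      conf, new Path("/tmp/example.parquet"), ParquetMetadataConverter.NO_FILTER)
    println(footer.getFileMetaData.getSchema)
  }
}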

sql/core/src/test/java/org/apache/spark/sql/api/java/JavaRowSuite.java

Lines changed: 1 addition & 0 deletions
@@ -141,6 +141,7 @@ public void constructComplexRow() {
       doubleValue, stringValue, timestampValue, null);
 
     // Complex array
+    @SuppressWarnings("unchecked")
     List<Map<String, Long>> arrayOfMaps = Arrays.asList(simpleMap);
     List<Row> arrayOfRows = Arrays.asList(simpleStruct);

sql/core/src/test/scala/org/apache/spark/sql/DslQuerySuite.scala

Lines changed: 2 additions & 0 deletions
@@ -24,6 +24,8 @@ import org.apache.spark.sql.catalyst.expressions._
 import org.apache.spark.sql.catalyst.dsl._
 import org.apache.spark.sql.test.TestSQLContext._
 
+import scala.language.postfixOps
+
 class DslQuerySuite extends QueryTest {
   import org.apache.spark.sql.TestData._

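
Editor's note: scala.language.postfixOps gates calling a no-argument method in postfix position (receiver, space, method name, no dot), a style the added import suggests this suite's DSL relies on. A tiny, self-contained sketch of the same feature (the duration example is illustrative, not from DslQuerySuite):

import scala.concurrent.duration._
// Without this import, `5 seconds` (a postfix call) draws a feature warning
// under -feature; the dotted form `5.seconds` would not.
import scala.language.postfixOps

object PostfixSketch {
  def main(args: Array[String]): Unit = {
    val timeout = 5 seconds
    println(timeout)  // 5 seconds
  }
}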