Skip to content
Closed
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@ import java.util.{ArrayList => JArrayList, List => JList, Map => JMap, Set => JS
import java.util.concurrent.TimeUnit

import scala.collection.JavaConverters._
import scala.util.Try
import scala.util.control.NonFatal

import org.apache.hadoop.fs.{FileSystem, Path}
Expand Down Expand Up @@ -585,7 +586,19 @@ private[client] class Shim_v0_13 extends Shim_v0_12 {
getAllPartitionsMethod.invoke(hive, table).asInstanceOf[JSet[Partition]]
} else {
logDebug(s"Hive metastore filter is '$filter'.")
getPartitionsByFilterMethod.invoke(hive, table, filter).asInstanceOf[JArrayList[Partition]]
try {
getPartitionsByFilterMethod.invoke(hive, table, filter)
.asInstanceOf[JArrayList[Partition]]
} catch {
case e: InvocationTargetException =>
// SPARK-18167 retry to investigate the flaky test. This should be reverted before
// the release is cut.
val retry = Try(getPartitionsByFilterMethod.invoke(hive, table, filter))
val full = Try(getAllPartitionsMethod.invoke(hive, table))
logError("getPartitionsByFilter failed, retry success = " + retry.isSuccess)
logError("getPartitionsByFilter failed, full fetch success = " + full.isSuccess)
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I am wondering if we should also log all of the partition specs?

Copy link
Contributor Author

@ericl ericl Oct 29, 2016

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yeah, that's a good idea — we should definitely do it if it turns out the retry fails.

throw e
}
}

partitions.asScala.toSeq
Expand Down