Skip to content

Commit be6068d

Browse files
author
Marcelo Vanzin
committed
Restore shutdown hook to clean up staging dir.
1 parent 5150993 commit be6068d

File tree

1 file changed

+16
-0
lines changed

1 file changed

+16
-0
lines changed

yarn/common/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala

Lines changed: 16 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -28,6 +28,7 @@ import akka.actor._
2828
import akka.remote._
2929
import org.apache.hadoop.conf.Configuration
3030
import org.apache.hadoop.fs.{FileSystem, Path}
31+
import org.apache.hadoop.util.ShutdownHookManager
3132
import org.apache.hadoop.yarn.api._
3233
import org.apache.hadoop.yarn.api.records._
3334
import org.apache.hadoop.yarn.conf.YarnConfiguration
@@ -84,6 +85,21 @@ private[spark] class ApplicationMaster(args: ApplicationMasterArguments,
8485

8586
logInfo("ApplicationAttemptId: " + client.getAttemptId())
8687

88+
// If this is the last attempt, register a shutdown hook to cleanup the staging dir
89+
// after the app is finished, in case it does not exit through the expected means.
90+
// Use priority 30 as it's higher than HDFS. It's the same priority MapReduce is using.
91+
if (isLastAttempt()) {
92+
val cleanupHook = new Runnable {
93+
override def run() {
94+
logInfo("AppMaster received a signal.")
95+
if (!finished) {
96+
cleanupStagingDir()
97+
}
98+
}
99+
}
100+
ShutdownHookManager.get().addShutdownHook(cleanupHook, 30)
101+
}
102+
87103
// Call this to force generation of secret so it gets populated into the
88104
// Hadoop UGI. This has to happen before the startUserClass which does a
89105
// doAs in order for the credentials to be passed on to the executor containers.

0 commit comments

Comments (0)