|
15 | 15 | * limitations under the License. |
16 | 16 | */ |
17 | 17 |
|
18 | | -package org.apache.spark; |
| 18 | +package org.apache.spark |
19 | 19 |
|
20 | | -import java.io.Serializable; |
| 20 | +import java.io.Serializable |
21 | 21 |
|
22 | | -import scala.Function0; |
23 | | -import scala.Function1; |
24 | | -import scala.Unit; |
| 22 | +import org.apache.spark.annotation.DeveloperApi |
| 23 | +import org.apache.spark.executor.TaskMetrics |
| 24 | +import org.apache.spark.util.TaskCompletionListener |
25 | 25 |
|
26 | | -import org.apache.spark.annotation.DeveloperApi; |
27 | | -import org.apache.spark.executor.TaskMetrics; |
28 | | -import org.apache.spark.util.TaskCompletionListener; |
29 | 26 |
|
30 | | -/** |
31 | | - * Contextual information about a task which can be read or mutated during |
32 | | - * execution. To access the TaskContext for a running task use |
33 | | - * TaskContext.get(). |
34 | | - */ |
35 | | -public abstract class TaskContext implements Serializable { |
| 27 | +object TaskContext { |
36 | 28 | /** |
37 | 29 | * Return the currently active TaskContext. This can be called inside of |
38 | 30 | * user functions to access contextual information about running tasks. |
39 | 31 | */ |
40 | | - public static TaskContext get() { |
41 | | - return taskContext.get(); |
42 | | - } |
| 32 | + def get(): TaskContext = taskContext.get |
| 33 | + |
| 34 | + private val taskContext: ThreadLocal[TaskContext] = new ThreadLocal[TaskContext] |
| 35 | + |
| 36 | + private[spark] def setTaskContext(tc: TaskContext): Unit = taskContext.set(tc) |
| 37 | + |
| 38 | + private[spark] def unset(): Unit = taskContext.remove() |
| 39 | +} |
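The companion object above replaces the old static helpers, and get() is the only public entry point; the scheduler, not user code, calls the private setTaskContext()/unset() around each task run. A minimal sketch of how user code reaches the thread-local context (the object and app names here are invented for illustration):

    import org.apache.spark.{SparkConf, SparkContext, TaskContext}

    object TaskContextGetExample {
      def main(args: Array[String]): Unit = {
        val sc = new SparkContext(
          new SparkConf().setMaster("local[2]").setAppName("task-context-get"))
        // TaskContext.get() is only meaningful on the thread that is currently
        // running a task; outside of a running task it returns null.
        val tagged = sc.parallelize(1 to 10, numSlices = 2).map { x =>
          val ctx = TaskContext.get()
          (ctx.partitionId(), x)
        }.collect()
        tagged.foreach(println)
        sc.stop()
      }
    }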
43 | 40 |
|
44 | | - private static ThreadLocal<TaskContext> taskContext = |
45 | | - new ThreadLocal<TaskContext>(); |
46 | 41 |
|
47 | | - static void setTaskContext(TaskContext tc) { |
48 | | - taskContext.set(tc); |
49 | | - } |
| 42 | +/** |
| 43 | + * Contextual information about a task which can be read or mutated during |
| 44 | + * execution. To access the TaskContext for a running task, use: |
| 45 | + * {{{ |
| 46 | + * org.apache.spark.TaskContext.get() |
| 47 | + * }}} |
| 48 | + */ |
| 49 | +abstract class TaskContext extends Serializable { |
| 50 | + // Note: TaskContext must NOT define a get method. Otherwise it will prevent the Scala compiler |
| 51 | + // from generating a static get method (based on the companion object's get method). |
50 | 52 |
|
51 | | - static void unset() { |
52 | | - taskContext.remove(); |
53 | | - } |
| 53 | + // Note: getters in this class are defined with parentheses to maintain backward compatibility. |
54 | 54 |
|
55 | 55 | /** |
56 | | - * Whether the task has completed. |
| 56 | + * Returns true if the task has completed. |
57 | 57 | */ |
58 | | - public abstract boolean isCompleted(); |
| 58 | + def isCompleted(): Boolean |
59 | 59 |
|
60 | 60 | /** |
61 | | - * Whether the task has been killed. |
| 61 | + * Returns true if the task has been killed. |
62 | 62 | */ |
63 | | - public abstract boolean isInterrupted(); |
| 63 | + def isInterrupted(): Boolean |
64 | 64 |
|
65 | | - /** @deprecated use {@link #isRunningLocally()} */ |
66 | | - @Deprecated |
67 | | - public abstract boolean runningLocally(); |
| 65 | + /** @deprecated use {@link #isRunningLocally()} */ |
| 66 | + @deprecated("use isRunningLocally", "1.2.0") |
| 67 | + def runningLocally(): Boolean |
68 | 68 |
|
69 | | - public abstract boolean isRunningLocally(); |
| 69 | + /** |
| 70 | + * Returns true if the task is running locally in the driver program. |
| 71 | + * @return true if the task is running locally in the driver, false otherwise. |
| 72 | + */ |
| 73 | + def isRunningLocally(): Boolean |
70 | 74 |
|
71 | 75 | /** |
72 | | - * Add a (Java friendly) listener to be executed on task completion. |
| 76 | + * Adds a (Java friendly) listener to be executed on task completion. |
73 | 77 | * This will be called in all situations - success, failure, or cancellation. |
74 | 78 | * An example use is for HadoopRDD to register a callback to close the input stream. |
75 | 79 | */ |
76 | | - public abstract TaskContext addTaskCompletionListener(TaskCompletionListener listener); |
| 80 | + def addTaskCompletionListener(listener: TaskCompletionListener): TaskContext |
77 | 81 |
|
78 | 82 | /** |
79 | | - * Add a listener in the form of a Scala closure to be executed on task completion. |
| 83 | + * Adds a listener in the form of a Scala closure to be executed on task completion. |
80 | 84 | * This will be called in all situations - success, failure, or cancellation. |
81 | 85 | * An example use is for HadoopRDD to register a callback to close the input stream. |
82 | 86 | */ |
83 | | - public abstract TaskContext addTaskCompletionListener(final Function1<TaskContext, Unit> f); |
| 87 | + def addTaskCompletionListener(f: (TaskContext) => Unit): TaskContext |
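The Scala-closure overload directly above is the one most user code would touch. A sketch, under stated assumptions (the RDD parameter, path, and helper name are placeholders), of the HadoopRDD-style pattern the doc comment describes, registering a close on the running task:

    import java.io.FileInputStream
    import org.apache.spark.TaskContext
    import org.apache.spark.rdd.RDD

    // Placeholder helper: open one stream per partition and guarantee it is
    // closed when the task ends, whether by success, failure, or cancellation.
    def withClosedStream(lines: RDD[String], dataPath: String): RDD[String] =
      lines.mapPartitions { iter =>
        val in = new FileInputStream(dataPath)
        TaskContext.get().addTaskCompletionListener((_: TaskContext) => in.close())
        iter
      }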
84 | 88 |
|
85 | 89 | /** |
86 | | - * Add a callback function to be executed on task completion. An example use |
| 90 | + * Adds a callback function to be executed on task completion. An example use |
87 | 91 | * is for HadoopRDD to register a callback to close the input stream. |
88 | 92 | * Will be called in any situation - success, failure, or cancellation. |
89 | 93 | * |
90 | | - * @deprecated use {@link #addTaskCompletionListener(scala.Function1)} |
| 94 | + * @deprecated use {@link #addTaskCompletionListener(scala.Function1)} |
91 | 95 | * |
92 | 96 | * @param f Callback function. |
93 | 97 | */ |
94 | | - @Deprecated |
95 | | - public abstract void addOnCompleteCallback(final Function0<Unit> f); |
| 98 | + @deprecated("use addTaskCompletionListener", "1.2.0") |
| 99 | + def addOnCompleteCallback(f: () => Unit) |
96 | 100 |
|
97 | 101 | /** |
98 | 102 | * The ID of the stage that this task belongs to. |
99 | 103 | */ |
100 | | - public abstract int stageId(); |
| 104 | + def stageId(): Int |
101 | 105 |
|
102 | 106 | /** |
103 | 107 | * The ID of the RDD partition that is computed by this task. |
104 | 108 | */ |
105 | | - public abstract int partitionId(); |
| 109 | + def partitionId(): Int |
106 | 110 |
|
107 | 111 | /** |
108 | 112 | * How many times this task has been attempted. The first task attempt will be assigned |
109 | 113 | * attemptNumber = 0, and subsequent attempts will have increasing attempt numbers. |
110 | 114 | */ |
111 | | - public abstract int attemptNumber(); |
| 115 | + def attemptNumber(): Int |
112 | 116 |
|
113 | | - /** @deprecated use {@link #taskAttemptId()}; it was renamed to avoid ambiguity. */ |
114 | | - @Deprecated |
115 | | - public abstract long attemptId(); |
| 117 | + /** @deprecated use {@link #taskAttemptId()}; it was renamed to avoid ambiguity. */ |
| 118 | + @deprecated("use attemptNumber", "1.3.0") |
| 119 | + def attemptId(): Long |
116 | 120 |
|
117 | 121 | /** |
118 | 122 | * An ID that is unique to this task attempt (within the same SparkContext, no two task attempts |
119 | 123 | * will share the same attempt ID). This is roughly equivalent to Hadoop's TaskAttemptID. |
120 | 124 | */ |
121 | | - public abstract long taskAttemptId(); |
| 125 | + def taskAttemptId(): Long |
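Taken together, stageId(), partitionId(), attemptNumber(), and taskAttemptId() identify a task attempt. A small illustrative helper (the name is invented) that might be used to prefix log lines:

    import org.apache.spark.TaskContext

    // attemptNumber() restarts at 0 for each task, while taskAttemptId() is
    // unique across the whole SparkContext (roughly Hadoop's TaskAttemptID).
    def taskLabel(ctx: TaskContext): String =
      s"stage=${ctx.stageId()} partition=${ctx.partitionId()} " +
        s"attempt=${ctx.attemptNumber()} (TID ${ctx.taskAttemptId()})"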
122 | 126 |
|
123 | 127 | /** ::DeveloperApi:: */ |
124 | 128 | @DeveloperApi |
125 | | - public abstract TaskMetrics taskMetrics(); |
| 129 | + def taskMetrics(): TaskMetrics |
126 | 130 | } |
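For the @DeveloperApi metrics accessor, a hedged sketch that only obtains the TaskMetrics handle; which fields are safe to read from it varies by Spark version, so none are dereferenced here (the helper name is invented):

    import org.apache.spark.TaskContext
    import org.apache.spark.executor.TaskMetrics

    // Capture the metrics object when the task completes; reading specific
    // fields (run time, spill sizes, ...) is left out of this sketch.
    def logMetricsOnCompletion(ctx: TaskContext): Unit = {
      ctx.addTaskCompletionListener { (done: TaskContext) =>
        val metrics: TaskMetrics = done.taskMetrics()
        println(s"task ${done.taskAttemptId()} completed; metrics attached: ${metrics != null}")
      }
    }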