Closed

Changes from all commits (95 commits)
6424e81
grammar: space after comma in addresses (Milpitas, San Jose)
jsoref Nov 11, 2020
c0482b4
grammar: a-coordinatematrix
jsoref Nov 29, 2020
445f630
grammar: a-javardd
jsoref Nov 29, 2020
9720d24
grammar: an-vertexrdd
jsoref Nov 29, 2020
a011d6f
spelling: GitHub
jsoref Nov 11, 2020
11f6a8d
spelling: across
jsoref Nov 11, 2020
acfd821
spelling: against
jsoref Nov 11, 2020
baa99b0
spelling: antecedent
jsoref Nov 11, 2020
78b34ba
spelling: attempt
jsoref Nov 11, 2020
e1a766a
spelling: balloon
jsoref Nov 11, 2020
19bb358
spelling: broadcast
jsoref Nov 11, 2020
b7957d4
spelling: carry
jsoref Nov 11, 2020
e153eec
spelling: centered
jsoref Nov 11, 2020
cf75ae6
spelling: class.forname
jsoref Nov 11, 2020
71900db
spelling: classification
jsoref Nov 11, 2020
9e8732e
spelling: column
jsoref Nov 11, 2020
78c0be7
spelling: configured
jsoref Nov 11, 2020
4f0e0a8
spelling: conjunction
jsoref Nov 11, 2020
6eb9e83
spelling: contain
jsoref Nov 11, 2020
0bc1705
spelling: convertible
jsoref Nov 11, 2020
9a1d8d2
spelling: crypto
jsoref Nov 11, 2020
1615fd3
spelling: cumulative
jsoref Nov 11, 2020
4a60c0d
spelling: description
jsoref Nov 11, 2020
f00697d
spelling: details
jsoref Nov 11, 2020
65d457b
spelling: deviance
jsoref Nov 11, 2020
1b57213
spelling: discovery
jsoref Nov 11, 2020
62ff427
spelling: displayed
jsoref Nov 11, 2020
04549ba
spelling: distinguish
jsoref Nov 11, 2020
2ac5bf1
spelling: dynamic
jsoref Nov 11, 2020
e0aea20
spelling: e.g.
jsoref Nov 11, 2020
a25a1da
spelling: evaluate
jsoref Nov 11, 2020
86f6f08
spelling: exception
jsoref Nov 11, 2020
7376119
spelling: excluded
jsoref Nov 29, 2020
2ec3dc4
spelling: execution
jsoref Nov 11, 2020
99c796e
spelling: expected
jsoref Nov 11, 2020
c0ad488
spelling: for example
jsoref Nov 29, 2020
1b139db
spelling: forgetfulness
jsoref Nov 11, 2020
d6be585
spelling: function
jsoref Nov 11, 2020
1c4a597
spelling: gauges
jsoref Nov 11, 2020
dd8ac80
spelling: heuristic
jsoref Nov 11, 2020
2cf7a13
spelling: highest
jsoref Nov 11, 2020
6742711
spelling: i.e.
jsoref Nov 11, 2020
9ea646d
spelling: implementers
jsoref Nov 11, 2020
715e77b
spelling: indentation
jsoref Nov 11, 2020
57d9e28
spelling: index
jsoref Nov 11, 2020
3b302d3
spelling: indices
jsoref Nov 11, 2020
7570355
spelling: individual
jsoref Nov 11, 2020
0726e6f
spelling: initial
jsoref Nov 11, 2020
6255aa0
spelling: interpreter
jsoref Nov 11, 2020
74fc67f
spelling: interruptible
jsoref Nov 11, 2020
5f15608
spelling: javascript
jsoref Nov 11, 2020
967266a
spelling: lifecycle
jsoref Nov 11, 2020
5515014
spelling: listener
jsoref Nov 11, 2020
e8b4a7e
spelling: localhost
jsoref Nov 11, 2020
5f5ec41
spelling: logphat
jsoref Nov 11, 2020
ab010ea
spelling: managed
jsoref Nov 11, 2020
a77c72e
spelling: matrix
jsoref Nov 11, 2020
d0c8367
spelling: menlo
jsoref Nov 11, 2020
896c783
spelling: mercurry / vemus / afollo / Blackberry / vesus / ayollo (flag)
jsoref Nov 12, 2020
c6fe422
spelling: minutes
jsoref Nov 11, 2020
190cb0f
spelling: mismatched
jsoref Nov 11, 2020
8120182
spelling: modulus
jsoref Nov 29, 2020
4e47cc5
spelling: need
jsoref Nov 11, 2020
b224d99
spelling: object
jsoref Nov 11, 2020
8ed55db
spelling: order
jsoref Nov 11, 2020
2b355d1
spelling: output
jsoref Nov 11, 2020
b8d4c1e
spelling: param
jsoref Nov 11, 2020
0271b25
spelling: partial
jsoref Nov 11, 2020
575ef13
spelling: partition
jsoref Nov 11, 2020
06097bb
spelling: partitioning
jsoref Nov 11, 2020
0f08ac3
spelling: partitions
jsoref Nov 11, 2020
9f64c01
spelling: preceded
jsoref Nov 11, 2020
46ccf9c
spelling: preemption
jsoref Nov 11, 2020
b5d0aeb
spelling: preempts
jsoref Nov 11, 2020
0c11b56
spelling: propagate
jsoref Nov 29, 2020
af0d4f1
spelling: rec
jsoref Nov 11, 2020
867bfdc
spelling: representation
jsoref Nov 11, 2020
adba6c9
spelling: rewritten
jsoref Nov 11, 2020
b936751
spelling: rolledback
jsoref Nov 11, 2020
3fd8d07
spelling: separate
jsoref Nov 11, 2020
c0a054c
spelling: should
jsoref Nov 11, 2020
08f5201
spelling: strategy
jsoref Nov 11, 2020
4282115
spelling: surplus
jsoref Nov 11, 2020
bbaa246
spelling: synchronize
jsoref Nov 11, 2020
58a28e3
spelling: table
jsoref Nov 29, 2020
b4c7c48
spelling: terminated
jsoref Nov 11, 2020
f777813
spelling: the
jsoref Nov 11, 2020
7a8de56
spelling: topology
jsoref Nov 11, 2020
afe7030
spelling: transitive
jsoref Nov 11, 2020
fc1fdbe
spelling: unexpected
jsoref Nov 11, 2020
1c6c5d6
spelling: unflattened
jsoref Nov 11, 2020
1f644d2
spelling: variance
jsoref Nov 11, 2020
7e31053
spelling: visible
jsoref Nov 11, 2020
7f08e8d
spelling: work
jsoref Nov 11, 2020
295f588
spelling: written
jsoref Nov 11, 2020
2 changes: 1 addition & 1 deletion bin/docker-image-tool.sh
@@ -274,7 +274,7 @@ Examples:
- Build and push JDK11-based image for multiple archs to docker.io/myrepo
$0 -r docker.io/myrepo -t v3.0.0 -X -b java_image_tag=11-jre-slim build
# Note: buildx, which does cross building, needs to do the push during build
-# So there is no seperate push step with -X
+# So there is no separate push step with -X

EOF
}
@@ -334,7 +334,7 @@ function preprocessGraphLayout(g, forJob) {
}

/*
-* Helper function to size the SVG appropriately such that all elements are displyed.
+* Helper function to size the SVG appropriately such that all elements are displayed.
* This assumes that all outermost elements are clusters (rectangles).
*/
function resizeSvg(svg) {
@@ -74,7 +74,7 @@ function getTimeZone() {
return Intl.DateTimeFormat().resolvedOptions().timeZone;
} catch(ex) {
// Get time zone from a string representing the date,
// eg. "Thu Nov 16 2017 01:13:32 GMT+0800 (CST)" -> "CST"
// e.g. "Thu Nov 16 2017 01:13:32 GMT+0800 (CST)" -> "CST"
return new Date().toString().match(/\((.*)\)/)[1];
}
}
@@ -248,7 +248,7 @@ private[spark] class ExecutorAllocationManager(
executor.scheduleWithFixedDelay(scheduleTask, 0, intervalMillis, TimeUnit.MILLISECONDS)
}

-// copy the maps inside synchonize to ensure not being modified
+// copy the maps inside synchronize to ensure not being modified
val (numExecutorsTarget, numLocalityAware) = synchronized {
val numTarget = numExecutorsTargetPerResourceProfileId.toMap
val numLocality = numLocalityAwareTasksPerResourceProfileId.toMap
@@ -379,7 +379,7 @@ private[spark] class ExecutorAllocationManager(

// We lower the target number of executors but don't actively kill any yet. Killing is
// controlled separately by an idle timeout. It's still helpful to reduce
-// the target number in case an executor just happens to get lost (eg., bad hardware,
+// the target number in case an executor just happens to get lost (e.g., bad hardware,
// or the cluster manager preempts it) -- in that case, there is no point in trying
// to immediately get a new executor, since we wouldn't even use it yet.
decrementExecutorsFromTarget(maxNeeded, rpId, updatesNeeded)
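A minimal sketch of the snapshot-under-lock pattern that the "copy the maps inside synchronize" comment above refers to; the class and field names here are illustrative, not Spark's:

```scala
import scala.collection.mutable

class TargetTracker {
  private val targetsPerProfile = mutable.Map[Int, Int]()

  def update(profileId: Int, target: Int): Unit = synchronized {
    targetsPerProfile(profileId) = target
  }

  // Copy to an immutable map while holding the lock, so callers can read the
  // snapshot after the lock is released without seeing later modifications.
  def snapshot(): Map[Int, Int] = synchronized {
    targetsPerProfile.toMap
  }
}
```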
@@ -941,7 +941,7 @@ class JavaPairRDD[K, V](val rdd: RDD[(K, V)])
/**
* Return a RDD containing only the elements in the inclusive range `lower` to `upper`.
* If the RDD has been partitioned using a `RangePartitioner`, then this operation can be
-* performed efficiently by only scanning the partitions that might containt matching elements.
+* performed efficiently by only scanning the partitions that might contain matching elements.
* Otherwise, a standard `filter` is applied to all partitions.
*
* @since 3.1.0
@@ -955,7 +955,7 @@ class JavaPairRDD[K, V](val rdd: RDD[(K, V)])
/**
* Return a RDD containing only the elements in the inclusive range `lower` to `upper`.
* If the RDD has been partitioned using a `RangePartitioner`, then this operation can be
-* performed efficiently by only scanning the partitions that might containt matching elements.
+* performed efficiently by only scanning the partitions that might contain matching elements.
* Otherwise, a standard `filter` is applied to all partitions.
*
* @since 3.1.0
@@ -78,7 +78,7 @@ trait JavaRDDLike[T, This <: JavaRDDLike[T, This]] extends Serializable {

/**
* Internal method to this RDD; will read from cache if applicable, or otherwise compute it.
-* This should ''not'' be called by users directly, but is available for implementors of custom
+* This should ''not'' be called by users directly, but is available for implementers of custom
* subclasses of RDD.
*/
def iterator(split: Partition, taskContext: TaskContext): JIterator[T] =
@@ -48,14 +48,14 @@ import org.apache.spark.util._
private[spark] class PythonRDD(
parent: RDD[_],
func: PythonFunction,
-preservePartitoning: Boolean,
+preservePartitioning: Boolean,
isFromBarrier: Boolean = false)
extends RDD[Array[Byte]](parent) {

override def getPartitions: Array[Partition] = firstParent.partitions

override val partitioner: Option[Partitioner] = {
-if (preservePartitoning) firstParent.partitioner else None
+if (preservePartitioning) firstParent.partitioner else None
}

val asJavaRDD: JavaRDD[Array[Byte]] = JavaRDD.fromRDD(this)
@@ -837,7 +837,7 @@ private[spark] class PythonBroadcast(@transient var path: String) extends Serial
* We might be serializing a really large object from python -- we don't want
* python to buffer the whole thing in memory, nor can it write to a file,
* so we don't know the length in advance. So python writes it in chunks, each chunk
-* preceeded by a length, till we get a "length" of -1 which serves as EOF.
+* preceded by a length, till we get a "length" of -1 which serves as EOF.
*
* Tested from python tests.
*/
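A minimal sketch (not Spark's code) of the framing described in the comment above: each chunk is preceded by its length, and a length of -1 marks end of stream.

```scala
import java.io.{DataInputStream, OutputStream}

def copyChunkedStream(in: DataInputStream, out: OutputStream): Unit = {
  var length = in.readInt()      // length prefix of the next chunk
  while (length != -1) {         // a "length" of -1 serves as EOF
    val chunk = new Array[Byte](length)
    in.readFully(chunk)
    out.write(chunk)
    length = in.readInt()
  }
}
```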
@@ -80,7 +80,7 @@ private[deploy] object JsonProtocol {
}

/**
-* Export the [[ApplicationInfo]] to a Json objec. An [[ApplicationInfo]] consists of the
+* Export the [[ApplicationInfo]] to a Json object. An [[ApplicationInfo]] consists of the
* information of an application.
*
* @return a Json object containing the following fields:
@@ -311,7 +311,7 @@ private[spark] class SparkSubmit extends Logging {
// In K8s client mode, when in the driver, add resolved jars early as we might need
// them at the submit time for artifact downloading.
// For example we might use the dependencies for downloading
-// files from a Hadoop Compatible fs eg. S3. In this case the user might pass:
+// files from a Hadoop Compatible fs e.g. S3. In this case the user might pass:
// --packages com.amazonaws:aws-java-sdk:1.7.4:org.apache.hadoop:hadoop-aws:2.7.6
if (isKubernetesClusterModeDriver) {
val loader = getSubmitClassLoader(sparkConf)
@@ -722,7 +722,7 @@ private[history] class FsHistoryProvider(conf: SparkConf, clock: Clock)

/**
* Replay the given log file, saving the application in the listing db.
-* Visable for testing
+* Visible for testing
*/
private[history] def doMergeApplicationListing(
reader: EventLogFileReader,
@@ -52,7 +52,7 @@ private[history] class HybridStore extends KVStore {
// A background thread that dumps data from inMemoryStore to levelDB
private var backgroundThread: Thread = null

-// A hash map that stores all classes that had been writen to inMemoryStore
+// A hash map that stores all classes that had been written to inMemoryStore
// Visible for testing
private[history] val klassMap = new ConcurrentHashMap[Class[_], Boolean]

4 changes: 2 additions & 2 deletions core/src/main/scala/org/apache/spark/executor/Executor.scala
@@ -683,7 +683,7 @@ private[spark] class Executor(
// SPARK-20904: Do not report failure to driver if if happened during shut down. Because
// libraries may set up shutdown hooks that race with running tasks during shutdown,
// spurious failures may occur and can result in improper accounting in the driver (e.g.
-// the task failure would not be ignored if the shutdown happened because of premption,
+// the task failure would not be ignored if the shutdown happened because of preemption,
// instead of an app issue).
if (!ShutdownHookManager.inShutdown()) {
val (accums, accUpdates) = collectAccumulatorsAndResetStatusOnFailure(taskStartTimeNs)
@@ -742,7 +742,7 @@ private[spark] class Executor(
* sending a Thread.interrupt(), and monitoring the task until it finishes.
*
* Spark's current task cancellation / task killing mechanism is "best effort" because some tasks
-may not be interruptable or may not respond to their "killed" flags being set. If a significant
+may not be interruptible or may not respond to their "killed" flags being set. If a significant
* fraction of a cluster's task slots are occupied by tasks that have been marked as killed but
* remain running then this can lead to a situation where new jobs and tasks are starved of
* resources that are being used by these zombie tasks.
@@ -102,7 +102,7 @@ private[spark] class MetricsConfig(conf: SparkConf) extends Logging {
*
* @param prop the flat list of properties to "unflatten" based on prefixes
* @param regex the regex that the prefix has to comply with
-* @return an unflatted map, mapping prefix with sub-properties under that prefix
+* @return an unflattened map, mapping prefix with sub-properties under that prefix
*/
def subProperties(prop: Properties, regex: Regex): mutable.HashMap[String, Properties] = {
val subProperties = new mutable.HashMap[String, Properties]
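A standalone sketch of the "unflatten by prefix" idea the doc comment above describes (illustrative, not Spark's implementation); it works on a plain Scala map rather than `java.util.Properties`:

```scala
// Group "prefix.rest -> value" entries into a sub-map per prefix.
def unflatten(flat: Map[String, String]): Map[String, Map[String, String]] =
  flat.toSeq
    .collect { case (key, value) if key.contains('.') =>
      val Array(prefix, rest) = key.split("\\.", 2)
      (prefix, rest, value)
    }
    .groupBy(_._1)
    .map { case (prefix, entries) =>
      prefix -> entries.map { case (_, rest, value) => rest -> value }.toMap
    }

// unflatten(Map("driver.sink.console.period" -> "10",
//               "driver.sink.console.unit"   -> "seconds"))
// => Map("driver" -> Map("sink.console.period" -> "10",
//                        "sink.console.unit"   -> "seconds"))
```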
@@ -56,7 +56,7 @@ private[spark] class PrometheusServlet(
def getMetricsSnapshot(request: HttpServletRequest): String = {
import scala.collection.JavaConverters._

-val guagesLabel = """{type="gauges"}"""
+val gaugesLabel = """{type="gauges"}"""
val countersLabel = """{type="counters"}"""
val metersLabel = countersLabel
val histogramslabels = """{type="histograms"}"""
@@ -65,8 +65,8 @@
val sb = new StringBuilder()
registry.getGauges.asScala.foreach { case (k, v) =>
if (!v.getValue.isInstanceOf[String]) {
sb.append(s"${normalizeKey(k)}Number$guagesLabel ${v.getValue}\n")
sb.append(s"${normalizeKey(k)}Value$guagesLabel ${v.getValue}\n")
sb.append(s"${normalizeKey(k)}Number$gaugesLabel ${v.getValue}\n")
sb.append(s"${normalizeKey(k)}Value$gaugesLabel ${v.getValue}\n")
}
}
registry.getCounters.asScala.foreach { case (k, v) =>
@@ -173,7 +173,7 @@ class DoubleRDDFunctions(self: RDD[Double]) extends Logging with Serializable {
if (buckets.length < 2) {
throw new IllegalArgumentException("buckets array must have at least two elements")
}
-// The histogramPartition function computes the partail histogram for a given
+// The histogramPartition function computes the partial histogram for a given
// partition. The provided bucketFunction determines which bucket in the array
// to increment or returns None if there is no bucket. This is done so we can
// specialize for uniformly distributed buckets and save the O(log n) binary
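A small usage sketch of the histogram computation the comment above describes, assuming an existing `SparkContext` named `sc`; each partition contributes a partial histogram that is then merged into the final counts.

```scala
val data = sc.parallelize(Seq(1.0, 5.0, 12.0, 25.0, 29.0), numSlices = 3)
val buckets = Array(0.0, 10.0, 20.0, 30.0)   // buckets [0,10), [10,20), [20,30]
data.histogram(buckets)                      // => Array(2L, 1L, 2L)
```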
@@ -88,10 +88,10 @@ class OrderedRDDFunctions[K : Ordering : ClassTag,

val rddToFilter: RDD[P] = self.partitioner match {
case Some(rp: RangePartitioner[K, V]) =>
-val partitionIndicies = (rp.getPartition(lower), rp.getPartition(upper)) match {
+val partitionIndices = (rp.getPartition(lower), rp.getPartition(upper)) match {
case (l, u) => Math.min(l, u) to Math.max(l, u)
}
-PartitionPruningRDD.create(self, partitionIndicies.contains)
+PartitionPruningRDD.create(self, partitionIndices.contains)
case _ =>
self
}
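A usage sketch for the partition pruning shown above, assuming an existing `SparkContext` named `sc`: after `sortByKey` the RDD carries a `RangePartitioner`, so `filterByRange` only scans the partitions that can hold keys in the requested range.

```scala
val pairs   = sc.parallelize((1 to 1000).map(i => (i, s"value-$i")))
val sorted  = pairs.sortByKey()               // range-partitioned
val inRange = sorted.filterByRange(100, 200)  // inclusive bounds, pruned partitions
inRange.count()                               // => 101
```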
2 changes: 1 addition & 1 deletion core/src/main/scala/org/apache/spark/rdd/RDD.scala
@@ -327,7 +327,7 @@ abstract class RDD[T: ClassTag](

/**
* Internal method to this RDD; will read from cache if applicable, or otherwise compute it.
-* This should ''not'' be called by users directly, but is available for implementors of custom
+* This should ''not'' be called by users directly, but is available for implementers of custom
* subclasses of RDD.
*/
final def iterator(split: Partition, context: TaskContext): Iterator[T] = {
@@ -20,7 +20,7 @@ package org.apache.spark.resource
import org.apache.spark.annotation.{Evolving, Since}

/**
-* A task resource request. This is used in conjuntion with the ResourceProfile to
+* A task resource request. This is used in conjunction with the ResourceProfile to
* programmatically specify the resources needed for an RDD that will be applied at the
* stage level.
*
@@ -254,14 +254,14 @@ private[netty] class NettyRpcEnv(

val timeoutCancelable = timeoutScheduler.schedule(new Runnable {
override def run(): Unit = {
-val remoteReceAddr = if (remoteAddr == null) {
+val remoteRecAddr = if (remoteAddr == null) {
Try {
message.receiver.client.getChannel.remoteAddress()
}.toOption.orNull
} else {
remoteAddr
}
onFailure(new TimeoutException(s"Cannot receive any reply from ${remoteReceAddr} " +
onFailure(new TimeoutException(s"Cannot receive any reply from ${remoteRecAddr} " +
s"in ${timeout.duration}"))
}
}, timeout.duration.toNanos, TimeUnit.NANOSECONDS)
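A generic sketch of the timeout pattern in this hunk (not Spark's `NettyRpcEnv`): schedule a failure callback, and cancel it once a reply arrives.

```scala
import java.util.concurrent.{Executors, ScheduledFuture, TimeUnit, TimeoutException}

val timeoutScheduler = Executors.newSingleThreadScheduledExecutor()

// Returns the scheduled task so the caller can cancel it when a reply arrives.
def scheduleTimeout(onFailure: Throwable => Unit, timeoutSeconds: Long): ScheduledFuture[_] =
  timeoutScheduler.schedule(new Runnable {
    override def run(): Unit =
      onFailure(new TimeoutException(s"Cannot receive any reply in $timeoutSeconds s"))
  }, timeoutSeconds, TimeUnit.SECONDS)

// val pending = scheduleTimeout(e => println(e.getMessage), 30)
// ...on success: pending.cancel(false)
```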
@@ -45,10 +45,10 @@ private[spark] object BarrierJobAllocationFailed {
val ERROR_MESSAGE_RUN_BARRIER_WITH_UNSUPPORTED_RDD_CHAIN_PATTERN =
"[SPARK-24820][SPARK-24821]: Barrier execution mode does not allow the following pattern of " +
"RDD chain within a barrier stage:\n1. Ancestor RDDs that have different number of " +
"partitions from the resulting RDD (eg. union()/coalesce()/first()/take()/" +
"partitions from the resulting RDD (e.g. union()/coalesce()/first()/take()/" +
"PartitionPruningRDD). A workaround for first()/take() can be barrierRdd.collect().head " +
"(scala) or barrierRdd.collect()[0] (python).\n" +
"2. An RDD that depends on multiple barrier RDDs (eg. barrierRdd1.zip(barrierRdd2))."
"2. An RDD that depends on multiple barrier RDDs (e.g. barrierRdd1.zip(barrierRdd2))."

// Error message when running a barrier stage with dynamic resource allocation enabled.
val ERROR_MESSAGE_RUN_BARRIER_WITH_DYN_ALLOCATION =
@@ -409,9 +409,9 @@ private[spark] class DAGScheduler(
/**
* Check to make sure we don't launch a barrier stage with unsupported RDD chain pattern. The
* following patterns are not supported:
-* 1. Ancestor RDDs that have different number of partitions from the resulting RDD (eg.
+* 1. Ancestor RDDs that have different number of partitions from the resulting RDD (e.g.
* union()/coalesce()/first()/take()/PartitionPruningRDD);
-* 2. An RDD that depends on multiple barrier RDDs (eg. barrierRdd1.zip(barrierRdd2)).
+* 2. An RDD that depends on multiple barrier RDDs (e.g. barrierRdd1.zip(barrierRdd2)).
*/
private def checkBarrierStageWithRDDChainPattern(rdd: RDD[_], numTasksInStage: Int): Unit = {
if (rdd.isBarrier() &&
@@ -459,7 +459,7 @@ private[spark] class DAGScheduler(

/**
* We don't support run a barrier stage with dynamic resource allocation enabled, it shall lead
-* to some confusing behaviors (eg. with dynamic resource allocation enabled, it may happen that
+* to some confusing behaviors (e.g. with dynamic resource allocation enabled, it may happen that
* we acquire some executors (but not enough to launch all the tasks in a barrier stage) and
* later release them due to executor idle time expire, and then acquire again).
*
@@ -1555,7 +1555,7 @@ private[spark] class DAGScheduler(
event.reason)

if (!stageIdToStage.contains(task.stageId)) {
-// The stage may have already finished when we get this event -- eg. maybe it was a
+// The stage may have already finished when we get this event -- e.g. maybe it was a
// speculative task. It is important that we send the TaskEnd event in any case, so listeners
// are properly notified and can chose to handle it. For instance, some listeners are
// doing their own accounting and if they don't get the task end event they think
@@ -32,7 +32,7 @@ import org.apache.spark.util.{Clock, SystemClock, Utils}
* additional logic for exclusion of executors and nodes for individual tasks and stages which
* works in concert with the logic here.
*
-* The tracker needs to deal with a variety of workloads, eg.:
+* The tracker needs to deal with a variety of workloads, e.g.:
*
* * bad user code -- this may lead to many task failures, but that should not count against
* individual executors
@@ -362,7 +362,7 @@ private[scheduler] class HealthTracker (
* Apply the timeout to individual tasks. This is to prevent one-off failures that are very
* spread out in time (and likely have nothing to do with problems on the executor) from
* triggering exlusion. However, note that we do *not* remove executors and nodes from
-* being excluded as we expire individual task failures -- each have their own timeout. Eg.,
+* being excluded as we expire individual task failures -- each have their own timeout. E.g.,
* suppose:
* * timeout = 10, maxFailuresPerExec = 2
* * Task 1 fails on exec 1 at time 0
@@ -216,7 +216,7 @@ private[spark] class TaskSetManager(
/**
* Track the set of locality levels which are valid given the tasks locality preferences and
* the set of currently available executors. This is updated as executors are added and removed.
-* This allows a performance optimization, of skipping levels that aren't relevant (eg., skip
+* This allows a performance optimization, of skipping levels that aren't relevant (e.g., skip
* PROCESS_LOCAL if no tasks could be run PROCESS_LOCAL for the current set of executors).
*/
private[scheduler] var myLocalityLevels = computeValidLocalityLevels()
@@ -167,7 +167,7 @@ private[spark] object CryptoStreamUtils extends Logging {
}

/**
-* SPARK-25535. The commons-cryto library will throw InternalError if something goes
+* SPARK-25535. The commons-crypto library will throw InternalError if something goes
* wrong, and leave bad state behind in the Java wrappers, so it's not safe to use them
* afterwards. This wrapper detects that situation and avoids further calls into the
* commons-crypto code, while still allowing the underlying streams to be closed.
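A generic sketch of the guard described in the comment above (not the actual Spark wrapper): once the underlying library throws `InternalError`, refuse further writes but still allow the stream to be closed.

```scala
import java.io.{IOException, OutputStream}

class GuardedOutputStream(underlying: OutputStream) extends OutputStream {
  @volatile private var broken = false

  override def write(b: Int): Unit = {
    if (broken) throw new IOException("stream disabled after an earlier InternalError")
    try {
      underlying.write(b)
    } catch {
      case e: InternalError =>
        broken = true
        throw new IOException("underlying crypto stream failed; further use disabled", e)
    }
  }

  override def close(): Unit = underlying.close()   // closing stays allowed
}
```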
@@ -1103,7 +1103,7 @@ private[spark] class BlockManager(
blockSize: Long): Option[ManagedBuffer] = {
val file = ExecutorDiskUtils.getFile(localDirs, subDirsPerLocalDir, blockId.name)
if (file.exists()) {
-val mangedBuffer = securityManager.getIOEncryptionKey() match {
+val managedBuffer = securityManager.getIOEncryptionKey() match {
case Some(key) =>
// Encrypted blocks cannot be memory mapped; return a special object that does decryption
// and provides InputStream / FileRegion implementations for reading the data.
@@ -1114,7 +1114,7 @@
val transportConf = SparkTransportConf.fromSparkConf(conf, "shuffle")
new FileSegmentManagedBuffer(transportConf, file, 0, file.length)
}
-Some(mangedBuffer)
+Some(managedBuffer)
} else {
None
}
@@ -357,7 +357,7 @@ class BlockManagerMasterEndpoint(
blockLocations.remove(blockId)
logWarning(s"No more replicas available for $blockId !")
} else if (proactivelyReplicate && (blockId.isRDD || blockId.isInstanceOf[TestBlockId])) {
-// As a heursitic, assume single executor failure to find out the number of replicas that
+// As a heuristic, assume single executor failure to find out the number of replicas that
// existed before failure
val maxReplicas = locations.size + 1
val i = (new Random(blockId.hashCode)).nextInt(locations.size)
@@ -85,7 +85,7 @@ private[ui] class AllJobsPage(parent: JobsTab, store: AppStatusStore) extends We
}

// The timeline library treats contents as HTML, so we have to escape them. We need to add
-// extra layers of escaping in order to embed this in a Javascript string literal.
+// extra layers of escaping in order to embed this in a JavaScript string literal.
val escapedDesc = Utility.escape(jobDescription)
val jsEscapedDescForTooltip = StringEscapeUtils.escapeEcmaScript(Utility.escape(escapedDesc))
val jsEscapedDescForLabel = StringEscapeUtils.escapeEcmaScript(escapedDesc)
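An illustration of the two escaping layers described above; it assumes `scala-xml` and Apache `commons-text` are on the classpath, as in the surrounding code.

```scala
import org.apache.commons.text.StringEscapeUtils
import scala.xml.Utility

val jobDescription = "count at <console>:25"
val htmlEscaped = Utility.escape(jobDescription)              // "count at &lt;console&gt;:25"
val jsSafe = StringEscapeUtils.escapeEcmaScript(htmlEscaped)  // safe inside a JS string literal
```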