Commit e1855e5

Fix a handful of misc. IntelliJ inspections
1 parent 39434f9 · commit e1855e5

3 files changed (+3, −6 lines)

core/src/main/java/org/apache/spark/shuffle/unsafe/UnsafeShuffleExternalSorter.java

Lines changed: 1 addition & 2 deletions

@@ -139,7 +139,7 @@ private SpillInfo writeSpillFile() throws IOException {
 
     // Currently, we need to open a new DiskBlockObjectWriter for each partition; we can avoid this
     // after SPARK-5581 is fixed.
-    BlockObjectWriter writer = null;
+    BlockObjectWriter writer;
 
     // Small writes to DiskBlockObjectWriter will be fairly inefficient. Since there doesn't seem to
     // be an API to directly transfer bytes from managed memory to the disk writer, we buffer
@@ -202,7 +202,6 @@ private SpillInfo writeSpillFile() throws IOException {
           writeBuffer,
           PlatformDependent.BYTE_ARRAY_OFFSET,
           toTransfer);
-        assert (writer != null); // To suppress an IntelliJ warning
         writer.write(writeBuffer, 0, toTransfer);
         recordReadPosition += toTransfer;
         dataRemaining -= toTransfer;
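
This change leans on javac's definite-assignment analysis: declaring writer with no initializer obliges the compiler to prove that every path assigns it before its first use, which is also why the assert in the second hunk, which existed only to silence IntelliJ, becomes unnecessary. A minimal sketch of the pattern, using FileWriter and illustrative names rather than Spark's actual spill loop:

// DefiniteAssignmentSketch.java -- illustrative only; FileWriter stands in
// for Spark's BlockObjectWriter.
import java.io.FileWriter;
import java.io.IOException;

class DefiniteAssignmentSketch {
  static void writePartitions(String[] partitionFiles) throws IOException {
    FileWriter writer;               // no "= null": javac tracks assignment
    for (String file : partitionFiles) {
      writer = new FileWriter(file); // assigned on every path before use
      try {
        writer.write("record");      // provably assigned here; no null check
      } finally {
        writer.close();
      }
    }
  }
}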

core/src/main/java/org/apache/spark/shuffle/unsafe/UnsafeShuffleSorter.java

Lines changed: 1 addition & 3 deletions

@@ -17,10 +17,8 @@
 
 package org.apache.spark.shuffle.unsafe;
 
-import java.io.IOException;
 import java.util.Comparator;
 
-import org.apache.spark.unsafe.memory.MemoryBlock;
 import org.apache.spark.util.collection.Sorter;
 
 final class UnsafeShuffleSorter {
@@ -71,7 +69,7 @@ public long getMemoryUsage() {
    * @param partitionId the partition id, which must be less than or equal to
    *                    {@link PackedRecordPointer#MAXIMUM_PARTITION_ID}.
    */
-  public void insertRecord(long recordPointer, int partitionId) throws IOException {
+  public void insertRecord(long recordPointer, int partitionId) {
     if (!hasSpaceForAnotherRecord()) {
       expandSortBuffer();
     }
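
Dropping the unused imports and the throws IOException clause is more than cosmetic: insertRecord performs no I/O, and removing the checked exception means callers no longer need a try/catch or a throws declaration of their own. A hedged sketch of the simplified shape; the field names and buffer-growth policy are stand-ins, not Spark's implementation:

// SortBufferSketch.java -- illustrative stand-in for the insert path;
// growth-by-doubling here is an assumption, analogous to expandSortBuffer().
import java.util.Arrays;

class SortBufferSketch {
  private long[] pointerArray = new long[64];
  private int insertPosition = 0;

  // No "throws IOException": nothing here can perform I/O, so callers may
  // invoke this without any exception handling.
  public void insertRecord(long recordPointer) {
    if (insertPosition == pointerArray.length) {
      pointerArray = Arrays.copyOf(pointerArray, pointerArray.length * 2);
    }
    pointerArray[insertPosition++] = recordPointer;
  }
}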

core/src/main/java/org/apache/spark/shuffle/unsafe/UnsafeShuffleWriter.java

Lines changed: 1 addition & 1 deletion

@@ -135,7 +135,7 @@ public void write(scala.collection.Iterator<Product2<K, V>> records) throws IOException {
       closeAndWriteOutput();
     } catch (Exception e) {
       // Unfortunately, we have to catch Exception here in order to ensure proper cleanup after
-      // errors becuase Spark's Scala code, or users' custom Serializers, might throw arbitrary
+      // errors because Spark's Scala code, or users' custom Serializers, might throw arbitrary
      // unchecked exceptions.
       try {
         sorter.cleanupAfterError();
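
The context around this typo fix shows the cleanup pattern the writer uses: catch Exception broadly (since Scala code and custom Serializers can throw arbitrary unchecked exceptions), attempt cleanup, then rethrow the original error. A sketch of that shape, assuming simplified stand-ins for the real fields and methods:

// CleanupOnErrorSketch.java -- the Sorter interface and write() body are
// assumptions for illustration, not UnsafeShuffleWriter's real API.
class CleanupOnErrorSketch {
  interface Sorter {
    void cleanupAfterError();
  }

  private final Sorter sorter;

  CleanupOnErrorSketch(Sorter sorter) {
    this.sorter = sorter;
  }

  public void write(Runnable writeOutput) {
    try {
      writeOutput.run();
    } catch (Exception e) {
      try {
        // Release spill files and memory before propagating the failure.
        sorter.cleanupAfterError();
      } catch (Exception suppressed) {
        // Don't let a cleanup failure mask the original error.
        e.addSuppressed(suppressed);
      }
      throw e; // precise rethrow: only unchecked exceptions can reach here
    }
  }
}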
