package org.apache.spark.input

import scala.collection.JavaConversions._

import com.google.common.io.{ByteStreams, Closeables}
import org.apache.hadoop.fs.Path
import org.apache.hadoop.mapreduce.{InputSplit, JobContext, RecordReader, TaskAttemptContext}
import org.apache.hadoop.mapreduce.lib.input.{CombineFileInputFormat, CombineFileRecordReader, CombineFileSplit}

/**
 * The new (Hadoop 2.0) InputFormat for whole binary files (not to be confused with the
 * RecordReader itself).
 */
abstract class BinaryFileInputFormat[T]
  extends CombineFileInputFormat[String, T] with Serializable {

  override protected def isSplitable(context: JobContext, file: Path): Boolean = false
  /**
   * Allow the end user to set minPartitions, to keep compatibility with the old Hadoop API
   * (see the worked example after this method).
   */
  def setMaxSplitSize(context: JobContext, minPartitions: Int) {
    val files = listStatus(context)
    val totalLen = files.map { file =>
      if (file.isDirectory) 0L else file.getLen
    }.sum

    // Size each split so that the job yields roughly minPartitions partitions.
    val maxSplitSize = Math.ceil(totalLen * 1.0 /
      (if (minPartitions == 0) 1 else minPartitions)).toLong
    super.setMaxSplitSize(maxSplitSize)
  }
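
  // Worked example (illustrative numbers only, not part of the original patch): with
  // eight 50 MB files (totalLen = 400 MB) and minPartitions = 4, maxSplitSize =
  // ceil(400 MB / 4) = 100 MB, so CombineFileInputFormat packs the whole files into
  // roughly four splits of two files each.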

  def createRecordReader(split: InputSplit, taContext: TaskAttemptContext): RecordReader[String, T]
}

/**
 * A [[org.apache.hadoop.mapreduce.RecordReader RecordReader]] for reading a single whole binary
 * file out as a key-value pair, where the key is the file path and the value is the entire
 * content of the file parsed into a value of type T by the subclass.
 */
abstract class BinaryRecordReader[T](
    split: CombineFileSplit,
    context: TaskAttemptContext,
    index: Integer)
  extends RecordReader[String, T] with Serializable {

  private val path = split.getPath(index)
  private val fs = path.getFileSystem(context.getConfiguration)

  // Set to true once the single whole-file record has been emitted, so it is not read again.
  private var processed = false

  private val key = path.toString
  private var value: T = null.asInstanceOf[T]

  override def initialize(split: InputSplit, context: TaskAttemptContext) = {}

  override def close() = {}

  override def getProgress = if (processed) 1.0f else 0.0f

  override def getCurrentKey = key

  override def getCurrentValue = value

  override def nextKeyValue = {
    if (!processed) {
      // Read the whole file into memory and hand the bytes to the subclass parser.
      val fileIn = fs.open(path)
      val innerBuffer = ByteStreams.toByteArray(fileIn)
      value = parseByteArray(innerBuffer)
      Closeables.close(fileIn, false)

      processed = true
      true
    } else {
      false
    }
  }

  def parseByteArray(inArray: Array[Byte]): T
}

/**
 * A demo class for extracting just the byte array itself.
 */
class ByteInputFormat extends BinaryFileInputFormat[Array[Byte]] with Serializable {
  override def createRecordReader(split: InputSplit, taContext: TaskAttemptContext) = {
    new CombineFileRecordReader[String, Array[Byte]](
      split.asInstanceOf[CombineFileSplit], taContext, classOf[ByteRecordReader])
  }
}

class ByteRecordReader(
    split: CombineFileSplit,
    context: TaskAttemptContext,
    index: Integer)
  extends BinaryRecordReader[Array[Byte]](split, context, index) {

  def parseByteArray(inArray: Array[Byte]) = inArray
}
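
/**
 * A minimal usage sketch (an illustration added here, not part of the original patch):
 * wiring ByteInputFormat into SparkContext.newAPIHadoopFile. The local master setting and
 * the input directory are placeholder assumptions.
 */
object ByteInputFormatExample {
  def main(args: Array[String]) {
    val sc = new org.apache.spark.SparkContext("local", "ByteInputFormatExample")
    // Each record is (file path, entire file contents as a byte array).
    val binaryFiles = sc.newAPIHadoopFile(
      "/path/to/binary/files",               // placeholder input directory
      classOf[ByteInputFormat],
      classOf[String],
      classOf[Array[Byte]],
      sc.hadoopConfiguration)
    binaryFiles.map { case (file, bytes) => (file, bytes.length) }
      .collect()
      .foreach { case (file, size) => println(s"$file: $size bytes") }
    sc.stop()
  }
}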