Add support for reading and writing SBI files for BAMs.
tomwhite committed Jun 25, 2018
1 parent 38a24d5 commit a851c3a
Showing 7 changed files with 589 additions and 0 deletions.
10 changes: 10 additions & 0 deletions src/main/java/htsjdk/samtools/BAMFileReader.java
@@ -337,6 +337,12 @@ static long findVirtualOffsetOfFirstRecord(final File bam) throws IOException {
return offset;
}

/** Reads through the header and sequence records to find the virtual file offset of the first record in the BAM file. */
static long findVirtualOffsetOfFirstRecord(final SeekableStream seekableStream) throws IOException {
final BAMFileReader reader = new BAMFileReader(seekableStream, (SeekableStream) null, false, false, ValidationStringency.SILENT, new DefaultSAMRecordFactory());
return reader.mFirstRecordPointer;
}

/**
* If true, writes the source of every read into the source SAMRecords.
* @param enabled true to write source information into each SAMRecord.
@@ -944,6 +950,10 @@ public CloseableIterator<SAMRecord> createIndexIterator(final QueryInterval[] in
return new BAMQueryFilteringIterator(iterator, new BAMQueryMultipleIntervalsIteratorFilter(intervals, contained));
}

/** Returns the current virtual file pointer of the underlying block-compressed BAM stream. */
public long getVirtualFilePointer() {
return mCompressedInputStream.getFilePointer();
}

/**
* Iterate over the SAMRecords defined by the sections of the file described in the ctor argument.
*/
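
Both additions above return BGZF virtual file pointers. As a hedged aside (not part of this commit), such a pointer can be unpacked with the existing BlockCompressedFilePointerUtil helpers; the demo class and values below are illustrative:

import htsjdk.samtools.util.BlockCompressedFilePointerUtil;

public class VirtualPointerDemo { // hypothetical demo class, not in htsjdk
    public static void main(String[] args) {
        // A virtual file pointer packs the compressed block address into the
        // upper 48 bits and the uncompressed offset within that block into
        // the lower 16 bits.
        final long pointer = BlockCompressedFilePointerUtil.makeFilePointer(12345L, 67);
        System.out.println(BlockCompressedFilePointerUtil.getBlockAddress(pointer)); // 12345
        System.out.println(BlockCompressedFilePointerUtil.getBlockOffset(pointer));  // 67
    }
}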
65 changes: 65 additions & 0 deletions src/main/java/htsjdk/samtools/BAMSBIIndexer.java
@@ -0,0 +1,65 @@
package htsjdk.samtools;

import htsjdk.samtools.cram.io.InputStreamUtils;
import htsjdk.samtools.seekablestream.SeekablePathStream;
import htsjdk.samtools.seekablestream.SeekableStream;
import htsjdk.samtools.util.BlockCompressedInputStream;
import htsjdk.samtools.util.IOUtil;

import java.io.EOFException;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.file.Files;
import java.nio.file.Path;

/**
* Writes SBI files for BAM files, as understood by {@link SBIIndex}.
*/
public final class BAMSBIIndexer {

/**
* Perform indexing on the given BAM file, at the granularity level specified.
*
* @param bamFile the path to the BAM file
* @param granularity write the offset of every n-th alignment to the index
* @throws IOException if an I/O error occurs while reading the BAM file or writing the index
*/
public static void createIndex(final Path bamFile, final long granularity) throws IOException {
final Path sbiFile = IOUtil.addExtension(bamFile, SBIIndex.FILE_EXTENSION);
try (SeekableStream in = new SeekablePathStream(bamFile); OutputStream out = Files.newOutputStream(sbiFile)) {
createIndex(in, out, granularity);
}
}

/**
* Perform indexing on the given BAM file, at the granularity level specified.
*
* @param in a seekable stream for reading the BAM file from
* @param out the stream to write the index to
* @param granularity write the offset of every n-th alignment to the index
* @throws IOException if an I/O error occurs while reading the BAM stream or writing the index
*/
public static void createIndex(final SeekableStream in, final OutputStream out, final long granularity) throws IOException {
long recordStart = SAMUtils.findVirtualOffsetOfFirstRecordInBam(in);
try (BlockCompressedInputStream blockIn = new BlockCompressedInputStream(in)) {
blockIn.seek(recordStart);
final ByteBuffer byteBuffer = ByteBuffer.allocate(4).order(ByteOrder.LITTLE_ENDIAN); // BAM is little-endian
SBIIndexWriter indexWriter = new SBIIndexWriter(out, granularity);
while (true) {
try {
recordStart = blockIn.getFilePointer();
InputStreamUtils.readFully(blockIn, byteBuffer.array(), 0, 4);
final int blockSize = byteBuffer.getInt(0); // length of remainder of alignment record
indexWriter.processRecord(recordStart);
InputStreamUtils.skipFully(blockIn, blockSize);
} catch (EOFException e) {
break;
}
}
// At EOF, recordStart is the virtual offset just past the last record,
// i.e. where the next record would start if one were appended.
indexWriter.writeVirtualOffset(recordStart);
indexWriter.finish(in.length());
}
}
}
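
A minimal usage sketch for the new indexer (the file name and granularity below are illustrative; this commit does not define a default granularity):

import htsjdk.samtools.BAMSBIIndexer;

import java.io.IOException;
import java.nio.file.Paths;

public class CreateSbiDemo { // hypothetical demo class, not in htsjdk
    public static void main(String[] args) throws IOException {
        // Writes example.bam.sbi next to the input, recording the virtual
        // offset of every 4096th alignment record.
        BAMSBIIndexer.createIndex(Paths.get("example.bam"), 4096);
    }
}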
13 changes: 13 additions & 0 deletions src/main/java/htsjdk/samtools/SAMUtils.java
@@ -23,6 +23,7 @@
*/
package htsjdk.samtools;

import htsjdk.samtools.seekablestream.SeekableStream;
import htsjdk.samtools.util.BinaryCodec;
import htsjdk.samtools.util.CigarUtil;
import htsjdk.samtools.util.CloserUtil;
@@ -685,6 +686,18 @@ public static long findVirtualOffsetOfFirstRecordInBam(final File bamFile) {
}
}

/**
* Returns the virtual file offset of the first record in a BAM file - i.e. the virtual file
* offset after skipping over the text header and the sequence records.
*/
public static long findVirtualOffsetOfFirstRecordInBam(final SeekableStream seekableStream) {
try {
return BAMFileReader.findVirtualOffsetOfFirstRecord(seekableStream);
} catch (final IOException ioe) {
throw new RuntimeEOFException(ioe);
}
}

/**
* Given a Cigar, Returns blocks of the sequence that have been aligned directly to the
* reference sequence. Note that clipped portions, and inserted and deleted bases (vs. the reference)
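
The new stream overload makes the lookup usable for sources that are not local files, provided they expose a SeekableStream. A hedged sketch (names illustrative), here backed by a local path for simplicity:

import htsjdk.samtools.SAMUtils;
import htsjdk.samtools.seekablestream.SeekablePathStream;
import htsjdk.samtools.seekablestream.SeekableStream;

import java.io.IOException;
import java.nio.file.Paths;

public class FirstRecordDemo { // hypothetical demo class, not in htsjdk
    public static void main(String[] args) throws IOException {
        try (SeekableStream in = new SeekablePathStream(Paths.get("example.bam"))) {
            final long firstRecord = SAMUtils.findVirtualOffsetOfFirstRecordInBam(in);
            System.out.printf("first record at virtual offset %#x%n", firstRecord);
        }
    }
}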
251 changes: 251 additions & 0 deletions src/main/java/htsjdk/samtools/SBIIndex.java
@@ -0,0 +1,251 @@
package htsjdk.samtools;

import htsjdk.samtools.util.BinaryCodec;
import htsjdk.samtools.util.BlockCompressedFilePointerUtil;

import java.io.BufferedInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.NavigableSet;
import java.util.TreeSet;

/**
* SBI is an index into BGZF-compressed data files, with an entry for the file position of the start of every
* <i>n</i>th record. This class reads files created by {@link BAMSBIIndexer}.
*/
public final class SBIIndex {

public static class Header {
private final long fileLength;
private final byte[] md5;
private final byte[] uuid;
private final long totalNumberOfRecords;
private final long granularity;

public Header(long fileLength, byte[] md5, byte[] uuid, long totalNumberOfRecords, long granularity) {
this.fileLength = fileLength;
this.md5 = md5;
this.uuid = uuid;
this.totalNumberOfRecords = totalNumberOfRecords;
this.granularity = granularity;
}

public long getFileLength() {
return fileLength;
}

public byte[] getMd5() {
return md5;
}

public byte[] getUuid() {
return uuid;
}

public long getTotalNumberOfRecords() {
return totalNumberOfRecords;
}

public long getGranularity() {
return granularity;
}
}

public static final String FILE_EXTENSION = ".sbi";

/**
* SBI magic number.
*/
static final byte[] SBI_MAGIC = "SBI\1".getBytes();

private final Header header;
private final NavigableSet<Long> virtualOffsets;

/**
* Create an in-memory SBI with the given header and virtual offsets.
* @param header the index header
* @param virtualOffsets the offsets in the index
*/
public SBIIndex(final Header header, final NavigableSet<Long> virtualOffsets) {
this.header = header;
this.virtualOffsets = new TreeSet<>(virtualOffsets);
if (this.virtualOffsets.isEmpty()) {
throw new RuntimeException("Invalid SBI format: should contain at least one offset");
}
}

/**
* Load an SBI into memory from a path.
* @param path the path to the SBI file
* @throws IOException if an I/O error occurs while reading the file
*/
public static SBIIndex load(final Path path) throws IOException {
try (InputStream in = new BufferedInputStream(Files.newInputStream(path))) {
return readIndex(in);
}
}

/**
* Load an SBI into memory from a stream.
* @param in the stream to read the SBI from
*/
public static SBIIndex load(final InputStream in) {
return readIndex(in);
}

private static SBIIndex readIndex(final InputStream in) {
BinaryCodec binaryCodec = new BinaryCodec(in);
Header header = readHeader(binaryCodec);
long numOffsets = binaryCodec.readLong();
NavigableSet<Long> virtualOffsets = new TreeSet<>();
long prev = -1;
for (long i = 0; i < numOffsets; i++) {
long cur = binaryCodec.readLong();
if (prev > cur) {
throw new RuntimeException(String.format(
"Invalid SBI; offsets not in order: %#x > %#x",
prev, cur));
}
virtualOffsets.add(prev = cur);
}
return new SBIIndex(header, virtualOffsets);
}

private static Header readHeader(BinaryCodec binaryCodec) {
final byte[] buffer = new byte[SBI_MAGIC.length];
binaryCodec.readBytes(buffer);
if (!Arrays.equals(buffer, SBI_MAGIC)) {
throw new RuntimeException("Invalid file header in SBI: " + new String(buffer) + " (" + Arrays.toString(buffer) + ")");
}
long fileLength = binaryCodec.readLong();
byte[] md5 = new byte[16];
binaryCodec.readBytes(md5);
byte[] uuid = new byte[16];
binaryCodec.readBytes(uuid);
long totalNumberOfRecords = binaryCodec.readLong();
long granularity = binaryCodec.readLong();
return new Header(fileLength, md5, uuid, totalNumberOfRecords, granularity);
}
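
// On-disk layout, as read by readIndex and readHeader above (integers are
// little-endian, per BinaryCodec):
//   magic                  4 bytes   "SBI\1"
//   fileLength             8 bytes   length of the data file in bytes
//   md5                   16 bytes   MD5 checksum of the data file
//   uuid                  16 bytes   UUID for the data file
//   totalNumberOfRecords   8 bytes
//   granularity            8 bytes
//   numOffsets             8 bytes   count of the virtual offsets that follow
//   offsets                8 bytes each, ascending BGZF virtual file pointers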

/**
* Returns the granularity of the index, i.e. the number of alignments between successive entries in the
* index, or zero if not specified.
* @return the granularity of the index
*/
public long getGranularity() {
return header.getGranularity();
}

/**
* Returns the entries in the index.
*
* @return a set of file pointers for all the alignment offsets in the index, in ascending order. The last
* virtual file pointer is the position at which the next record would start if it were added to the file.
*/
public NavigableSet<Long> getVirtualOffsets() {
return new TreeSet<>(virtualOffsets);
}

/**
* Returns number of entries in the index.
*
* @return the number of virtual offsets in the index
*/
public long size() {
return virtualOffsets.size();
}

/**
* Returns the length of the data file in bytes.
*
* @return the length of the data file in bytes
*/
public long dataFileLength() {
return header.getFileLength();
}

/**
* Split the data file for this index into non-overlapping chunks of roughly the given size that cover the whole
* file and that can be read independently of one another.
*
* @param splitSize the rough size of each split in bytes
* @return a list of contiguous, non-overlapping, sorted chunks that cover the whole data file
* @see #getChunk(long, long)
*/
public List<Chunk> split(long splitSize) {
if (splitSize <= 0) {
throw new IllegalArgumentException(String.format("Split size must be positive: %s", splitSize));
}
long fileSize = dataFileLength();
List<Chunk> chunks = new ArrayList<>();
for (long splitStart = 0; splitStart < fileSize; splitStart += splitSize) {
Chunk chunk = getChunk(splitStart, splitStart + splitSize);
if (chunk != null) {
chunks.add(chunk);
}
}
return chunks;
}
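
// Worked example (illustrative numbers): with dataFileLength() == 100 and
// splitSize == 40, split calls getChunk for [0, 40), [40, 80) and [80, 120);
// getChunk then moves each boundary to the first indexed alignment start at
// or after it, and empty chunks are dropped.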

/**
* Return a chunk that corresponds to the given range in the data file. Note that the chunk does not necessarily
* completely cover the given range; however, this method maps a set of contiguous, non-overlapping file ranges
* that cover the whole data file to a set of contiguous, non-overlapping chunks that cover the whole data file.
*
* @param splitStart the start of the file range (inclusive)
* @param splitEnd the end of the file range (exclusive)
* @return a chunk whose virtual start is at the first alignment start position that is greater than or equal to the
* given split start position, and whose virtual end is at the first alignment start position that is greater than
* or equal to the given split end position, or null if the chunk would be empty.
* @see #split(long)
*/
public Chunk getChunk(long splitStart, long splitEnd) {
if (splitStart >= splitEnd) {
throw new IllegalArgumentException(String.format("Split start (%s) must be less than end (%s)", splitStart, splitEnd));
}
long maxEnd = BlockCompressedFilePointerUtil.getBlockAddress(virtualOffsets.last());
splitStart = Math.min(splitStart, maxEnd);
splitEnd = Math.min(splitEnd, maxEnd);
long virtualSplitStart = BlockCompressedFilePointerUtil.makeFilePointer(splitStart);
long virtualSplitEnd = BlockCompressedFilePointerUtil.makeFilePointer(splitEnd);
Long virtualSplitStartAlignment = virtualOffsets.ceiling(virtualSplitStart);
Long virtualSplitEndAlignment = virtualOffsets.ceiling(virtualSplitEnd);
// neither virtualSplitStartAlignment nor virtualSplitEndAlignment should ever be null, but check anyway
if (virtualSplitStartAlignment == null) {
throw new IllegalArgumentException(String.format("No virtual offset found for virtual file pointer %s, last virtual offset %s",
BlockCompressedFilePointerUtil.asString(virtualSplitStart), BlockCompressedFilePointerUtil.asString(virtualOffsets.last())));
}
if (virtualSplitEndAlignment == null) {
throw new IllegalArgumentException(String.format("No virtual offset found for virtual file pointer %s, last virtual offset %s",
BlockCompressedFilePointerUtil.asString(virtualSplitEnd), BlockCompressedFilePointerUtil.asString(virtualOffsets.last())));
}
if (virtualSplitStartAlignment.longValue() == virtualSplitEndAlignment.longValue()) {
return null;
}
return new Chunk(virtualSplitStartAlignment, virtualSplitEndAlignment);
}

@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;

SBIIndex that = (SBIIndex) o;

return virtualOffsets.equals(that.virtualOffsets);
}

@Override
public int hashCode() {
return virtualOffsets.hashCode();
}

@Override
public String toString() {
return virtualOffsets.toString();
}
}
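
Putting the pieces together, a hedged end-to-end sketch of the reading side (file name and split size illustrative):

import htsjdk.samtools.Chunk;
import htsjdk.samtools.SBIIndex;

import java.io.IOException;
import java.nio.file.Paths;
import java.util.List;

public class SplitDemo { // hypothetical demo class, not in htsjdk
    public static void main(String[] args) throws IOException {
        final SBIIndex index = SBIIndex.load(Paths.get("example.bam.sbi"));
        // Contiguous, non-overlapping chunks of roughly 10 MB that cover the
        // whole BAM; each chunk can be read independently, e.g. one per worker.
        final List<Chunk> chunks = index.split(10 * 1024 * 1024);
        chunks.forEach(System.out::println);
    }
}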