
Build and test on Java 11 #6119

Merged (12 commits, Oct 7, 2019)
30 changes: 17 additions & 13 deletions .travis.yml
@@ -9,16 +9,16 @@ jdk:
- openjdk8
env:
matrix:
-  - TEST_TYPE=cloud UPLOAD=true TESTS_REQUIRE_GCLOUD=true
-  - TEST_TYPE=integration TEST_DOCKER=true TEST_VERBOSITY=minimal
-  - TEST_TYPE=unit TEST_DOCKER=true TEST_VERBOSITY=minimal
-  - TEST_TYPE=variantcalling TEST_DOCKER=true TEST_VERBOSITY=minimal
-  - TEST_TYPE=python TEST_DOCKER=true TEST_VERBOSITY=minimal
-  - RUN_CNV_GERMLINE_COHORT_WDL=true TESTS_REQUIRE_GCLOUD=true
-  - RUN_CNV_GERMLINE_CASE_WDL=true TESTS_REQUIRE_GCLOUD=true
-  - RUN_CNV_SOMATIC_WDL=true TESTS_REQUIRE_GCLOUD=true
-  - RUN_M2_WDL=true TESTS_REQUIRE_GCLOUD=true
-  - RUN_CNN_WDL=true TESTS_REQUIRE_GCLOUD=true
+  - SCALA_VERSION=2.11 TEST_TYPE=cloud UPLOAD=true TESTS_REQUIRE_GCLOUD=true
+  - SCALA_VERSION=2.11 TEST_TYPE=integration TEST_DOCKER=true TEST_VERBOSITY=minimal
+  - SCALA_VERSION=2.11 TEST_TYPE=unit TEST_DOCKER=true TEST_VERBOSITY=minimal
+  - SCALA_VERSION=2.11 TEST_TYPE=variantcalling TEST_DOCKER=true TEST_VERBOSITY=minimal
+  - SCALA_VERSION=2.11 TEST_TYPE=python TEST_DOCKER=true TEST_VERBOSITY=minimal
+  - SCALA_VERSION=2.11 RUN_CNV_GERMLINE_COHORT_WDL=true TESTS_REQUIRE_GCLOUD=true
+  - SCALA_VERSION=2.11 RUN_CNV_GERMLINE_CASE_WDL=true TESTS_REQUIRE_GCLOUD=true
+  - SCALA_VERSION=2.11 RUN_CNV_SOMATIC_WDL=true TESTS_REQUIRE_GCLOUD=true
+  - SCALA_VERSION=2.11 RUN_M2_WDL=true TESTS_REQUIRE_GCLOUD=true
+  - SCALA_VERSION=2.11 RUN_CNN_WDL=true TESTS_REQUIRE_GCLOUD=true
global:
#gradle needs this
- TERM=dumb
@@ -47,7 +47,11 @@ matrix:
fast_finish: true
include:
- jdk: oraclejdk8
-      env: TEST_TYPE=integration TEST_VERBOSITY=minimal TEST_REQUIRE_GCLOUD=true
+      env: SCALA_VERSION=2.11 TEST_TYPE=integration TEST_VERBOSITY=minimal TEST_REQUIRE_GCLOUD=true
+    - jdk: openjdk11
+      env: SCALA_VERSION=2.12 TEST_TYPE=integration TEST_VERBOSITY=minimal
+    - jdk: openjdk11
+      env: SCALA_VERSION=2.12 TEST_TYPE=unit TEST_VERBOSITY=minimal
before_cache:
- rm -f $HOME/.gradle/caches/modules-2/modules-2.lock
- rm -fr $HOME/.gradle/caches/*/plugin-resolution/
@@ -162,13 +166,13 @@ script:
sudo mkdir -p build/reports/;
sudo chmod -R a+w build/reports/;
cp scripts/docker/dockertest.gradle .;
- sudo docker run -v $(pwd):/gatkCloneMountPoint:cached -v $(pwd)/testJars:/jars:cached --rm -e "TEST_VERBOSITY=minimal" -e "TEST_TYPE=${TEST_TYPE}" -t broadinstitute/gatk:${DOCKER_TAG} bash --init-file /gatk/gatkenv.rc /root/run_unit_tests.sh;
+ sudo docker run -v $(pwd):/gatkCloneMountPoint:cached -v $(pwd)/testJars:/jars:cached --rm -e "scala.version=${SCALA_VERSION}" -e "TEST_VERBOSITY=minimal" -e "TEST_TYPE=${TEST_TYPE}" -t broadinstitute/gatk:${DOCKER_TAG} bash --init-file /gatk/gatkenv.rc /root/run_unit_tests.sh;
TEST_EXIT_VALUE=$?;
sudo mkdir build/reports/tests/test && sudo cp -rp build/reports/tests/testOnPackagedReleaseJar/* build/reports/tests/test && sudo rm -r build/reports/tests/testOnPackagedReleaseJar;
$( exit ${TEST_EXIT_VALUE} );
else
./gatk PrintReads -I src/test/resources/NA12878.chr17_69k_70k.dictFix.bam -O output.bam;
- travis_wait 50 ./gradlew jacocoTestReport;
+ travis_wait 50 ./gradlew -Dscala.version=${SCALA_VERSION} jacocoTestReport;
fi;
# This creates and uploads the gatk zip file to the nightly build bucket, only keeping the 10 newest entries
# This also constructs the Docker image and uploads it to https://cloud.docker.com/u/broadinstitute/repository/docker/broadinstitute/gatk-nightly/
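For context on how the new SCALA_VERSION variable reaches the build: the CI script passes it as -Dscala.version, and build.gradle (below) reads it through System.getProperty with a 2.11 default. A minimal standalone sketch of that lookup-with-default pattern; the class name is illustrative:

    public class ScalaVersionProbe {
        public static void main(String[] args) {
            // Mirrors build.gradle's System.getProperty('scala.version', '2.11'):
            // running with -Dscala.version=2.12 selects the Scala 2.12 artifacts instead.
            final String scalaVersion = System.getProperty("scala.version", "2.11");
            System.out.println("Building against Scala " + scalaVersion);
        }
    }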
29 changes: 19 additions & 10 deletions build.gradle
@@ -57,11 +57,11 @@ repositories {
mavenLocal()
}

- final requiredJavaVersion = "8"
final htsjdkVersion = System.getProperty('htsjdk.version','2.20.3')
final picardVersion = System.getProperty('picard.version','2.20.7')
final barclayVersion = System.getProperty('barclay.version','2.1.0')
final sparkVersion = System.getProperty('spark.version', '2.4.3')
+ final scalaVersion = System.getProperty('scala.version', '2.11')
final hadoopVersion = System.getProperty('hadoop.version', '2.8.2')
final disqVersion = System.getProperty('disq.version','0.3.3')
final genomicsdbVersion = System.getProperty('genomicsdb.version','1.1.2')
@@ -131,15 +131,23 @@ def looksLikeWereInAGitRepository(){
file(".git").isDirectory() || (file(".git").exists() && file(".git").text.startsWith("gitdir"))
}

- // Ensure that we have the right JDK version, a clone of the git repository, and resolve any required git-lfs
+ // Ensure that we have a clone of the git repository, and resolve any required git-lfs
// resource files that are needed to run the build but are still lfs stub files.
- def ensureBuildPrerequisites(requiredJavaVersion, largeResourcesFolder, buildPrerequisitesMessage) {
-     // Make sure we can get a ToolProvider class loader. If not we may have just a JRE, or a JDK from the future.
-     if (ToolProvider.getSystemToolClassLoader() == null) {
+ def ensureBuildPrerequisites(largeResourcesFolder, buildPrerequisitesMessage) {
[Review comment, project member]
I think we still need some sort of test here, because this also catches people who are running with a Java 8 JRE instead of a JDK. It looks like Gradle can tell you the version using JavaVersion.current(), and we can still have this test in the case of Java 8. We could throw a clear error message for lower versions, and a warning for versions 9/10/12+ that it's not tested on those.

[Reply, PR author]
Good point. I've updated the PR to do what you suggest.

+ if (!JavaVersion.current().isJava8Compatible()) {
+     throw new GradleException(
+             "Java 8 or later is required to build GATK, but ${JavaVersion.current()} was found. "
+             + "$buildPrerequisitesMessage")
+ }
+ // Make sure we can get a ToolProvider class loader (for Java 8). If not we may have just a JRE.
+ if (JavaVersion.current().isJava8() && ToolProvider.getSystemToolClassLoader() == null) {
      throw new GradleException(
              "The ClassLoader obtained from the Java ToolProvider is null. "
              + "A Java $requiredJavaVersion JDK must be installed. $buildPrerequisitesMessage")
  }
+ if (!JavaVersion.current().isJava8() && !JavaVersion.current().isJava11()) {
+     println("Warning: using Java ${JavaVersion.current()} but only Java 8 and Java 11 have been tested.")
+ }
if (!looksLikeWereInAGitRepository()) {
throw new GradleException("This doesn't appear to be a git folder. " +
"The GATK Github repository must be cloned using \"git clone\" to run the build. " +
@@ -150,7 +158,7 @@ def ensureBuildPrerequisites(requiredJavaVersion, largeResourcesFolder, buildPre
resolveLargeResourceStubFiles(largeResourcesFolder, buildPrerequisitesMessage)
}

- ensureBuildPrerequisites(requiredJavaVersion, largeResourcesFolder, buildPrerequisitesMessage)
+ ensureBuildPrerequisites(largeResourcesFolder, buildPrerequisitesMessage)

configurations.all {
resolutionStrategy {
@@ -221,7 +229,7 @@ configurations {
// Get the jdk files we need to run javaDoc. We need to use these during compile, testCompile,
// test execution, and gatkDoc generation, but we don't want them as part of the runtime
// classpath and we don't want to redistribute them in the uber jar.
- final javadocJDKFiles = files(((URLClassLoader) ToolProvider.getSystemToolClassLoader()).getURLs())
+ final javadocJDKFiles = ToolProvider.getSystemToolClassLoader() == null ? files([]) : files(((URLClassLoader) ToolProvider.getSystemToolClassLoader()).getURLs())

dependencies {
// javadoc utilities; compile/test only to prevent redistribution of sdk jars
@@ -280,15 +288,16 @@ dependencies {
compile ('org.ojalgo:ojalgo-commons-math3:1.0.0') {
exclude group: 'org.apache.commons'
}
- compile ('org.apache.spark:spark-mllib_2.11:' + sparkVersion) {
+ compile ('org.apache.spark:spark-mllib_' + scalaVersion + ':' + sparkVersion) {
// JUL is used by Google Dataflow as the backend logger, so exclude jul-to-slf4j to avoid a loop
exclude module: 'jul-to-slf4j'
exclude module: 'javax.servlet'
exclude module: 'servlet-api'
}
+ compile 'com.thoughtworks.paranamer:paranamer:2.8'

compile 'org.bdgenomics.bdg-formats:bdg-formats:0.5.0'
- compile('org.bdgenomics.adam:adam-core-spark2_2.11:0.20.0') {
+ compile('org.bdgenomics.adam:adam-core-spark2_' + scalaVersion + ':0.28.0') {
exclude group: 'org.slf4j'
exclude group: 'org.apache.hadoop'
exclude group: 'org.scala-lang'
@@ -349,7 +358,7 @@ dependencies {

testCompile sourceSets.testUtils.output

testCompile "org.mockito:mockito-core:2.10.0"
testCompile "org.mockito:mockito-core:2.28.2"
testCompile "com.google.jimfs:jimfs:1.1"
}

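A note on the javadocJDKFiles guard above: per the build checks added in this PR, ToolProvider.getSystemToolClassLoader() can come back null on a plain Java 8 JRE, and on Java 9+ the method is deprecated and no longer yields a URLClassLoader, so the old unconditional cast was unsafe. A small probe sketch, assuming only the JDK; the class name is illustrative:

    import java.net.URLClassLoader;
    import javax.tools.ToolProvider;

    public class ToolClassLoaderProbe {
        public static void main(String[] args) {
            final ClassLoader toolLoader = ToolProvider.getSystemToolClassLoader();
            if (toolLoader == null) {
                // Java 8 JRE without tools.jar: nothing to put on the javadoc classpath.
                System.out.println("No system tool class loader available.");
            } else if (toolLoader instanceof URLClassLoader) {
                // Java 8 JDK: the tools.jar URLs can seed the javadoc classpath.
                System.out.println(((URLClassLoader) toolLoader).getURLs().length + " tool URL(s) found.");
            } else {
                // Java 9+: the cast to URLClassLoader would fail here.
                System.out.println("Tool class loader is not a URLClassLoader: " + toolLoader.getClass());
            }
        }
    }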
@@ -271,8 +271,8 @@ public FeatureDataSource(final FeatureInput<T> featureInput, final int queryLook
Utils.nonNull(genomicsDBOptions, "GenomicsDBOptions must not be null. Calling tool may not read from a GenomicsDB data source.");
}

- final Function<SeekableByteChannel, SeekableByteChannel> cloudWrapper = (cloudPrefetchBuffer > 0 ? is -> SeekableByteChannelPrefetcher.addPrefetcher(cloudPrefetchBuffer, is) : Function.identity());
- final Function<SeekableByteChannel, SeekableByteChannel> cloudIndexWrapper = (cloudIndexPrefetchBuffer > 0 ? is -> SeekableByteChannelPrefetcher.addPrefetcher(cloudIndexPrefetchBuffer, is) : Function.identity());
+ final Function<SeekableByteChannel, SeekableByteChannel> cloudWrapper = (cloudPrefetchBuffer > 0 ? is -> SeekableByteChannelPrefetcher.addPrefetcher(cloudPrefetchBuffer, is) : Utils.identityFunction());
+ final Function<SeekableByteChannel, SeekableByteChannel> cloudIndexWrapper = (cloudIndexPrefetchBuffer > 0 ? is -> SeekableByteChannelPrefetcher.addPrefetcher(cloudIndexPrefetchBuffer, is) : Utils.identityFunction());

// Create a feature reader without requiring an index. We will require one ourselves as soon as
// a query by interval is attempted.
@@ -371,7 +371,7 @@ private static <T extends Feature> FeatureReader<T> getFeatureReader(final Featu
if (BucketUtils.isCloudStorageUrl(featureInput)) {
return AbstractFeatureReader.getFeatureReader(absoluteRawPath, null, codec, requireIndex, cloudWrapper, cloudIndexWrapper);
} else {
- return AbstractFeatureReader.getFeatureReader(absoluteRawPath, null, codec, requireIndex, Function.identity(), Function.identity());
+ return AbstractFeatureReader.getFeatureReader(absoluteRawPath, null, codec, requireIndex, Utils.identityFunction(), Utils.identityFunction());
}
} catch (final TribbleException e) {
throw new GATKException("Error initializing feature reader for path " + featureInput.getFeaturePath(), e);
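Both changed call sites above follow the same pattern: wrap the byte channel in a prefetcher when a buffer size is configured, otherwise fall through to a serializable identity. A schematic sketch of that selection; addPrefetcher below is a stand-in, not SeekableByteChannelPrefetcher's real signature:

    import java.io.Serializable;
    import java.nio.channels.SeekableByteChannel;
    import java.util.function.Function;

    public class ChannelWrapperSelection {
        // Stand-in for SeekableByteChannelPrefetcher.addPrefetcher(buffer, channel).
        static SeekableByteChannel addPrefetcher(int bufferMb, SeekableByteChannel channel) {
            return channel; // a real implementation would return a prefetching decorator
        }

        // Serializable identity, same shape as Utils.identityFunction() used above.
        @SuppressWarnings("unchecked")
        static <T> Function<T, T> identityFunction() {
            return (Function & Serializable) t -> t;
        }

        static Function<SeekableByteChannel, SeekableByteChannel> chooseWrapper(int prefetchBufferMb) {
            return prefetchBufferMb > 0
                    ? channel -> addPrefetcher(prefetchBufferMb, channel)
                    : identityFunction();
        }
    }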
@@ -23,6 +23,7 @@
import org.broadinstitute.hellbender.utils.io.IOUtils;
import org.broadinstitute.hellbender.utils.read.GATKRead;

+ import java.io.Serializable;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
@@ -90,7 +91,7 @@ protected Broadcast<Supplier<AssemblyRegionEvaluator>> assemblyRegionEvaluatorSu
}

private static Broadcast<Supplier<AssemblyRegionEvaluator>> assemblyRegionEvaluatorSupplierBroadcastFunction(final JavaSparkContext ctx, final AssemblyRegionEvaluator assemblyRegionEvaluator) {
- Supplier<AssemblyRegionEvaluator> supplier = () -> assemblyRegionEvaluator;
+ Supplier<AssemblyRegionEvaluator> supplier = (Supplier<AssemblyRegionEvaluator> & Serializable) (() -> assemblyRegionEvaluator);
return ctx.broadcast(supplier);
}

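The intersection-type cast above is the standard idiom for making a lambda serializable, which Spark requires before the supplier can be broadcast. A minimal sketch, independent of Spark; names are illustrative:

    import java.io.Serializable;
    import java.util.function.Supplier;

    public class SerializableLambdaDemo {
        public static void main(String[] args) {
            final String payload = "evaluator";
            // A plain lambda is not Serializable, so Spark's closure serializer rejects it.
            final Supplier<String> plain = () -> payload;
            // The intersection cast makes javac emit a serializable lambda instead.
            final Supplier<String> serializable = (Supplier<String> & Serializable) () -> payload;
            System.out.println(plain instanceof Serializable);        // false
            System.out.println(serializable instanceof Serializable); // true
        }
    }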
@@ -13,7 +13,7 @@
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.broadcast.Broadcast;
- import org.bdgenomics.adam.models.RecordGroupDictionary;
+ import org.bdgenomics.adam.models.ReadGroupDictionary;
import org.bdgenomics.adam.models.SequenceDictionary;
import org.bdgenomics.formats.avro.AlignmentRecord;
import org.broadinstitute.hellbender.exceptions.GATKException;
@@ -157,7 +157,7 @@ private static void writeReadsADAM(
final JavaSparkContext ctx, final String outputFile, final JavaRDD<SAMRecord> reads,
final SAMFileHeader header) throws IOException {
final SequenceDictionary seqDict = SequenceDictionary.fromSAMSequenceDictionary(header.getSequenceDictionary());
- final RecordGroupDictionary readGroups = RecordGroupDictionary.fromSAMHeader(header);
+ final ReadGroupDictionary readGroups = ReadGroupDictionary.fromSAMHeader(header);
final JavaPairRDD<Void, AlignmentRecord> rddAlignmentRecords =
reads.map(read -> {
read.setHeaderStrict(header);
@@ -6,15 +6,19 @@
import org.bdgenomics.adam.models.ReferenceRegion;
import org.bdgenomics.adam.util.TwoBitFile;
import org.bdgenomics.adam.util.TwoBitRecord;
+ import org.bdgenomics.formats.avro.Strand;
import org.bdgenomics.utils.io.ByteAccess;
import org.broadinstitute.hellbender.utils.SimpleInterval;
import org.broadinstitute.hellbender.utils.Utils;
import org.broadinstitute.hellbender.utils.gcs.BucketUtils;
import org.broadinstitute.hellbender.utils.reference.ReferenceBases;
+ import scala.Tuple2;
import scala.collection.JavaConversions;
import scala.collection.immutable.IndexedSeq;

import java.io.IOException;
import java.io.Serializable;
+ import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
@@ -39,7 +43,10 @@ public ReferenceTwoBitSparkSource( String referenceURL) throws IOException {
byte[] bytes = ByteStreams.toByteArray(BucketUtils.openFile(this.referenceURL));
ByteAccess byteAccess = new DirectFullByteArrayByteAccess(bytes);
this.twoBitFile = new TwoBitFile(byteAccess);
- this.twoBitSeqEntries = JavaConversions.mapAsJavaMap(twoBitFile.seqRecords());
+ this.twoBitSeqEntries = new LinkedHashMap<>();
+ for (Tuple2<String, TwoBitRecord> pair: JavaConversions.seqAsJavaList(twoBitFile.seqRecords())) {
+     twoBitSeqEntries.put(pair._1, pair._2);
+ }
}

/**
@@ -74,7 +81,7 @@ private static ReferenceRegion simpleIntervalToReferenceRegion(SimpleInterval in
String contig = interval.getContig();
long start = interval.getGA4GHStart();
long end = interval.getGA4GHEnd();
- return new ReferenceRegion(contig, start, end, null);
+ return new ReferenceRegion(contig, start, end, Strand.UNKNOWN);
}

private SimpleInterval cropIntervalAtContigEnd( final SimpleInterval interval ) {
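Background on the constructor change above: judging from the diff, the newer ADAM's TwoBitFile.seqRecords() hands back a Scala sequence of (name, record) pairs rather than a Scala Map, so the code now copies the pairs into a LinkedHashMap to keep lookups cheap and ordering stable. A generic sketch of that conversion, assuming the Scala 2.11/2.12 library is on the classpath:

    import java.util.LinkedHashMap;
    import java.util.Map;
    import scala.Tuple2;
    import scala.collection.JavaConversions;
    import scala.collection.Seq;

    public class ScalaSeqToJavaMap {
        // Copies a Scala Seq of key/value pairs into a Java map, preserving encounter order.
        static <K, V> Map<K, V> toJavaMap(Seq<Tuple2<K, V>> pairs) {
            final Map<K, V> out = new LinkedHashMap<>();
            for (Tuple2<K, V> pair : JavaConversions.seqAsJavaList(pairs)) {
                out.put(pair._1(), pair._2());
            }
            return out;
        }
    }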
13 changes: 13 additions & 0 deletions src/main/java/org/broadinstitute/hellbender/utils/Utils.java
@@ -19,6 +19,7 @@
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
+ import java.io.Serializable;
import java.lang.reflect.Array;
import java.math.BigInteger;
import java.nio.file.Files;
@@ -1102,6 +1103,18 @@ public static <T> Stream<T> stream(final Iterator<T> iterator) {
return stream(() -> iterator);
}

+ /**
+  * Returns a function that always returns its input argument. Unlike {@link Function#identity()} the returned
+  * function is also serializable.
+  *
+  * @param <T> the type of the input and output objects to the function
+  * @return a function that always returns its input argument
+  */
+ @SuppressWarnings("unchecked")
+ public static <T> Function<T, T> identityFunction() {
+     return (Function & Serializable) t -> t;
+ }
+
/**
* Like Guava's {@link Iterators#transform(Iterator, com.google.common.base.Function)}, but runs a fixed number
* ({@code numThreads}) of transformations in parallel, while maintaining ordering of the output iterator.
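A quick standalone check of what the new helper buys over Function.identity(): the intersection cast produces a lambda that is also Serializable, which is what lets it ride along in Spark closures. A sketch; the demo class name is illustrative:

    import java.io.Serializable;
    import java.util.function.Function;

    public class IdentityFunctionDemo {
        // Same shape as the Utils.identityFunction() added above.
        @SuppressWarnings("unchecked")
        static <T> Function<T, T> identityFunction() {
            return (Function & Serializable) t -> t;
        }

        public static void main(String[] args) {
            System.out.println(Function.identity() instanceof Serializable); // false
            System.out.println(identityFunction() instanceof Serializable);  // true
            System.out.println(identityFunction().apply("unchanged"));       // prints "unchanged"
        }
    }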
@@ -216,6 +216,7 @@ public static File writeTempResourceFromPath(final String resourcePath, final Cl
* @param resource Embedded resource.
* @param file File path to write.
*/
@SuppressWarnings("deprecation")
public static void writeResource(Resource resource, File file) {
String path = resource.getPath();
InputStream inputStream = resource.getResourceContentsAsStream();
@@ -3,8 +3,7 @@
import htsjdk.samtools.SAMFileHeader;
import htsjdk.samtools.SAMRecord;
import org.bdgenomics.adam.converters.AlignmentRecordConverter;
- import org.bdgenomics.adam.models.RecordGroupDictionary;
- import org.bdgenomics.adam.models.SAMFileHeaderWritable;
+ import org.bdgenomics.adam.models.ReadGroupDictionary;
import org.bdgenomics.formats.avro.AlignmentRecord;

/**
@@ -30,8 +29,8 @@ public final class BDGAlignmentRecordToGATKReadAdapter extends SAMRecordToGATKRe
private final AlignmentRecord alignmentRecord;

public BDGAlignmentRecordToGATKReadAdapter(final AlignmentRecord alignmentRecord, final SAMFileHeader header) {
- super(new AlignmentRecordConverter().convert(alignmentRecord, SAMFileHeaderWritable.apply(header),
-         RecordGroupDictionary.fromSAMHeader(header)));
+ super(new AlignmentRecordConverter().convert(alignmentRecord, header,
+         ReadGroupDictionary.fromSAMHeader(header)));
this.alignmentRecord = alignmentRecord;
}

@@ -5,7 +5,7 @@
import org.bdgenomics.formats.avro.AlignmentRecord;
import org.bdgenomics.adam.converters.SAMRecordConverter;
import org.bdgenomics.adam.models.SequenceDictionary;
- import org.bdgenomics.adam.models.RecordGroupDictionary;
+ import org.bdgenomics.adam.models.ReadGroupDictionary;

/**
* Converts a GATKRead to a BDG AlignmentRecord
@@ -15,27 +15,27 @@ public class GATKReadToBDGAlignmentRecordConverter {

private SAMFileHeader header;
private SequenceDictionary dict;
- private RecordGroupDictionary readGroups;
+ private ReadGroupDictionary readGroups;

public GATKReadToBDGAlignmentRecordConverter(SAMFileHeader header) {
this.header = header;
this.dict = SequenceDictionary.fromSAMSequenceDictionary(header.getSequenceDictionary());
- this.readGroups = RecordGroupDictionary.fromSAMHeader(header);
+ this.readGroups = ReadGroupDictionary.fromSAMHeader(header);
}

public static AlignmentRecord convert( final GATKRead gatkRead, final SAMFileHeader header ) {
SequenceDictionary dict = SequenceDictionary.fromSAMSequenceDictionary(header.getSequenceDictionary());
- RecordGroupDictionary readGroups = RecordGroupDictionary.fromSAMHeader(header);
+ ReadGroupDictionary readGroups = ReadGroupDictionary.fromSAMHeader(header);
return GATKReadToBDGAlignmentRecordConverter.convert(gatkRead, header, dict, readGroups);
}

public static AlignmentRecord convert(
- final GATKRead gatkRead, final SAMFileHeader header, final SequenceDictionary dict, final RecordGroupDictionary readGroups ) {
+ final GATKRead gatkRead, final SAMFileHeader header, final SequenceDictionary dict, final ReadGroupDictionary readGroups ) {
return converter.convert(gatkRead.convertToSAMRecord(header));
}

public static AlignmentRecord convert(
- final SAMRecord sam, final SequenceDictionary dict, final RecordGroupDictionary readGroups ) {
+ final SAMRecord sam, final SequenceDictionary dict, final ReadGroupDictionary readGroups ) {
return converter.convert(sam);
}
}
@@ -92,6 +92,7 @@ public boolean isBufferTruncated() {
*
* @throws IOException When unable to read or write.
*/
@SuppressWarnings("deprecation")
public void read() throws IOException {
int readCount = 0;
try {
@@ -71,6 +71,7 @@ public static int exec(String[] command) {
* @param settings Settings to be run.
* @return The output of the command.
*/
@SuppressWarnings("deprecation")
public ProcessOutput exec(ProcessSettings settings) {
StreamOutput stdout;
StreamOutput stderr;
@@ -161,6 +162,7 @@ public ProcessOutput exec(ProcessSettings settings) {
* TODO: Try to use NIO to interrupt streams.
*/
@Override
@SuppressWarnings("deprecation")
protected void tryCleanShutdown() {
destroyed = true;

@@ -349,6 +349,7 @@ private void startListeners() {
* NOTE: capture threads may block on read.
*/
@Override
@SuppressWarnings("deprecation")
protected void tryCleanShutdown() {
if (stdErrFuture != null && !stdErrFuture.isDone()) {
boolean isCancelled = stdErrFuture.cancel(true);
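The @SuppressWarnings("deprecation") annotations added across these process-control methods keep deprecation warnings scoped to the individual methods that knowingly call deprecated APIs, presumably to keep the Java 11 build quiet without silencing whole classes. A trivial sketch of the pattern; names are illustrative:

    class LegacyProcess {
        @Deprecated
        static void shutdownNow() {
            // stand-in for a deprecated cleanup call
        }
    }

    public class SuppressionScopeDemo {
        // Scopes the deprecation warning to this one caller; the rest of the
        // class still gets normal -Xlint:deprecation diagnostics.
        @SuppressWarnings("deprecation")
        static void tryCleanShutdown() {
            LegacyProcess.shutdownNow();
        }

        public static void main(String[] args) {
            tryCleanShutdown();
        }
    }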