diff --git a/sdk/storage/azure-storage-blob/CHANGELOG.md b/sdk/storage/azure-storage-blob/CHANGELOG.md
index 6cf72b742aa9..64e0da6e8f89 100644
--- a/sdk/storage/azure-storage-blob/CHANGELOG.md
+++ b/sdk/storage/azure-storage-blob/CHANGELOG.md
@@ -3,6 +3,7 @@
 ## 12.8.0-beta.2 (Unreleased)
 - Fixed a bug that, when the data length parameter did not match the actual length of the data in BlobClient.upload, caused a zero length blob to be uploaded rather than throwing an exception.
 - Fixed a bug that ignored the customer's specified block size when determining buffer sizes in BlobClient.upload
+- Fixed a bug where the query input stream would throw when a ByteBuffer of length 0 was encountered.
 
 ## 12.8.0-beta.1 (2020-07-07)
 - Added support for the 2019-12-12 service version.
diff --git a/sdk/storage/azure-storage-common/CHANGELOG.md b/sdk/storage/azure-storage-common/CHANGELOG.md
index c46970cb89f5..751e9b1573c1 100644
--- a/sdk/storage/azure-storage-common/CHANGELOG.md
+++ b/sdk/storage/azure-storage-common/CHANGELOG.md
@@ -1,7 +1,7 @@
 # Release History
 
 ## 12.8.0-beta.2 (Unreleased)
-
+- Fixed a bug where FluxInputStream would throw when a ByteBuffer of length 0 was encountered.
 
 ## 12.8.0-beta.1 (2020-07-07)
 - Added support for the 2019-12-12 service version.
diff --git a/sdk/storage/azure-storage-common/src/main/java/com/azure/storage/common/implementation/FluxInputStream.java b/sdk/storage/azure-storage-common/src/main/java/com/azure/storage/common/implementation/FluxInputStream.java
index 3f84658f920b..5ccf2074f43d 100644
--- a/sdk/storage/azure-storage-common/src/main/java/com/azure/storage/common/implementation/FluxInputStream.java
+++ b/sdk/storage/azure-storage-common/src/main/java/com/azure/storage/common/implementation/FluxInputStream.java
@@ -11,6 +11,7 @@
 import java.io.ByteArrayInputStream;
 import java.io.IOException;
 import java.io.InputStream;
+import java.nio.Buffer;
 import java.nio.ByteBuffer;
 import java.util.concurrent.locks.Condition;
 import java.util.concurrent.locks.Lock;
@@ -166,6 +167,7 @@ private void blockForData() {
      */
     private void subscribeToData() {
         this.data
+            .filter(Buffer::hasRemaining) /* Filter to make sure only non-empty byte buffers are emitted. */
             .onBackpressureBuffer()
             .subscribe(
                 // ByteBuffer consumer
diff --git a/sdk/storage/azure-storage-common/src/test/java/com/azure/storage/common/implementation/FluxInputStreamTest.groovy b/sdk/storage/azure-storage-common/src/test/java/com/azure/storage/common/implementation/FluxInputStreamTest.groovy
index c58c1e1ac3b5..a2532dc4474a 100644
--- a/sdk/storage/azure-storage-common/src/test/java/com/azure/storage/common/implementation/FluxInputStreamTest.groovy
+++ b/sdk/storage/azure-storage-common/src/test/java/com/azure/storage/common/implementation/FluxInputStreamTest.groovy
@@ -65,6 +65,39 @@ class FluxInputStreamTest extends Specification {
         Constants.MB || _
     }
 
+    def "FluxIS with empty byte buffers"() {
+        setup:
+        def num = Constants.KB
+        List buffers = new ArrayList<>()
+        for (int i = 0; i < num; i++) {
+            // Interleave single-byte buffers with empty ones to exercise the empty-buffer path.
+            buffers.add(ByteBuffer.wrap([i.byteValue()] as byte[]))
+            buffers.add(ByteBuffer.wrap(new byte[0]))
+        }
+        def data = Flux.fromIterable(buffers)
+
+        when:
+        def is = new FluxInputStream(data)
+        def bytes = new byte[num]
+
+        def totalRead = 0
+        def bytesRead = 0
+
+        while (bytesRead != -1 && totalRead < num) {
+            bytesRead = is.read(bytes, totalRead, num - totalRead)
+            if (bytesRead != -1) {
+                totalRead += bytesRead
+            }
+        }
+
+        is.close()
+
+        then:
+        for (int i = 0; i < num; i++) {
+            assert bytes[i] == i.byteValue()
+        }
+    }
+
     def "FluxIS error"() {
         setup:
         def data = Flux.error(exception)
diff --git a/sdk/storage/azure-storage-file-datalake/CHANGELOG.md b/sdk/storage/azure-storage-file-datalake/CHANGELOG.md
index 136b79699aca..6fa93001c456 100644
--- a/sdk/storage/azure-storage-file-datalake/CHANGELOG.md
+++ b/sdk/storage/azure-storage-file-datalake/CHANGELOG.md
@@ -1,7 +1,7 @@
 # Release History
 
 ## 12.2.0-beta.2 (Unreleased)
-
+- Fixed a bug where the query input stream would throw when a ByteBuffer of length 0 was encountered.
 
 ## 12.2.0-beta.1 (2019-07-07)
 - Added support for the 2019-12-12 service version.
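
A note on the fix itself: the filter(Buffer::hasRemaining) guard the patch adds can be seen in isolation in the sketch below. This is a minimal illustration, not part of the patch; it assumes reactor-core is on the classpath, and the class name and buffer contents are invented for the example. Zero-length buffers are dropped before they reach the downstream consumer, which is the condition that previously made FluxInputStream throw.

import java.nio.Buffer;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

import reactor.core.publisher.Flux;

public class EmptyBufferFilterSketch {
    public static void main(String[] args) {
        // Interleave real data with a zero-length buffer, similar to what a
        // chunked service response can emit.
        Flux<ByteBuffer> data = Flux.just(
            ByteBuffer.wrap("Hello, ".getBytes(StandardCharsets.UTF_8)),
            ByteBuffer.wrap(new byte[0]), // the kind of buffer that previously caused the throw
            ByteBuffer.wrap("world!".getBytes(StandardCharsets.UTF_8)));

        // The same guard the patch adds: drop buffers with no remaining bytes
        // before they reach the downstream consumer.
        data.filter(Buffer::hasRemaining)
            .map(buf -> StandardCharsets.UTF_8.decode(buf).toString())
            .subscribe(System.out::print); // prints "Hello, world!"
    }
}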