diff --git a/core/src/main/scala/org/apache/spark/storage/BlockManager.scala b/core/src/main/scala/org/apache/spark/storage/BlockManager.scala
index aa2561d8c3898..406d957dd298f 100644
--- a/core/src/main/scala/org/apache/spark/storage/BlockManager.scala
+++ b/core/src/main/scala/org/apache/spark/storage/BlockManager.scala
@@ -59,7 +59,7 @@ private[spark] class BlockManager(
     executorId: String,
     rpcEnv: RpcEnv,
     val master: BlockManagerMaster,
-    defaultSerializer: Serializer,
+    val defaultSerializer: Serializer,
     val conf: SparkConf,
     memoryManager: MemoryManager,
     mapOutputTracker: MapOutputTracker,
@@ -750,7 +750,7 @@ private[spark] class BlockManager(
         // We will drop it to disk later if the memory store can't hold it.
         val putSucceeded = if (level.deserialized) {
           val values = dataDeserialize(blockId, bytes)
-          memoryStore.putIterator(blockId, values, level) match {
+          memoryStore.putIteratorAsValues(blockId, values) match {
             case Right(_) => true
             case Left(iter) =>
               // If putting deserialized values in memory failed, we will put the bytes directly to
@@ -878,21 +878,40 @@ private[spark] class BlockManager(
       if (level.useMemory) {
         // Put it in memory first, even if it also has useDisk set to true;
         // We will drop it to disk later if the memory store can't hold it.
-        memoryStore.putIterator(blockId, iterator(), level) match {
-          case Right(s) =>
-            size = s
-          case Left(iter) =>
-            // Not enough space to unroll this block; drop to disk if applicable
-            if (level.useDisk) {
-              logWarning(s"Persisting block $blockId to disk instead.")
-              diskStore.put(blockId) { fileOutputStream =>
-                dataSerializeStream(blockId, fileOutputStream, iter)
+        if (level.deserialized) {
+          memoryStore.putIteratorAsValues(blockId, iterator()) match {
+            case Right(s) =>
+              size = s
+            case Left(iter) =>
+              // Not enough space to unroll this block; drop to disk if applicable
+              if (level.useDisk) {
+                logWarning(s"Persisting block $blockId to disk instead.")
+                diskStore.put(blockId) { fileOutputStream =>
+                  dataSerializeStream(blockId, fileOutputStream, iter)
+                }
+                size = diskStore.getSize(blockId)
+              } else {
+                iteratorFromFailedMemoryStorePut = Some(iter)
               }
-              size = diskStore.getSize(blockId)
-            } else {
-              iteratorFromFailedMemoryStorePut = Some(iter)
-            }
+          }
+        } else { // !level.deserialized
+          memoryStore.putIteratorAsBytes(blockId, iterator()) match {
+            case Right(s) =>
+              size = s
+            case Left(partiallySerializedValues) =>
+              // Not enough space to unroll this block; drop to disk if applicable
+              if (level.useDisk) {
+                logWarning(s"Persisting block $blockId to disk instead.")
+                diskStore.put(blockId) { fileOutputStream =>
+                  partiallySerializedValues.finishWriting(fileOutputStream)
+                }
+                size = diskStore.getSize(blockId)
+              } else {
+                iteratorFromFailedMemoryStorePut = Some(partiallySerializedValues.valuesIterator)
+              }
+          }
         }
       } else if (level.useDisk) {
         diskStore.put(blockId) { fileOutputStream =>
           dataSerializeStream(blockId, fileOutputStream, iterator())
@@ -992,7 +1011,7 @@ private[spark] class BlockManager(
             // Note: if we had a means to discard the disk iterator, we would do that here.
             memoryStore.getValues(blockId).get
           } else {
-            memoryStore.putIterator(blockId, diskIterator, level) match {
+            memoryStore.putIteratorAsValues(blockId, diskIterator) match {
               case Left(iter) =>
                 // The memory store put() failed, so it returned the iterator back to us:
                 iter
diff --git a/core/src/main/scala/org/apache/spark/storage/memory/MemoryStore.scala b/core/src/main/scala/org/apache/spark/storage/memory/MemoryStore.scala
index 94171324f84b5..91785ae8bdd43 100644
--- a/core/src/main/scala/org/apache/spark/storage/memory/MemoryStore.scala
+++ b/core/src/main/scala/org/apache/spark/storage/memory/MemoryStore.scala
@@ -17,31 +17,37 @@
 package org.apache.spark.storage.memory
 
+import java.io.OutputStream
+import java.nio.ByteBuffer
 import java.util.LinkedHashMap
 
 import scala.collection.mutable
 import scala.collection.mutable.ArrayBuffer
 
+import com.google.common.io.ByteStreams
+
 import org.apache.spark.{SparkConf, TaskContext}
 import org.apache.spark.internal.Logging
 import org.apache.spark.memory.MemoryManager
-import org.apache.spark.storage.{BlockId, BlockManager, StorageLevel}
+import org.apache.spark.serializer.SerializationStream
+import org.apache.spark.storage.{BlockId, BlockManager}
 import org.apache.spark.util.{CompletionIterator, SizeEstimator, Utils}
 import org.apache.spark.util.collection.SizeTrackingVector
-import org.apache.spark.util.io.ChunkedByteBuffer
+import org.apache.spark.util.io.{ByteArrayChunkOutputStream, ChunkedByteBuffer}
 
 private sealed trait MemoryEntry {
-  val size: Long
+  def size: Long
 }
 private case class DeserializedMemoryEntry(value: Array[Any], size: Long) extends MemoryEntry
-private case class SerializedMemoryEntry(buffer: ChunkedByteBuffer, size: Long) extends MemoryEntry
+private case class SerializedMemoryEntry(buffer: ChunkedByteBuffer) extends MemoryEntry {
+  def size: Long = buffer.size
+}
 
 /**
  * Stores blocks in memory, either as Arrays of deserialized Java objects or as
 * serialized ByteBuffers.
  */
 private[spark] class MemoryStore(
     conf: SparkConf,
     blockManager: BlockManager,
     memoryManager: MemoryManager)
   extends Logging {
@@ -101,7 +107,7 @@ private[spark] class MemoryStore(
       // We acquired enough memory for the block, so go ahead and put it
       val bytes = _bytes()
       assert(bytes.size == size)
-      val entry = new SerializedMemoryEntry(bytes, size)
+      val entry = new SerializedMemoryEntry(bytes)
       entries.synchronized {
         entries.put(blockId, entry)
       }
@@ -114,7 +120,7 @@ private[spark] class MemoryStore(
   }
 
   /**
-   * Attempt to put the given block in memory store.
+   * Attempt to put the given block in memory store as values.
    *
    * It's possible that the iterator is too large to materialize and store in memory. To avoid
    * OOM exceptions, this method will gradually unroll the iterator while periodically checking
@@ -129,10 +135,9 @@ private[spark] class MemoryStore(
    * iterator or call `close()` on it in order to free the storage memory consumed by the
    * partially-unrolled block.
    */
-  private[storage] def putIterator(
+  private[storage] def putIteratorAsValues(
       blockId: BlockId,
-      values: Iterator[Any],
-      level: StorageLevel): Either[PartiallyUnrolledIterator, Long] = {
+      values: Iterator[Any]): Either[PartiallyUnrolledIterator, Long] = {
 
     require(!contains(blockId), s"Block $blockId is already present in the MemoryStore")
 
@@ -186,12 +191,7 @@ private[spark] class MemoryStore(
       // We successfully unrolled the entirety of this block
       val arrayValues = vector.toArray
       vector = null
-      val entry = if (level.deserialized) {
-        new DeserializedMemoryEntry(arrayValues, SizeEstimator.estimate(arrayValues))
-      } else {
-        val bytes = blockManager.dataSerialize(blockId, arrayValues.iterator)
-        new SerializedMemoryEntry(bytes, bytes.size)
-      }
+      val entry = new DeserializedMemoryEntry(arrayValues, SizeEstimator.estimate(arrayValues))
       val size = entry.size
       def transferUnrollToStorage(amount: Long): Unit = {
         // Synchronize so that transfer is atomic
@@ -223,9 +223,8 @@ private[spark] class MemoryStore(
       entries.synchronized {
         entries.put(blockId, entry)
       }
-      val bytesOrValues = if (level.deserialized) "values" else "bytes"
-      logInfo("Block %s stored as %s in memory (estimated size %s, free %s)".format(
-        blockId, bytesOrValues, Utils.bytesToString(size), Utils.bytesToString(blocksMemoryUsed)))
+      logInfo("Block %s stored as values in memory (estimated size %s, free %s)".format(
+        blockId, Utils.bytesToString(size), Utils.bytesToString(blocksMemoryUsed)))
       Right(size)
     } else {
       assert(currentUnrollMemoryForThisTask >= currentUnrollMemoryForThisTask,
@@ -244,13 +243,113 @@ private[spark] class MemoryStore(
     }
   }
 
+  /**
+   * Attempt to put the given block in memory store as bytes.
+   *
+   * It's possible that the iterator is too large to materialize and store in memory. To avoid
+   * OOM exceptions, this method will gradually unroll the iterator while periodically checking
+   * whether there is enough free memory. If the block is successfully materialized, then the
+   * temporary unroll memory used during the materialization is "transferred" to storage memory,
+   * so we won't acquire more memory than is actually needed to store the block.
+   *
+   * @return in case of success, the estimated size of the stored data. In case of
+   *         failure, return a handle which allows the caller to either finish the serialization
+   *         by spilling to disk or to deserialize the partially-serialized block and reconstruct
+   *         the original input iterator. The caller must either fully consume this result
+   *         iterator or call `discard()` on it in order to free the storage memory consumed by the
+   *         partially-unrolled block.
+   */
+  private[storage] def putIteratorAsBytes(
+      blockId: BlockId,
+      values: Iterator[Any]): Either[PartiallySerializedBlock, Long] = {
+
+    require(!contains(blockId), s"Block $blockId is already present in the MemoryStore")
+
+    // Whether there is still enough memory for us to continue unrolling this block
+    var keepUnrolling = true
+    // Initial per-task memory to request for unrolling blocks (bytes).
+    val initialMemoryThreshold = unrollMemoryThreshold
+    // Keep track of unroll memory used by this particular block / putIterator() operation
+    var unrollMemoryUsedByThisBlock = 0L
+    // Underlying buffer for unrolling the block
+    val redirectableStream = new RedirectableOutputStream
+    val byteArrayChunkOutputStream = new ByteArrayChunkOutputStream(initialMemoryThreshold.toInt)
+    redirectableStream.setOutputStream(byteArrayChunkOutputStream)
+    val serializationStream: SerializationStream = {
+      val ser = blockManager.defaultSerializer.newInstance()
+      ser.serializeStream(blockManager.wrapForCompression(blockId, redirectableStream))
+    }
+
+    // Request enough memory to begin unrolling
+    keepUnrolling = reserveUnrollMemoryForThisTask(blockId, initialMemoryThreshold)
+
+    if (!keepUnrolling) {
+      logWarning(s"Failed to reserve initial memory threshold of " +
+        s"${Utils.bytesToString(initialMemoryThreshold)} for computing block $blockId in memory.")
+    } else {
+      unrollMemoryUsedByThisBlock += initialMemoryThreshold
+    }
+
+    def reserveAdditionalMemoryIfNecessary(): Unit = {
+      if (byteArrayChunkOutputStream.size > unrollMemoryUsedByThisBlock) {
+        val amountToRequest = byteArrayChunkOutputStream.size - unrollMemoryUsedByThisBlock
+        keepUnrolling = reserveUnrollMemoryForThisTask(blockId, amountToRequest)
+        if (keepUnrolling) {
+          unrollMemoryUsedByThisBlock += amountToRequest
+        }
+      }
+    }
+
+    // Unroll this block safely, checking whether we have exceeded our threshold
+    while (values.hasNext && keepUnrolling) {
+      serializationStream.writeObject(values.next())
+      reserveAdditionalMemoryIfNecessary()
+    }
+
+    if (keepUnrolling) {
+      serializationStream.close()
+      reserveAdditionalMemoryIfNecessary()
+    }
+
+    if (keepUnrolling) {
+      val entry = SerializedMemoryEntry(
+        new ChunkedByteBuffer(byteArrayChunkOutputStream.toArrays.map(ByteBuffer.wrap)))
+      // Synchronize so that transfer is atomic
+      memoryManager.synchronized {
+        releaseUnrollMemoryForThisTask(unrollMemoryUsedByThisBlock)
+        val success = memoryManager.acquireStorageMemory(blockId, entry.size)
+        assert(success, "transferring unroll memory to storage memory failed")
+      }
+      entries.synchronized {
+        entries.put(blockId, entry)
+      }
+      logInfo("Block %s stored as bytes in memory (estimated size %s, free %s)".format(
+        blockId, Utils.bytesToString(entry.size), Utils.bytesToString(blocksMemoryUsed)))
+      Right(entry.size)
+    } else {
+      // We ran out of space while unrolling the values for this block
+      logUnrollFailureMessage(blockId, byteArrayChunkOutputStream.size)
+      Left(
+        new PartiallySerializedBlock(
+          this,
+          blockManager,
+          blockId,
+          serializationStream,
+          redirectableStream,
+          unrollMemoryUsedByThisBlock,
+          new ChunkedByteBuffer(byteArrayChunkOutputStream.toArrays.map(ByteBuffer.wrap)),
+          values))
+    }
+  }
+
   def getBytes(blockId: BlockId): Option[ChunkedByteBuffer] = {
     val entry = entries.synchronized { entries.get(blockId) }
     entry match {
       case null => None
       case e: DeserializedMemoryEntry =>
         throw new IllegalArgumentException("should only call getBytes on serialized blocks")
-      case SerializedMemoryEntry(bytes, _) => Some(bytes)
+      case SerializedMemoryEntry(bytes) => Some(bytes)
     }
   }
 
@@ -343,7 +442,7 @@ private[spark] class MemoryStore(
     if (entry != null) {
       val data = entry match {
         case DeserializedMemoryEntry(values, _) => Left(values)
-        case SerializedMemoryEntry(buffer, _) => Right(buffer)
+        case SerializedMemoryEntry(buffer) => Right(buffer)
       }
       val newEffectiveStorageLevel = blockManager.dropFromMemory(blockId, () => data)
       if (newEffectiveStorageLevel.isValid) {
@@ -463,12 +562,13 @@ private[spark] class MemoryStore(
 }
 
 /**
- * The result of a failed [[MemoryStore.putIterator()]] call.
+ * The result of a failed [[MemoryStore.putIteratorAsValues()]] call.
  *
- * @param memoryStore the memoryStore, used for freeing memory.
+ * @param memoryStore  the memoryStore, used for freeing memory.
  * @param unrollMemory the amount of unroll memory used by the values in `unrolled`.
- * @param unrolled an iterator for the partially-unrolled values.
- * @param rest the rest of the original iterator passed to [[MemoryStore.putIterator()]].
+ * @param unrolled     an iterator for the partially-unrolled values.
+ * @param rest         the rest of the original iterator passed to
+ *                     [[MemoryStore.putIteratorAsValues()]].
  */
 private[storage] class PartiallyUnrolledIterator(
     memoryStore: MemoryStore,
@@ -500,3 +600,52 @@ private[storage] class PartiallyUnrolledIterator(
     iter = null
   }
 }
+
+private class RedirectableOutputStream extends OutputStream {
+  private[this] var os: OutputStream = _
+  def setOutputStream(s: OutputStream): Unit = { os = s }
+  override def write(b: Int): Unit = os.write(b)
+  override def write(b: Array[Byte]): Unit = os.write(b)
+  override def write(b: Array[Byte], off: Int, len: Int): Unit = os.write(b, off, len)
+  override def flush(): Unit = os.flush()
+  override def close(): Unit = os.close()
+}
+
+/**
+ * The result of a failed [[MemoryStore.putIteratorAsBytes()]] call.
+ */
+private[storage] class PartiallySerializedBlock(
+    memoryStore: MemoryStore,
+    blockManager: BlockManager,
+    blockId: BlockId,
+    serializationStream: SerializationStream,
+    redirectableOutputStream: RedirectableOutputStream,
+    unrollMemory: Long,
+    unrolled: ChunkedByteBuffer,
+    iter: Iterator[Any]) {
+
+  def discard(): Unit = {
+    try {
+      serializationStream.close()
+    } finally {
+      memoryStore.releaseUnrollMemoryForThisTask(unrollMemory)
+    }
+  }
+
+  def finishWriting(os: OutputStream): Unit = {
+    ByteStreams.copy(unrolled.toInputStream(), os)
+    redirectableOutputStream.setOutputStream(os)
+    while (iter.hasNext) {
+      serializationStream.writeObject(iter.next())
+    }
+    serializationStream.close()
+  }
+
+  def valuesIterator: PartiallyUnrolledIterator = {
+    new PartiallyUnrolledIterator(
+      memoryStore,
+      unrollMemory,
+      unrolled = blockManager.dataDeserialize(blockId, unrolled),
+      rest = iter)
+  }
+}
diff --git a/core/src/main/scala/org/apache/spark/util/ByteBufferOutputStream.scala b/core/src/main/scala/org/apache/spark/util/ByteBufferOutputStream.scala
index 8527e3ae692e2..09e7579ae9606 100644
--- a/core/src/main/scala/org/apache/spark/util/ByteBufferOutputStream.scala
+++ b/core/src/main/scala/org/apache/spark/util/ByteBufferOutputStream.scala
@@ -27,6 +27,8 @@ private[spark] class ByteBufferOutputStream(capacity: Int) extends ByteArrayOutp
 
   def this() = this(32)
 
+  def getCount(): Int = count
+
   def toByteBuffer: ByteBuffer = {
     return ByteBuffer.wrap(buf, 0, count)
   }
diff --git a/core/src/main/scala/org/apache/spark/util/io/ByteArrayChunkOutputStream.scala b/core/src/main/scala/org/apache/spark/util/io/ByteArrayChunkOutputStream.scala
index daac6f971eb20..16fe3be303371 100644
--- a/core/src/main/scala/org/apache/spark/util/io/ByteArrayChunkOutputStream.scala
+++ b/core/src/main/scala/org/apache/spark/util/io/ByteArrayChunkOutputStream.scala
@@ -30,10 +30,10 @@ import scala.collection.mutable.ArrayBuffer
 private[spark]
 class ByteArrayChunkOutputStream(chunkSize: Int) extends OutputStream {
 
-  private val chunks = new ArrayBuffer[Array[Byte]]
+  private[this] val chunks = new ArrayBuffer[Array[Byte]]
 
   /** Index of the last chunk. Starting with -1 when the chunks array is empty. */
-  private var lastChunkIndex = -1
+  private[this] var lastChunkIndex = -1
 
   /**
    * Next position to write in the last chunk.
@@ -41,12 +41,16 @@ class ByteArrayChunkOutputStream(chunkSize: Int) extends OutputStream {
    * If this equals chunkSize, it means for next write we need to allocate a new chunk.
    * This can also never be 0.
    */
-  private var position = chunkSize
+  private[this] var position = chunkSize
+  private[this] var _size = 0
+
+  def size: Long = _size
 
   override def write(b: Int): Unit = {
     allocateNewChunkIfNeeded()
     chunks(lastChunkIndex)(position) = b.toByte
     position += 1
+    _size += 1
   }
 
   override def write(bytes: Array[Byte], off: Int, len: Int): Unit = {
@@ -58,6 +62,7 @@ class ByteArrayChunkOutputStream(chunkSize: Int) extends OutputStream {
       written += thisBatch
       position += thisBatch
     }
+    _size += len
   }
 
   @inline
diff --git a/core/src/test/scala/org/apache/spark/storage/BlockManagerSuite.scala b/core/src/test/scala/org/apache/spark/storage/BlockManagerSuite.scala
index edf5cd35e40ee..d24d12824f24e 100644
--- a/core/src/test/scala/org/apache/spark/storage/BlockManagerSuite.scala
+++ b/core/src/test/scala/org/apache/spark/storage/BlockManagerSuite.scala
@@ -1074,7 +1074,7 @@ class BlockManagerSuite extends SparkFunSuite with Matchers with BeforeAndAfterE
     assert(memoryStore.currentUnrollMemoryForThisTask === 0)
 
     // Unroll with all the space in the world. This should succeed.
-    var putResult = memoryStore.putIterator("unroll", smallList.iterator, StorageLevel.MEMORY_ONLY)
+    var putResult = memoryStore.putIteratorAsValues("unroll", smallList.iterator)
     assert(putResult.isRight)
     assert(memoryStore.currentUnrollMemoryForThisTask === 0)
     smallList.iterator.zip(memoryStore.getValues("unroll").get).foreach { case (e, a) =>
@@ -1085,7 +1085,7 @@ class BlockManagerSuite extends SparkFunSuite with Matchers with BeforeAndAfterE
     // Unroll with not enough space. This should succeed after kicking out someBlock1.
     assert(store.putIterator("someBlock1", smallList.iterator, StorageLevel.MEMORY_ONLY))
     assert(store.putIterator("someBlock2", smallList.iterator, StorageLevel.MEMORY_ONLY))
-    putResult = memoryStore.putIterator("unroll", smallList.iterator, StorageLevel.MEMORY_ONLY)
+    putResult = memoryStore.putIteratorAsValues("unroll", smallList.iterator)
     assert(putResult.isRight)
     assert(memoryStore.currentUnrollMemoryForThisTask === 0)
     assert(memoryStore.contains("someBlock2"))
@@ -1099,7 +1099,7 @@ class BlockManagerSuite extends SparkFunSuite with Matchers with BeforeAndAfterE
     // 4800 bytes, there is still not enough room to unroll this block. This returns an iterator.
     // In the mean time, however, we kicked out someBlock2 before giving up.
assert(store.putIterator("someBlock3", smallList.iterator, StorageLevel.MEMORY_ONLY)) - putResult = memoryStore.putIterator("unroll", bigList.iterator, StorageLevel.MEMORY_ONLY) + putResult = memoryStore.putIteratorAsValues("unroll", bigList.iterator) assert(memoryStore.currentUnrollMemoryForThisTask > 0) // we returned an iterator assert(!memoryStore.contains("someBlock2")) assert(putResult.isLeft) @@ -1112,7 +1112,6 @@ class BlockManagerSuite extends SparkFunSuite with Matchers with BeforeAndAfterE test("safely unroll blocks through putIterator") { store = makeBlockManager(12000) - val memOnly = StorageLevel.MEMORY_ONLY val memoryStore = store.memoryStore val smallList = List.fill(40)(new Array[Byte](100)) val bigList = List.fill(40)(new Array[Byte](1000)) @@ -1121,8 +1120,8 @@ class BlockManagerSuite extends SparkFunSuite with Matchers with BeforeAndAfterE assert(memoryStore.currentUnrollMemoryForThisTask === 0) // Unroll with plenty of space. This should succeed and cache both blocks. - val result1 = memoryStore.putIterator("b1", smallIterator, memOnly) - val result2 = memoryStore.putIterator("b2", smallIterator, memOnly) + val result1 = memoryStore.putIteratorAsValues("b1", smallIterator) + val result2 = memoryStore.putIteratorAsValues("b2", smallIterator) assert(memoryStore.contains("b1")) assert(memoryStore.contains("b2")) assert(result1.isRight) // unroll was successful @@ -1133,21 +1132,21 @@ class BlockManagerSuite extends SparkFunSuite with Matchers with BeforeAndAfterE // would not know how to drop them from memory later. memoryStore.remove("b1") memoryStore.remove("b2") - store.putIterator("b1", smallIterator, memOnly) - store.putIterator("b2", smallIterator, memOnly) + store.putIterator("b1", smallIterator, StorageLevel.MEMORY_ONLY) + store.putIterator("b2", smallIterator, StorageLevel.MEMORY_ONLY) // Unroll with not enough space. This should succeed but kick out b1 in the process. - val result3 = memoryStore.putIterator("b3", smallIterator, memOnly) + val result3 = memoryStore.putIteratorAsValues("b3", smallIterator) assert(result3.isRight) assert(!memoryStore.contains("b1")) assert(memoryStore.contains("b2")) assert(memoryStore.contains("b3")) assert(memoryStore.currentUnrollMemoryForThisTask === 0) memoryStore.remove("b3") - store.putIterator("b3", smallIterator, memOnly) + store.putIterator("b3", smallIterator, StorageLevel.MEMORY_ONLY) // Unroll huge block with not enough space. This should fail and kick out b2 in the process. 
-    val result4 = memoryStore.putIterator("b4", bigIterator, memOnly)
+    val result4 = memoryStore.putIteratorAsValues("b4", bigIterator)
     assert(result4.isLeft) // unroll was unsuccessful
     assert(!memoryStore.contains("b1"))
     assert(!memoryStore.contains("b2"))
@@ -1161,7 +1160,6 @@ class BlockManagerSuite extends SparkFunSuite with Matchers with BeforeAndAfterE
    */
   test("safely unroll blocks through putIterator (disk)") {
     store = makeBlockManager(12000)
-    val memAndDisk = StorageLevel.MEMORY_AND_DISK
     val memoryStore = store.memoryStore
     val diskStore = store.diskStore
     val smallList = List.fill(40)(new Array[Byte](100))
@@ -1170,12 +1168,12 @@ class BlockManagerSuite extends SparkFunSuite with Matchers with BeforeAndAfterE
     def bigIterator: Iterator[Any] = bigList.iterator.asInstanceOf[Iterator[Any]]
     assert(memoryStore.currentUnrollMemoryForThisTask === 0)
 
-    store.putIterator("b1", smallIterator, memAndDisk)
-    store.putIterator("b2", smallIterator, memAndDisk)
+    store.putIterator("b1", smallIterator, StorageLevel.MEMORY_AND_DISK)
+    store.putIterator("b2", smallIterator, StorageLevel.MEMORY_AND_DISK)
 
     // Unroll with not enough space. This should succeed but kick out b1 in the process.
     // Memory store should contain b2 and b3, while disk store should contain only b1
-    val result3 = memoryStore.putIterator("b3", smallIterator, memAndDisk)
+    val result3 = memoryStore.putIteratorAsValues("b3", smallIterator)
     assert(result3.isRight)
     assert(!memoryStore.contains("b1"))
     assert(memoryStore.contains("b2"))
@@ -1191,7 +1189,7 @@ class BlockManagerSuite extends SparkFunSuite with Matchers with BeforeAndAfterE
     // the block may be stored to disk. During the unrolling process, block "b2" should be kicked
     // out, so the memory store should contain only b3, while the disk store should contain
     // b1, b2 and b4.
-    val result4 = memoryStore.putIterator("b4", bigIterator, memAndDisk)
+    val result4 = memoryStore.putIteratorAsValues("b4", bigIterator)
     assert(result4.isLeft)
     assert(!memoryStore.contains("b1"))
     assert(!memoryStore.contains("b2"))
@@ -1211,28 +1209,28 @@ class BlockManagerSuite extends SparkFunSuite with Matchers with BeforeAndAfterE
     assert(memoryStore.currentUnrollMemoryForThisTask === 0)
 
     // All unroll memory used is released because putIterator did not return an iterator
-    assert(memoryStore.putIterator("b1", smallIterator, memOnly).isRight)
+    assert(memoryStore.putIteratorAsValues("b1", smallIterator).isRight)
     assert(memoryStore.currentUnrollMemoryForThisTask === 0)
-    assert(memoryStore.putIterator("b2", smallIterator, memOnly).isRight)
+    assert(memoryStore.putIteratorAsValues("b2", smallIterator).isRight)
     assert(memoryStore.currentUnrollMemoryForThisTask === 0)
 
     // Unroll memory is not released because putIterator returned an iterator
     // that still depends on the underlying vector used in the process
-    assert(memoryStore.putIterator("b3", smallIterator, memOnly).isLeft)
+    assert(memoryStore.putIteratorAsValues("b3", smallIterator).isLeft)
     val unrollMemoryAfterB3 = memoryStore.currentUnrollMemoryForThisTask
     assert(unrollMemoryAfterB3 > 0)
 
     // The unroll memory owned by this thread builds on top of its value after the previous unrolls
-    assert(memoryStore.putIterator("b4", smallIterator, memOnly).isLeft)
+    assert(memoryStore.putIteratorAsValues("b4", smallIterator).isLeft)
     val unrollMemoryAfterB4 = memoryStore.currentUnrollMemoryForThisTask
     assert(unrollMemoryAfterB4 > unrollMemoryAfterB3)
 
     // ... but only to a certain extent (until we run out of free space to grant new unroll memory)
-    assert(memoryStore.putIterator("b5", smallIterator, memOnly).isLeft)
+    assert(memoryStore.putIteratorAsValues("b5", smallIterator).isLeft)
     val unrollMemoryAfterB5 = memoryStore.currentUnrollMemoryForThisTask
-    assert(memoryStore.putIterator("b6", smallIterator, memOnly).isLeft)
+    assert(memoryStore.putIteratorAsValues("b6", smallIterator).isLeft)
     val unrollMemoryAfterB6 = memoryStore.currentUnrollMemoryForThisTask
-    assert(memoryStore.putIterator("b7", smallIterator, memOnly).isLeft)
+    assert(memoryStore.putIteratorAsValues("b7", smallIterator).isLeft)
     val unrollMemoryAfterB7 = memoryStore.currentUnrollMemoryForThisTask
     assert(unrollMemoryAfterB5 === unrollMemoryAfterB4)
     assert(unrollMemoryAfterB6 === unrollMemoryAfterB4)
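
Not part of the patch: a minimal, self-contained sketch of the incremental-unroll bookkeeping that putIteratorAsBytes and ByteArrayChunkOutputStream.size implement above. It substitutes plain JDK streams (ByteArrayOutputStream, ObjectOutputStream) for ByteArrayChunkOutputStream and the block SerializationStream, and a hypothetical reserve() helper with a made-up budget for reserveUnrollMemoryForThisTask; the object name and numbers are invented for illustration, and only the shape of the loop is meant to mirror the patch.

    import java.io.{ByteArrayOutputStream, ObjectOutputStream}

    object IncrementalUnrollSketch {
      def main(args: Array[String]): Unit = {
        // Records to "unroll"; Array[Byte] stands in for arbitrary serializable block values.
        val values: Iterator[Array[Byte]] = Iterator.fill(1000)(new Array[Byte](128))

        val budget = 64 * 1024L        // pretend free storage memory
        var reserved = 4 * 1024L       // initial unroll reservation (initialMemoryThreshold)
        var keepUnrolling = reserved <= budget

        // Stand-in for reserveUnrollMemoryForThisTask: grant more unroll memory if it fits.
        def reserve(extra: Long): Boolean =
          if (reserved + extra <= budget) { reserved += extra; true } else false

        val buffer = new ByteArrayOutputStream()   // stands in for ByteArrayChunkOutputStream
        val out = new ObjectOutputStream(buffer)   // stands in for the SerializationStream

        // Serialize one record at a time; whenever the bytes written so far exceed the
        // unroll memory reserved, try to reserve the difference before continuing.
        while (values.hasNext && keepUnrolling) {
          out.writeObject(values.next())
          out.flush()
          if (buffer.size() > reserved) {
            keepUnrolling = reserve(buffer.size() - reserved)
          }
        }

        if (keepUnrolling) {
          out.close()
          println(s"block fully serialized in memory: ${buffer.size()} bytes")
        } else {
          // The patch returns a PartiallySerializedBlock here so the caller can redirect the
          // same serialization stream to disk (finishWriting) or rebuild the value iterator.
          println(s"out of unroll memory after ${buffer.size()} bytes; spill to disk or fail")
        }
      }
    }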