Skip to content

Commit

Permalink
stream: do not flush destroyed writable
Browse files Browse the repository at this point in the history
It doesn't make much sense to flush a stream which has been destroyed.

PR-URL: #29028
Reviewed-By: James M Snell <[email protected]>
Reviewed-By: Matteo Collina <[email protected]>
Reviewed-By: Anna Henningsen <[email protected]>
  • Loading branch information
ronag authored and Trott committed Sep 22, 2019
1 parent 95d6ad6 commit aa32e13
Show file tree
Hide file tree
Showing 4 changed files with 85 additions and 8 deletions.
24 changes: 21 additions & 3 deletions lib/_stream_writable.js
Original file line number Diff line number Diff line change
Expand Up @@ -299,9 +299,13 @@ Writable.prototype.write = function(chunk, encoding, cb) {
if (typeof cb !== 'function')
cb = nop;

if (state.ending)
if (state.ending) {
writeAfterEnd(this, cb);
else if (isBuf || validChunk(this, state, chunk, cb)) {
} else if (state.destroyed) {
const err = new ERR_STREAM_DESTROYED('write');
process.nextTick(cb, err);
errorOrDestroy(this, err);
} else if (isBuf || validChunk(this, state, chunk, cb)) {
state.pendingcb++;
ret = writeOrBuffer(this, state, isBuf, chunk, encoding, cb);
}
Expand Down Expand Up @@ -733,7 +737,21 @@ Object.defineProperty(Writable.prototype, 'writableFinished', {
}
});

Writable.prototype.destroy = destroyImpl.destroy;
// Wrap destroyImpl.destroy so that destroying a writable also rejects every
// write still sitting in the buffered queue, instead of flushing it later.
const destroy = destroyImpl.destroy;
Writable.prototype.destroy = function(err, cb) {
  const state = this._writableState;
  if (!state.destroyed) {
    // Fail each pending buffered write asynchronously with
    // ERR_STREAM_DESTROYED before tearing the stream down.
    let entry = state.bufferedRequest;
    while (entry) {
      process.nextTick(entry.callback, new ERR_STREAM_DESTROYED('write'));
      entry = entry.next;
    }
    // Reset the buffer bookkeeping so nothing gets flushed afterwards.
    state.bufferedRequest = null;
    state.lastBufferedRequest = null;
    state.bufferedRequestCount = 0;
  }
  destroy.call(this, err, cb);
  return this;
};

Writable.prototype._undestroy = destroyImpl.undestroy;
Writable.prototype._destroy = function(err, cb) {
cb(err);
Expand Down
6 changes: 5 additions & 1 deletion test/parallel/test-http2-server-stream-session-destroy.js
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,11 @@ server.on('stream', common.mustCall((stream) => {
code: 'ERR_STREAM_WRITE_AFTER_END',
message: 'write after end'
}));
assert.strictEqual(stream.write('data'), false);
assert.strictEqual(stream.write('data', common.expectsError({
type: Error,
code: 'ERR_STREAM_WRITE_AFTER_END',
message: 'write after end'
})), false);
}));

server.listen(0, common.mustCall(() => {
Expand Down
46 changes: 46 additions & 0 deletions test/parallel/test-stream-writable-destroy.js
Original file line number Diff line number Diff line change
Expand Up @@ -232,3 +232,49 @@ const assert = require('assert');
write._undestroy();
write.end();
}

{
  // Writing after destroy() must fail with ERR_STREAM_DESTROYED, reported
  // both through the 'error' event and through the write() callback.
  const write = new Writable();

  write.destroy();

  const destroyedError = {
    type: Error,
    code: 'ERR_STREAM_DESTROYED',
    message: 'Cannot call write after a stream was destroyed'
  };
  write.on('error', common.expectsError(destroyedError));
  write.write('asd', common.expectsError(destroyedError));
}

{
  // While corked, writes are buffered; destroy() must reject the buffered
  // write and any later write with ERR_STREAM_DESTROYED.
  const write = new Writable({
    write(chunk, enc, cb) { cb(); }
  });

  const destroyedError = {
    type: Error,
    code: 'ERR_STREAM_DESTROYED',
    message: 'Cannot call write after a stream was destroyed'
  };

  write.on('error', common.expectsError(destroyedError));

  // First cork/uncork cycle: the stream is alive, so this write succeeds.
  write.cork();
  write.write('asd', common.mustCall());
  write.uncork();

  // Second cycle: destroy() while corked fails the buffered write...
  write.cork();
  write.write('asd', common.expectsError(destroyedError));
  write.destroy();
  // ...and a write issued after destroy() fails as well.
  write.write('asd', common.expectsError(destroyedError));
  write.uncork();
}
17 changes: 13 additions & 4 deletions test/parallel/test-stream-write-destroy.js
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,16 @@ for (const withPendingData of [ false, true ]) {
w.on('drain', () => drains++);
w.on('finish', () => finished = true);

w.write('abc', () => chunksWritten++);
// Shared write callback: successful writes bump the counter; a failure is
// only acceptable when the stream was destroyed underneath the write.
function onWrite(err) {
  if (!err) {
    chunksWritten++;
    return;
  }
  assert.strictEqual(w.destroyed, true);
  assert.strictEqual(err.code, 'ERR_STREAM_DESTROYED');
}

w.write('abc', onWrite);
assert.strictEqual(chunksWritten, 0);
assert.strictEqual(drains, 0);
callbacks.shift()();
Expand All @@ -34,14 +43,14 @@ for (const withPendingData of [ false, true ]) {
if (withPendingData) {
// Test 2 cases: There either is or is not data still in the write queue.
// (The second write will never actually get executed either way.)
w.write('def', () => chunksWritten++);
w.write('def', onWrite);
}
if (useEnd) {
// Again, test 2 cases: Either we indicate that we want to end the
// writable or not.
w.end('ghi', () => chunksWritten++);
w.end('ghi', onWrite);
} else {
w.write('ghi', () => chunksWritten++);
w.write('ghi', onWrite);
}

assert.strictEqual(chunksWritten, 1);
Expand Down

0 comments on commit aa32e13

Please sign in to comment.