diff --git a/doc/api/http2.md b/doc/api/http2.md
index 8698af204cf..acd985ede91 100644
--- a/doc/api/http2.md
+++ b/doc/api/http2.md
@@ -2065,8 +2065,8 @@ properties.
* `maxConcurrentStreams` {number} Specifies the maximum number of concurrent
streams permitted on an `Http2Session`. There is no default value which
 implies, at least theoretically, 2<sup>31</sup>-1 streams may be open
-  concurrently at any given time in an `Http2Session`. The minimum value is
-  0. The maximum allowed value is 2<sup>31</sup>-1.
+  concurrently at any given time in an `Http2Session`. The minimum value
+  is 0. The maximum allowed value is 2<sup>31</sup>-1.
* `maxHeaderListSize` {number} Specifies the maximum size (uncompressed octets)
 of header list that will be accepted. The minimum allowed value is 0. The
 maximum allowed value is 2<sup>32</sup>-1. **Default:** 65535.
diff --git a/lib/child_process.js b/lib/child_process.js
index 2106bdc6819..97b436cc076 100644
--- a/lib/child_process.js
+++ b/lib/child_process.js
@@ -386,9 +386,12 @@ function _convertCustomFds(options) {
}
function normalizeSpawnArguments(file, args, options) {
- if (typeof file !== 'string' || file.length === 0)
+ if (typeof file !== 'string')
throw new ERR_INVALID_ARG_TYPE('file', 'string', file);
+ if (file.length === 0)
+ throw new ERR_INVALID_ARG_VALUE('file', file, 'cannot be empty');
+
if (Array.isArray(args)) {
args = args.slice(0);
} else if (args !== undefined &&
diff --git a/lib/net.js b/lib/net.js
index c9116fb1a80..aa5709981ca 100644
--- a/lib/net.js
+++ b/lib/net.js
@@ -42,11 +42,17 @@ const {
const { Buffer } = require('buffer');
const TTYWrap = process.binding('tty_wrap');
-const { TCP, constants: TCPConstants } = process.binding('tcp_wrap');
-const { Pipe, constants: PipeConstants } = process.binding('pipe_wrap');
-const { TCPConnectWrap } = process.binding('tcp_wrap');
-const { PipeConnectWrap } = process.binding('pipe_wrap');
const { ShutdownWrap } = process.binding('stream_wrap');
+const {
+ TCP,
+ TCPConnectWrap,
+ constants: TCPConstants
+} = process.binding('tcp_wrap');
+const {
+ Pipe,
+ PipeConnectWrap,
+ constants: PipeConstants
+} = process.binding('pipe_wrap');
const {
newAsyncId,
defaultTriggerAsyncIdScope,
diff --git a/lib/stream.js b/lib/stream.js
index 9a816600a05..ba056026d8b 100644
--- a/lib/stream.js
+++ b/lib/stream.js
@@ -38,16 +38,15 @@ Stream.Stream = Stream;
// Internal utilities
try {
- Stream._isUint8Array = require('internal/util/types').isUint8Array;
-} catch (e) {
- // Throws for code outside of Node.js core.
-
- try {
- Stream._isUint8Array = process.binding('util').isUint8Array;
- } catch (e) {
+ const types = require('util').types;
+ if (types && typeof types.isUint8Array === 'function') {
+ Stream._isUint8Array = types.isUint8Array;
+ } else {
// This throws for Node < 4.2.0 because there's no util binding and
// returns undefined for Node < 7.4.0.
+ Stream._isUint8Array = process.binding('util').isUint8Array;
}
+} catch (e) {
}
if (!Stream._isUint8Array) {
diff --git a/lib/timers.js b/lib/timers.js
index 32f6ccb00cd..88fd31ad4cc 100644
--- a/lib/timers.js
+++ b/lib/timers.js
@@ -66,7 +66,7 @@ const kRefed = Symbol('refed');
// Therefore, it is very important that the timers implementation is performant
// and efficient.
//
-// Note: It is suggested you first read though the lib/internal/linkedlist.js
+// Note: It is suggested you first read through the lib/internal/linkedlist.js
// linked list implementation, since timers depend on it extensively. It can be
// somewhat counter-intuitive at first, as it is not actually a class. Instead,
// it is a set of helpers that operate on an existing object.
diff --git a/lib/zlib.js b/lib/zlib.js
index 317fd83ce0f..01a2ebe9335 100644
--- a/lib/zlib.js
+++ b/lib/zlib.js
@@ -28,9 +28,13 @@ const {
ERR_ZLIB_INITIALIZATION_FAILED
} = require('internal/errors').codes;
const Transform = require('_stream_transform');
-const { _extend } = require('util');
-const { isAnyArrayBuffer } = process.binding('util');
-const { isArrayBufferView } = require('internal/util/types');
+const {
+ _extend,
+ types: {
+ isAnyArrayBuffer,
+ isArrayBufferView
+ }
+} = require('util');
const binding = process.binding('zlib');
const assert = require('assert').ok;
const {
diff --git a/src/env.h b/src/env.h
index a688f7be505..19079aa5f07 100644
--- a/src/env.h
+++ b/src/env.h
@@ -292,24 +292,25 @@ struct PackageConfig {
#define ENVIRONMENT_STRONG_PERSISTENT_PROPERTIES(V) \
V(as_external, v8::External) \
+ V(async_hooks_after_function, v8::Function) \
+ V(async_hooks_before_function, v8::Function) \
+ V(async_hooks_binding, v8::Object) \
V(async_hooks_destroy_function, v8::Function) \
V(async_hooks_init_function, v8::Function) \
- V(async_hooks_before_function, v8::Function) \
- V(async_hooks_after_function, v8::Function) \
V(async_hooks_promise_resolve_function, v8::Function) \
- V(async_hooks_binding, v8::Object) \
V(buffer_prototype_object, v8::Object) \
V(context, v8::Context) \
V(domain_callback, v8::Function) \
+ V(fdclose_constructor_template, v8::ObjectTemplate) \
V(fd_constructor_template, v8::ObjectTemplate) \
V(filehandlereadwrap_template, v8::ObjectTemplate) \
V(fsreqpromise_constructor_template, v8::ObjectTemplate) \
- V(fdclose_constructor_template, v8::ObjectTemplate) \
+ V(fs_use_promises_symbol, v8::Symbol) \
V(host_import_module_dynamically_callback, v8::Function) \
V(host_initialize_import_meta_object_callback, v8::Function) \
V(http2ping_constructor_template, v8::ObjectTemplate) \
- V(http2stream_constructor_template, v8::ObjectTemplate) \
V(http2settings_constructor_template, v8::ObjectTemplate) \
+ V(http2stream_constructor_template, v8::ObjectTemplate) \
V(immediate_callback_function, v8::Function) \
V(inspector_console_api_object, v8::Object) \
V(pbkdf2_constructor_template, v8::ObjectTemplate) \
@@ -334,8 +335,7 @@ struct PackageConfig {
V(udp_constructor_function, v8::Function) \
V(vm_parsing_context_symbol, v8::Symbol) \
V(url_constructor_function, v8::Function) \
- V(write_wrap_template, v8::ObjectTemplate) \
- V(fs_use_promises_symbol, v8::Symbol)
+ V(write_wrap_template, v8::ObjectTemplate)
class Environment;
diff --git a/src/node_file.cc b/src/node_file.cc
index 57894fb8e6f..e60fabda05e 100644
--- a/src/node_file.cc
+++ b/src/node_file.cc
@@ -687,16 +687,16 @@ void AfterScanDir(uv_fs_t* req) {
}
-// This struct is only used on sync fs calls.
+// This class is only used on sync fs calls.
// For async calls FSReqWrap is used.
-class fs_req_wrap {
+class FSReqWrapSync {
public:
- fs_req_wrap() {}
- ~fs_req_wrap() { uv_fs_req_cleanup(&req); }
+ FSReqWrapSync() {}
+ ~FSReqWrapSync() { uv_fs_req_cleanup(&req); }
uv_fs_t req;
private:
- DISALLOW_COPY_AND_ASSIGN(fs_req_wrap);
+ DISALLOW_COPY_AND_ASSIGN(FSReqWrapSync);
};
// Returns nullptr if the operation fails from the start.
@@ -740,7 +740,7 @@ inline FSReqBase* AsyncCall(Environment* env,
// creating an error in the C++ land.
// ctx must be checked using value->IsObject() before being passed.
template <typename Func, typename... Args>
-inline int SyncCall(Environment* env, Local<Value> ctx, fs_req_wrap* req_wrap,
+inline int SyncCall(Environment* env, Local<Value> ctx, FSReqWrapSync* req_wrap,
const char* syscall, Func fn, Args... args) {
env->PrintSyncTrace();
int err = fn(env->event_loop(), &(req_wrap->req), args..., nullptr);
@@ -786,7 +786,7 @@ void Access(const FunctionCallbackInfo& args) {
uv_fs_access, *path, mode);
} else { // access(path, mode, undefined, ctx)
CHECK_EQ(argc, 4);
- fs_req_wrap req_wrap;
+ FSReqWrapSync req_wrap;
SyncCall(env, args[3], &req_wrap, "access", uv_fs_access, *path, mode);
}
}
@@ -807,7 +807,7 @@ void Close(const FunctionCallbackInfo& args) {
uv_fs_close, fd);
} else { // close(fd, undefined, ctx)
CHECK_EQ(argc, 3);
- fs_req_wrap req_wrap;
+ FSReqWrapSync req_wrap;
SyncCall(env, args[2], &req_wrap, "close", uv_fs_close, fd);
}
}
@@ -915,7 +915,7 @@ static void Stat(const FunctionCallbackInfo& args) {
uv_fs_stat, *path);
} else { // stat(path, undefined, ctx)
CHECK_EQ(argc, 3);
- fs_req_wrap req_wrap;
+ FSReqWrapSync req_wrap;
int err = SyncCall(env, args[2], &req_wrap, "stat", uv_fs_stat, *path);
if (err == 0) {
FillStatsArray(env->fs_stats_field_array(),
@@ -939,7 +939,7 @@ static void LStat(const FunctionCallbackInfo& args) {
uv_fs_lstat, *path);
} else { // lstat(path, undefined, ctx)
CHECK_EQ(argc, 3);
- fs_req_wrap req_wrap;
+ FSReqWrapSync req_wrap;
int err = SyncCall(env, args[2], &req_wrap, "lstat", uv_fs_lstat, *path);
if (err == 0) {
FillStatsArray(env->fs_stats_field_array(),
@@ -963,7 +963,7 @@ static void FStat(const FunctionCallbackInfo& args) {
uv_fs_fstat, fd);
} else { // fstat(fd, undefined, ctx)
CHECK_EQ(argc, 3);
- fs_req_wrap req_wrap;
+ FSReqWrapSync req_wrap;
int err = SyncCall(env, args[2], &req_wrap, "fstat", uv_fs_fstat, fd);
if (err == 0) {
FillStatsArray(env->fs_stats_field_array(),
@@ -992,7 +992,7 @@ static void Symlink(const FunctionCallbackInfo& args) {
AfterNoArgs, uv_fs_symlink, *target, *path, flags);
} else { // symlink(target, path, flags, undefinec, ctx)
CHECK_EQ(argc, 5);
- fs_req_wrap req;
+ FSReqWrapSync req;
SyncCall(env, args[4], &req, "symlink",
uv_fs_symlink, *target, *path, flags);
}
@@ -1016,7 +1016,7 @@ static void Link(const FunctionCallbackInfo& args) {
AfterNoArgs, uv_fs_link, *src, *dest);
} else { // link(src, dest)
CHECK_EQ(argc, 4);
- fs_req_wrap req;
+ FSReqWrapSync req;
SyncCall(env, args[3], &req, "link",
uv_fs_link, *src, *dest);
}
@@ -1039,7 +1039,7 @@ static void ReadLink(const FunctionCallbackInfo& args) {
uv_fs_readlink, *path);
} else {
CHECK_EQ(argc, 4);
- fs_req_wrap req;
+ FSReqWrapSync req;
int err = SyncCall(env, args[3], &req, "readlink",
uv_fs_readlink, *path);
if (err < 0) {
@@ -1079,7 +1079,7 @@ static void Rename(const FunctionCallbackInfo& args) {
UTF8, AfterNoArgs, uv_fs_rename, *old_path, *new_path);
} else {
CHECK_EQ(argc, 4);
- fs_req_wrap req;
+ FSReqWrapSync req;
SyncCall(env, args[3], &req, "rename", uv_fs_rename, *old_path, *new_path);
}
}
@@ -1102,7 +1102,7 @@ static void FTruncate(const FunctionCallbackInfo& args) {
uv_fs_ftruncate, fd, len);
} else {
CHECK_EQ(argc, 4);
- fs_req_wrap req;
+ FSReqWrapSync req;
SyncCall(env, args[3], &req, "ftruncate", uv_fs_ftruncate, fd, len);
}
}
@@ -1122,7 +1122,7 @@ static void Fdatasync(const FunctionCallbackInfo& args) {
uv_fs_fdatasync, fd);
} else {
CHECK_EQ(argc, 3);
- fs_req_wrap req;
+ FSReqWrapSync req;
SyncCall(env, args[2], &req, "fdatasync", uv_fs_fdatasync, fd);
}
}
@@ -1142,7 +1142,7 @@ static void Fsync(const FunctionCallbackInfo& args) {
uv_fs_fsync, fd);
} else {
CHECK_EQ(argc, 3);
- fs_req_wrap req;
+ FSReqWrapSync req;
SyncCall(env, args[2], &req, "fsync", uv_fs_fsync, fd);
}
}
@@ -1162,7 +1162,7 @@ static void Unlink(const FunctionCallbackInfo& args) {
uv_fs_unlink, *path);
} else {
CHECK_EQ(argc, 3);
- fs_req_wrap req;
+ FSReqWrapSync req;
SyncCall(env, args[2], &req, "unlink", uv_fs_unlink, *path);
}
}
@@ -1182,7 +1182,7 @@ static void RMDir(const FunctionCallbackInfo& args) {
uv_fs_rmdir, *path);
} else { // rmdir(path, undefined, ctx)
CHECK_EQ(argc, 3);
- fs_req_wrap req_wrap;
+ FSReqWrapSync req_wrap;
SyncCall(env, args[2], &req_wrap, "rmdir",
uv_fs_rmdir, *path);
}
@@ -1206,7 +1206,7 @@ static void MKDir(const FunctionCallbackInfo& args) {
uv_fs_mkdir, *path, mode);
} else { // mkdir(path, mode, undefined, ctx)
CHECK_EQ(argc, 4);
- fs_req_wrap req_wrap;
+ FSReqWrapSync req_wrap;
SyncCall(env, args[3], &req_wrap, "mkdir",
uv_fs_mkdir, *path, mode);
}
@@ -1229,7 +1229,7 @@ static void RealPath(const FunctionCallbackInfo& args) {
uv_fs_realpath, *path);
} else { // realpath(path, encoding, undefined, ctx)
CHECK_EQ(argc, 4);
- fs_req_wrap req_wrap;
+ FSReqWrapSync req_wrap;
int err = SyncCall(env, args[3], &req_wrap, "realpath",
uv_fs_realpath, *path);
if (err < 0) {
@@ -1270,7 +1270,7 @@ static void ReadDir(const FunctionCallbackInfo& args) {
uv_fs_scandir, *path, 0 /*flags*/);
} else { // readdir(path, encoding, undefined, ctx)
CHECK_EQ(argc, 4);
- fs_req_wrap req_wrap;
+ FSReqWrapSync req_wrap;
int err = SyncCall(env, args[3], &req_wrap, "scandir",
uv_fs_scandir, *path, 0 /*flags*/);
if (err < 0) {
@@ -1354,7 +1354,7 @@ static void Open(const FunctionCallbackInfo& args) {
uv_fs_open, *path, flags, mode);
} else { // open(path, flags, mode, undefined, ctx)
CHECK_EQ(argc, 5);
- fs_req_wrap req_wrap;
+ FSReqWrapSync req_wrap;
int result = SyncCall(env, args[4], &req_wrap, "open",
uv_fs_open, *path, flags, mode);
args.GetReturnValue().Set(result);
@@ -1382,7 +1382,7 @@ static void OpenFileHandle(const FunctionCallbackInfo& args) {
uv_fs_open, *path, flags, mode);
} else { // openFileHandle(path, flags, mode, undefined, ctx)
CHECK_EQ(argc, 5);
- fs_req_wrap req_wrap;
+ FSReqWrapSync req_wrap;
int result = SyncCall(env, args[4], &req_wrap, "open",
uv_fs_open, *path, flags, mode);
if (result < 0) {
@@ -1416,7 +1416,7 @@ static void CopyFile(const FunctionCallbackInfo& args) {
uv_fs_copyfile, *src, *dest, flags);
} else { // copyFile(src, dest, flags, undefined, ctx)
CHECK_EQ(argc, 5);
- fs_req_wrap req_wrap;
+ FSReqWrapSync req_wrap;
SyncCall(env, args[4], &req_wrap, "copyfile",
uv_fs_copyfile, *src, *dest, flags);
}
@@ -1467,7 +1467,7 @@ static void WriteBuffer(const FunctionCallbackInfo& args) {
uv_fs_write, fd, &uvbuf, 1, pos);
} else { // write(fd, buffer, off, len, pos, undefined, ctx)
CHECK_EQ(argc, 7);
- fs_req_wrap req_wrap;
+ FSReqWrapSync req_wrap;
int bytesWritten = SyncCall(env, args[6], &req_wrap, "write",
uv_fs_write, fd, &uvbuf, 1, pos);
args.GetReturnValue().Set(bytesWritten);
@@ -1510,7 +1510,7 @@ static void WriteBuffers(const FunctionCallbackInfo& args) {
uv_fs_write, fd, *iovs, iovs.length(), pos);
} else { // writeBuffers(fd, chunks, pos, undefined, ctx)
CHECK_EQ(argc, 5);
- fs_req_wrap req_wrap;
+ FSReqWrapSync req_wrap;
int bytesWritten = SyncCall(env, args[4], &req_wrap, "write",
uv_fs_write, fd, *iovs, iovs.length(), pos);
args.GetReturnValue().Set(bytesWritten);
@@ -1589,7 +1589,7 @@ static void WriteString(const FunctionCallbackInfo& args) {
}
} else { // write(fd, string, pos, enc, undefined, ctx)
CHECK_EQ(argc, 6);
- fs_req_wrap req_wrap;
+ FSReqWrapSync req_wrap;
FSReqBase::FSReqBuffer stack_buffer;
if (buf == nullptr) {
len = StringBytes::StorageSize(env->isolate(), value, enc);
@@ -1659,7 +1659,7 @@ static void Read(const FunctionCallbackInfo& args) {
uv_fs_read, fd, &uvbuf, 1, pos);
} else { // read(fd, buffer, offset, len, pos, undefined, ctx)
CHECK_EQ(argc, 7);
- fs_req_wrap req_wrap;
+ FSReqWrapSync req_wrap;
const int bytesRead = SyncCall(env, args[6], &req_wrap, "read",
uv_fs_read, fd, &uvbuf, 1, pos);
args.GetReturnValue().Set(bytesRead);
@@ -1693,7 +1693,7 @@ static void Chmod(const FunctionCallbackInfo& args) {
uv_fs_chmod, *path, mode);
} else { // chmod(path, mode, undefined, ctx)
CHECK_EQ(argc, 4);
- fs_req_wrap req_wrap;
+ FSReqWrapSync req_wrap;
SyncCall(env, args[3], &req_wrap, "chmod",
uv_fs_chmod, *path, mode);
}
@@ -1721,7 +1721,7 @@ static void FChmod(const FunctionCallbackInfo& args) {
uv_fs_fchmod, fd, mode);
} else { // fchmod(fd, mode, undefined, ctx)
CHECK_EQ(argc, 4);
- fs_req_wrap req_wrap;
+ FSReqWrapSync req_wrap;
SyncCall(env, args[3], &req_wrap, "fchmod",
uv_fs_fchmod, fd, mode);
}
@@ -1752,7 +1752,7 @@ static void Chown(const FunctionCallbackInfo& args) {
uv_fs_chown, *path, uid, gid);
} else { // chown(path, uid, gid, undefined, ctx)
CHECK_EQ(argc, 5);
- fs_req_wrap req_wrap;
+ FSReqWrapSync req_wrap;
SyncCall(env, args[4], &req_wrap, "chown",
uv_fs_chown, *path, uid, gid);
}
@@ -1783,7 +1783,7 @@ static void FChown(const FunctionCallbackInfo& args) {
uv_fs_fchown, fd, uid, gid);
} else { // fchown(fd, uid, gid, undefined, ctx)
CHECK_EQ(argc, 5);
- fs_req_wrap req_wrap;
+ FSReqWrapSync req_wrap;
SyncCall(env, args[4], &req_wrap, "fchown",
uv_fs_fchown, fd, uid, gid);
}
@@ -1811,7 +1811,7 @@ static void UTimes(const FunctionCallbackInfo& args) {
uv_fs_utime, *path, atime, mtime);
} else { // utimes(path, atime, mtime, undefined, ctx)
CHECK_EQ(argc, 5);
- fs_req_wrap req_wrap;
+ FSReqWrapSync req_wrap;
SyncCall(env, args[4], &req_wrap, "utime",
uv_fs_utime, *path, atime, mtime);
}
@@ -1838,7 +1838,7 @@ static void FUTimes(const FunctionCallbackInfo& args) {
uv_fs_futime, fd, atime, mtime);
} else { // futimes(fd, atime, mtime, undefined, ctx)
CHECK_EQ(argc, 5);
- fs_req_wrap req_wrap;
+ FSReqWrapSync req_wrap;
SyncCall(env, args[4], &req_wrap, "futime",
uv_fs_futime, fd, atime, mtime);
}
@@ -1861,7 +1861,7 @@ static void Mkdtemp(const FunctionCallbackInfo& args) {
uv_fs_mkdtemp, *tmpl);
} else { // mkdtemp(tmpl, encoding, undefined, ctx)
CHECK_EQ(argc, 4);
- fs_req_wrap req_wrap;
+ FSReqWrapSync req_wrap;
SyncCall(env, args[3], &req_wrap, "mkdtemp",
uv_fs_mkdtemp, *tmpl);
const char* path = static_cast(req_wrap.req.path);
diff --git a/test/parallel/test-async-wrap-GH13045.js b/test/parallel/test-async-wrap-GH13045.js
deleted file mode 100644
index bb4e1a0c411..00000000000
--- a/test/parallel/test-async-wrap-GH13045.js
+++ /dev/null
@@ -1,55 +0,0 @@
-'use strict';
-const common = require('../common');
-if (!common.hasCrypto)
- common.skip('missing crypto');
-
-// Refs: https://github.com/nodejs/node/issues/13045
-// An HTTP Agent reuses a TLSSocket, and makes a failed call to `asyncReset`.
-
-const assert = require('assert');
-const https = require('https');
-const fixtures = require('../common/fixtures');
-
-const serverOptions = {
- key: fixtures.readKey('agent1-key.pem'),
- cert: fixtures.readKey('agent1-cert.pem'),
- ca: fixtures.readKey('ca1-cert.pem')
-};
-
-const server = https.createServer(serverOptions, common.mustCall((req, res) => {
- res.end('hello world\n');
-}, 2));
-
-server.listen(0, common.mustCall(function() {
- const port = this.address().port;
- const clientOptions = {
- agent: new https.Agent({
- keepAlive: true,
- rejectUnauthorized: false
- }),
- port: port
- };
-
- const req = https.get(clientOptions, common.mustCall((res) => {
- assert.strictEqual(res.statusCode, 200);
- res.on('error', (err) => assert.fail(err));
- res.socket.on('error', (err) => assert.fail(err));
- res.resume();
- // drain the socket and wait for it to be free to reuse
- res.socket.once('free', () => {
- // This is the pain point. Internally the Agent will call
- // `socket._handle.asyncReset()` and if the _handle does not implement
- // `asyncReset` this will throw TypeError
- const req2 = https.get(clientOptions, common.mustCall((res2) => {
- assert.strictEqual(res.statusCode, 200);
- res2.on('error', (err) => assert.fail(err));
- res2.socket.on('error', (err) => assert.fail(err));
- // this should be the end of the test
- res2.destroy();
- server.close();
- }));
- req2.on('error', (err) => assert.fail(err));
- });
- }));
- req.on('error', (err) => assert.fail(err));
-}));
diff --git a/test/parallel/test-async-wrap-tlssocket-asyncreset.js b/test/parallel/test-async-wrap-tlssocket-asyncreset.js
new file mode 100644
index 00000000000..37160741fcd
--- /dev/null
+++ b/test/parallel/test-async-wrap-tlssocket-asyncreset.js
@@ -0,0 +1,66 @@
+'use strict';
+const common = require('../common');
+if (!common.hasCrypto) common.skip('missing crypto');
+const fixtures = require('../common/fixtures');
+
+// An HTTP Agent reuses a TLSSocket, and makes a failed call to `asyncReset`.
+// Refs: https://github.com/nodejs/node/issues/13045
+
+const assert = require('assert');
+const https = require('https');
+
+const serverOptions = {
+ key: fixtures.readKey('agent1-key.pem'),
+ cert: fixtures.readKey('agent1-cert.pem'),
+ ca: fixtures.readKey('ca1-cert.pem')
+};
+
+const server = https.createServer(
+ serverOptions,
+ common.mustCall((req, res) => {
+ res.end('hello world\n');
+ }, 2)
+);
+
+server.listen(
+ 0,
+ common.mustCall(function() {
+ const port = this.address().port;
+ const clientOptions = {
+ agent: new https.Agent({
+ keepAlive: true,
+ rejectUnauthorized: false
+ }),
+ port: port
+ };
+
+ const req = https.get(
+ clientOptions,
+ common.mustCall((res) => {
+ assert.strictEqual(res.statusCode, 200);
+ res.on('error', (err) => assert.fail(err));
+ res.socket.on('error', (err) => assert.fail(err));
+ res.resume();
+ // drain the socket and wait for it to be free to reuse
+ res.socket.once('free', () => {
+ // This is the pain point. Internally the Agent will call
+ // `socket._handle.asyncReset()` and if the _handle does not implement
+ // `asyncReset` this will throw TypeError
+ const req2 = https.get(
+ clientOptions,
+ common.mustCall((res2) => {
+ assert.strictEqual(res2.statusCode, 200);
+ res2.on('error', (err) => assert.fail(err));
+ res2.socket.on('error', (err) => assert.fail(err));
+ // this should be the end of the test
+ res2.destroy();
+ server.close();
+ })
+ );
+ req2.on('error', (err) => assert.fail(err));
+ });
+ })
+ );
+ req.on('error', (err) => assert.fail(err));
+ })
+);
diff --git a/test/parallel/test-child-process-fork-closed-channel-segfault.js b/test/parallel/test-child-process-fork-closed-channel-segfault.js
new file mode 100644
index 00000000000..87b599c7bb2
--- /dev/null
+++ b/test/parallel/test-child-process-fork-closed-channel-segfault.js
@@ -0,0 +1,75 @@
+'use strict';
+const common = require('../common');
+
+// Before https://github.com/nodejs/node/pull/2847 a child process trying
+// (asynchronously) to use the closed channel to its creator caused a segfault.
+
+const assert = require('assert');
+const cluster = require('cluster');
+const net = require('net');
+
+if (!cluster.isMaster) {
+ // Exit on first received handle to leave the queue non-empty in master
+ process.on('message', function() {
+ process.exit(1);
+ });
+ return;
+}
+
+const server = net
+ .createServer(function(s) {
+ if (common.isWindows) {
+ s.on('error', function(err) {
+ // Prevent possible ECONNRESET errors from popping up
+ if (err.code !== 'ECONNRESET') throw err;
+ });
+ }
+ setTimeout(function() {
+ s.destroy();
+ }, 100);
+ })
+ .listen(0, function() {
+ const worker = cluster.fork();
+
+ function send(callback) {
+ const s = net.connect(server.address().port, function() {
+ worker.send({}, s, callback);
+ });
+
+ // https://github.com/nodejs/node/issues/3635#issuecomment-157714683
+ // ECONNREFUSED or ECONNRESET errors can happen if this connection is
+ // still establishing while the server has already closed.
+ // EMFILE can happen if the worker __and__ the server had already closed.
+ s.on('error', function(err) {
+ if (
+ err.code !== 'ECONNRESET' &&
+ err.code !== 'ECONNREFUSED' &&
+ err.code !== 'EMFILE'
+ ) {
+ throw err;
+ }
+ });
+ }
+
+ worker.process.once(
+ 'close',
+ common.mustCall(function() {
+ // Otherwise the crash on `channel.fd` access may happen
+ assert.strictEqual(worker.process.channel, null);
+ server.close();
+ })
+ );
+
+ worker.on('online', function() {
+ send(function(err) {
+ assert.ifError(err);
+ send(function(err) {
+ // Ignore errors when sending the second handle because the worker
+ // may already have exited.
+ if (err && err.message !== 'Channel closed') {
+ throw err;
+ }
+ });
+ });
+ });
+ });
diff --git a/test/parallel/test-child-process-fork-regr-gh-2847.js b/test/parallel/test-child-process-fork-regr-gh-2847.js
deleted file mode 100644
index 9e4412d1f73..00000000000
--- a/test/parallel/test-child-process-fork-regr-gh-2847.js
+++ /dev/null
@@ -1,69 +0,0 @@
-// Before https://github.com/nodejs/node/pull/2847 a child process trying
-// (asynchronously) to use the closed channel to it's creator caused a segfault.
-'use strict';
-
-const common = require('../common');
-const assert = require('assert');
-
-const cluster = require('cluster');
-const net = require('net');
-
-if (!cluster.isMaster) {
- // Exit on first received handle to leave the queue non-empty in master
- process.on('message', function() {
- process.exit(1);
- });
- return;
-}
-
-const server = net.createServer(function(s) {
- if (common.isWindows) {
- s.on('error', function(err) {
- // Prevent possible ECONNRESET errors from popping up
- if (err.code !== 'ECONNRESET')
- throw err;
- });
- }
- setTimeout(function() {
- s.destroy();
- }, 100);
-}).listen(0, function() {
- const worker = cluster.fork();
-
- function send(callback) {
- const s = net.connect(server.address().port, function() {
- worker.send({}, s, callback);
- });
-
- // https://github.com/nodejs/node/issues/3635#issuecomment-157714683
- // ECONNREFUSED or ECONNRESET errors can happen if this connection is still
- // establishing while the server has already closed.
- // EMFILE can happen if the worker __and__ the server had already closed.
- s.on('error', function(err) {
- if ((err.code !== 'ECONNRESET') &&
- (err.code !== 'ECONNREFUSED') &&
- (err.code !== 'EMFILE')) {
- throw err;
- }
- });
- }
-
- worker.process.once('close', common.mustCall(function() {
- // Otherwise the crash on `channel.fd` access may happen
- assert.strictEqual(worker.process.channel, null);
- server.close();
- }));
-
- worker.on('online', function() {
- send(function(err) {
- assert.ifError(err);
- send(function(err) {
- // Ignore errors when sending the second handle because the worker
- // may already have exited.
- if (err && err.message !== 'Channel closed') {
- throw err;
- }
- });
- });
- });
-});
diff --git a/test/parallel/test-child-process-spawn-typeerror.js b/test/parallel/test-child-process-spawn-typeerror.js
index 2a2c1de277c..791cf02280a 100644
--- a/test/parallel/test-child-process-spawn-typeerror.js
+++ b/test/parallel/test-child-process-spawn-typeerror.js
@@ -30,10 +30,10 @@ const invalidcmd = 'hopefully_you_dont_have_this_on_your_machine';
const empty = fixtures.path('empty.js');
const invalidArgValueError =
- common.expectsError({ code: 'ERR_INVALID_ARG_VALUE', type: TypeError }, 13);
+ common.expectsError({ code: 'ERR_INVALID_ARG_VALUE', type: TypeError }, 14);
const invalidArgTypeError =
- common.expectsError({ code: 'ERR_INVALID_ARG_TYPE', type: TypeError }, 11);
+ common.expectsError({ code: 'ERR_INVALID_ARG_TYPE', type: TypeError }, 10);
assert.throws(function() {
const child = spawn(invalidcmd, 'this is not an array');
@@ -53,7 +53,7 @@ assert.throws(function() {
assert.throws(function() {
spawn('');
-}, invalidArgTypeError);
+}, invalidArgValueError);
assert.throws(function() {
const file = { toString() { return null; } };
diff --git a/test/parallel/test-child-process-spawnsync-validation-errors.js b/test/parallel/test-child-process-spawnsync-validation-errors.js
index 802acad1031..e52f04b04ad 100644
--- a/test/parallel/test-child-process-spawnsync-validation-errors.js
+++ b/test/parallel/test-child-process-spawnsync-validation-errors.js
@@ -5,13 +5,15 @@ const spawnSync = require('child_process').spawnSync;
const signals = process.binding('constants').os.signals;
let invalidArgTypeError;
+let invalidArgTypeErrorCount = 62;
if (common.isWindows) {
invalidArgTypeError =
common.expectsError({ code: 'ERR_INVALID_ARG_TYPE', type: TypeError }, 42);
} else {
invalidArgTypeError =
- common.expectsError({ code: 'ERR_INVALID_ARG_TYPE', type: TypeError }, 62);
+ common.expectsError({ code: 'ERR_INVALID_ARG_TYPE', type: TypeError },
+ invalidArgTypeErrorCount);
}
const invalidRangeError =
@@ -76,6 +78,9 @@ if (!common.isWindows) {
fail('uid', Infinity, invalidArgTypeError);
fail('uid', 3.1, invalidArgTypeError);
fail('uid', -3.1, invalidArgTypeError);
+ } else {
+ // Decrement invalidArgTypeErrorCount if validation isn't possible
+ invalidArgTypeErrorCount -= 10;
}
}
@@ -95,6 +100,9 @@ if (!common.isWindows) {
fail('gid', Infinity, invalidArgTypeError);
fail('gid', 3.1, invalidArgTypeError);
fail('gid', -3.1, invalidArgTypeError);
+ } else {
+ // Decrement invalidArgTypeErrorCount if validation isn't possible
+ invalidArgTypeErrorCount -= 10;
}
}
}
diff --git a/test/parallel/test-fs-read-stream.js b/test/parallel/test-fs-read-stream.js
index 8b92eb1c7aa..870edf2820e 100644
--- a/test/parallel/test-fs-read-stream.js
+++ b/test/parallel/test-fs-read-stream.js
@@ -35,7 +35,7 @@ const rangeFile = fixtures.path('x.txt');
let paused = false;
let bytesRead = 0;
- const file = fs.ReadStream(fn);
+ const file = fs.createReadStream(fn);
const fileSize = fs.statSync(fn).size;
assert.strictEqual(file.bytesRead, 0);
diff --git a/test/parallel/test-http-expect-continue.js b/test/parallel/test-http-expect-continue.js
index 7f97ce35927..7d910f0778e 100644
--- a/test/parallel/test-http-expect-continue.js
+++ b/test/parallel/test-http-expect-continue.js
@@ -20,70 +20,63 @@
// USE OR OTHER DEALINGS IN THE SOFTWARE.
'use strict';
-require('../common');
+const common = require('../common');
const assert = require('assert');
const http = require('http');
-let outstanding_reqs = 0;
const test_req_body = 'some stuff...\n';
const test_res_body = 'other stuff!\n';
let sent_continue = false;
let got_continue = false;
-function handler(req, res) {
- assert.strictEqual(sent_continue, true,
- 'Full response sent before 100 Continue');
+const handler = common.mustCall((req, res) => {
+ assert.ok(sent_continue, 'Full response sent before 100 Continue');
console.error('Server sending full response...');
res.writeHead(200, {
'Content-Type': 'text/plain',
'ABCD': '1'
});
res.end(test_res_body);
-}
+});
-const server = http.createServer(handler);
-server.on('checkContinue', function(req, res) {
+const server = http.createServer();
+server.on('checkContinue', common.mustCall((req, res) => {
console.error('Server got Expect: 100-continue...');
res.writeContinue();
sent_continue = true;
setTimeout(function() {
handler(req, res);
}, 100);
-});
+}));
server.listen(0);
-server.on('listening', function() {
+server.on('listening', common.mustCall(() => {
const req = http.request({
- port: this.address().port,
+ port: server.address().port,
method: 'POST',
path: '/world',
headers: { 'Expect': '100-continue' }
});
console.error('Client sending request...');
- outstanding_reqs++;
let body = '';
- req.on('continue', function() {
+ req.on('continue', common.mustCall(() => {
console.error('Client got 100 Continue...');
got_continue = true;
req.end(test_req_body);
- });
- req.on('response', function(res) {
- assert.strictEqual(got_continue, true,
- 'Full response received before 100 Continue');
+ }));
+ req.on('response', common.mustCall((res) => {
+ assert.ok(got_continue, 'Full response received before 100 Continue');
assert.strictEqual(200, res.statusCode,
`Final status code was ${res.statusCode}, not 200.`);
res.setEncoding('utf8');
res.on('data', function(chunk) { body += chunk; });
- res.on('end', function() {
+ res.on('end', common.mustCall(() => {
console.error('Got full response.');
- assert.strictEqual(body, test_res_body, 'Response body doesn\'t match.');
+ assert.strictEqual(body, test_res_body);
assert.ok('abcd' in res.headers, 'Response headers missing.');
- outstanding_reqs--;
- if (outstanding_reqs === 0) {
- server.close();
- process.exit();
- }
- });
- });
-});
+ server.close();
+ process.exit();
+ }));
+ }));
+}));
diff --git a/test/parallel/test-http-parser.js b/test/parallel/test-http-parser.js
index df3a87f73c8..bc9c6920bc3 100644
--- a/test/parallel/test-http-parser.js
+++ b/test/parallel/test-http-parser.js
@@ -97,9 +97,10 @@ function expectBody(expected) {
parser.reinitialize(HTTPParser.REQUEST);
- assert.throws(function() {
- parser.execute(request, 0, request.length);
- }, Error, 'hello world');
+ assert.throws(
+ () => { parser.execute(request, 0, request.length); },
+ { name: 'Error', message: 'hello world' }
+ );
}
diff --git a/test/parallel/test-http-pipeline-assertionerror-finish.js b/test/parallel/test-http-pipeline-assertionerror-finish.js
new file mode 100644
index 00000000000..2780d4bdf67
--- /dev/null
+++ b/test/parallel/test-http-pipeline-assertionerror-finish.js
@@ -0,0 +1,34 @@
+'use strict';
+const common = require('../common');
+
+// This test ensures that Node.js doesn't crash with an AssertionError at
+// `ServerResponse.resOnFinish` because of an out-of-order 'finish' bug in
+// pipelining.
+// https://github.com/nodejs/node/issues/2639
+
+const http = require('http');
+const net = require('net');
+
+const COUNT = 10;
+
+const server = http
+ .createServer(
+ common.mustCall((req, res) => {
+ // Close the server, we have only one TCP connection anyway
+ server.close();
+ res.writeHead(200);
+ res.write('data');
+
+ setTimeout(function() {
+ res.end();
+ }, (Math.random() * 100) | 0);
+ }, COUNT)
+ )
+ .listen(0, function() {
+ const s = net.connect(this.address().port);
+
+ const big = 'GET / HTTP/1.0\r\n\r\n'.repeat(COUNT);
+
+ s.write(big);
+ s.resume();
+ });
diff --git a/test/parallel/test-http-pipeline-regr-2639.js b/test/parallel/test-http-pipeline-regr-2639.js
deleted file mode 100644
index 8eaf5588aaf..00000000000
--- a/test/parallel/test-http-pipeline-regr-2639.js
+++ /dev/null
@@ -1,24 +0,0 @@
-'use strict';
-const common = require('../common');
-const http = require('http');
-const net = require('net');
-
-const COUNT = 10;
-
-const server = http.createServer(common.mustCall((req, res) => {
- // Close the server, we have only one TCP connection anyway
- server.close();
- res.writeHead(200);
- res.write('data');
-
- setTimeout(function() {
- res.end();
- }, (Math.random() * 100) | 0);
-}, COUNT)).listen(0, function() {
- const s = net.connect(this.address().port);
-
- const big = 'GET / HTTP/1.0\r\n\r\n'.repeat(COUNT);
-
- s.write(big);
- s.resume();
-});
diff --git a/test/parallel/test-http-pipeline-regr-3332.js b/test/parallel/test-http-pipeline-regr-3332.js
deleted file mode 100644
index c940b8d3841..00000000000
--- a/test/parallel/test-http-pipeline-regr-3332.js
+++ /dev/null
@@ -1,27 +0,0 @@
-'use strict';
-require('../common');
-const http = require('http');
-const net = require('net');
-const Countdown = require('../common/countdown');
-
-const big = Buffer.alloc(16 * 1024, 'A');
-
-const COUNT = 1e4;
-
-const countdown = new Countdown(COUNT, () => {
- server.close();
- client.end();
-});
-
-let client;
-const server = http.createServer(function(req, res) {
- res.end(big, function() {
- countdown.dec();
- });
-}).listen(0, function() {
- const req = 'GET / HTTP/1.1\r\n\r\n'.repeat(COUNT);
- client = net.connect(this.address().port, function() {
- client.write(req);
- });
- client.resume();
-});
diff --git a/test/parallel/test-http-pipeline-regr-3508.js b/test/parallel/test-http-pipeline-regr-3508.js
deleted file mode 100644
index 02c1e1787be..00000000000
--- a/test/parallel/test-http-pipeline-regr-3508.js
+++ /dev/null
@@ -1,55 +0,0 @@
-'use strict';
-require('../common');
-const http = require('http');
-const net = require('net');
-
-let once = false;
-let first = null;
-let second = null;
-
-const chunk = Buffer.alloc(1024, 'X');
-
-let size = 0;
-
-let more;
-let done;
-
-const server = http.createServer(function(req, res) {
- if (!once)
- server.close();
- once = true;
-
- if (first === null) {
- first = res;
- return;
- }
- if (second === null) {
- second = res;
- res.write(chunk);
- } else {
- res.end(chunk);
- }
- size += res.outputSize;
- if (size <= req.socket.writableHighWaterMark) {
- more();
- return;
- }
- done();
-}).on('upgrade', function(req, socket) {
- second.end(chunk, function() {
- socket.end();
- });
- first.end('hello');
-}).listen(0, function() {
- const s = net.connect(this.address().port);
- more = function() {
- s.write('GET / HTTP/1.1\r\n\r\n');
- };
- done = function() {
- s.write('GET / HTTP/1.1\r\n\r\n' +
- 'GET / HTTP/1.1\r\nConnection: upgrade\r\nUpgrade: ws\r\n\r\naaa');
- };
- more();
- more();
- s.resume();
-});
diff --git a/test/parallel/test-http-pipeline-requests-connection-leak.js b/test/parallel/test-http-pipeline-requests-connection-leak.js
new file mode 100644
index 00000000000..aa5db56f778
--- /dev/null
+++ b/test/parallel/test-http-pipeline-requests-connection-leak.js
@@ -0,0 +1,34 @@
+'use strict';
+require('../common');
+const Countdown = require('../common/countdown');
+
+// This test ensures Node.js doesn't behave erratically when receiving pipelined
+// requests
+// https://github.com/nodejs/node/issues/3332
+
+const http = require('http');
+const net = require('net');
+
+const big = Buffer.alloc(16 * 1024, 'A');
+
+const COUNT = 1e4;
+
+const countdown = new Countdown(COUNT, () => {
+ server.close();
+ client.end();
+});
+
+let client;
+const server = http
+ .createServer(function(req, res) {
+ res.end(big, function() {
+ countdown.dec();
+ });
+ })
+ .listen(0, function() {
+ const req = 'GET / HTTP/1.1\r\n\r\n'.repeat(COUNT);
+ client = net.connect(this.address().port, function() {
+ client.write(req);
+ });
+ client.resume();
+ });
diff --git a/test/parallel/test-http-pipeline-socket-parser-typeerror.js b/test/parallel/test-http-pipeline-socket-parser-typeerror.js
new file mode 100644
index 00000000000..0cb20e76172
--- /dev/null
+++ b/test/parallel/test-http-pipeline-socket-parser-typeerror.js
@@ -0,0 +1,64 @@
+'use strict';
+require('../common');
+
+// This test ensures that Node.js doesn't crash because of a TypeError by
+// checking in `connectionListener` that the socket still has the parser.
+// https://github.com/nodejs/node/issues/3508
+
+const http = require('http');
+const net = require('net');
+
+let once = false;
+let first = null;
+let second = null;
+
+const chunk = Buffer.alloc(1024, 'X');
+
+let size = 0;
+
+let more;
+let done;
+
+const server = http
+ .createServer(function(req, res) {
+ if (!once) server.close();
+ once = true;
+
+ if (first === null) {
+ first = res;
+ return;
+ }
+ if (second === null) {
+ second = res;
+ res.write(chunk);
+ } else {
+ res.end(chunk);
+ }
+ size += res.outputSize;
+ if (size <= req.socket.writableHighWaterMark) {
+ more();
+ return;
+ }
+ done();
+ })
+ .on('upgrade', function(req, socket) {
+ second.end(chunk, function() {
+ socket.end();
+ });
+ first.end('hello');
+ })
+ .listen(0, function() {
+ const s = net.connect(this.address().port);
+ more = function() {
+ s.write('GET / HTTP/1.1\r\n\r\n');
+ };
+ done = function() {
+ s.write(
+ 'GET / HTTP/1.1\r\n\r\n' +
+ 'GET / HTTP/1.1\r\nConnection: upgrade\r\nUpgrade: ws\r\n\r\naaa'
+ );
+ };
+ more();
+ more();
+ s.resume();
+ });
diff --git a/test/parallel/test-http-regr-gh-2821.js b/test/parallel/test-http-request-large-payload.js
similarity index 70%
rename from test/parallel/test-http-regr-gh-2821.js
rename to test/parallel/test-http-request-large-payload.js
index 9f1df0a0f56..3be100b7404 100644
--- a/test/parallel/test-http-regr-gh-2821.js
+++ b/test/parallel/test-http-request-large-payload.js
@@ -1,5 +1,10 @@
'use strict';
require('../common');
+
+// This test ensures Node.js doesn't throw an error when making requests with
+// a payload of 16kb or more in size.
+// https://github.com/nodejs/node/issues/2821
+
const http = require('http');
const server = http.createServer(function(req, res) {
@@ -10,7 +15,6 @@ const server = http.createServer(function(req, res) {
});
server.listen(0, function() {
-
const req = http.request({
method: 'POST',
port: this.address().port
diff --git a/test/parallel/test-net-dns-error.js b/test/parallel/test-net-dns-error.js
index a5ae415592f..0d943bf6cd5 100644
--- a/test/parallel/test-net-dns-error.js
+++ b/test/parallel/test-net-dns-error.js
@@ -21,27 +21,20 @@
'use strict';
const common = require('../common');
-const assert = require('assert');
+const assert = require('assert');
const net = require('net');
const host = '*'.repeat(256);
+const errCode = common.isOpenBSD ? 'EAI_FAIL' : 'ENOTFOUND';
-let errCode = 'ENOTFOUND';
-if (common.isOpenBSD)
- errCode = 'EAI_FAIL';
-
-function do_not_call() {
- throw new Error('This function should not have been called.');
-}
-
-const socket = net.connect(42, host, do_not_call);
+const socket = net.connect(42, host, common.mustNotCall());
socket.on('error', common.mustCall(function(err) {
assert.strictEqual(err.code, errCode);
}));
-socket.on('lookup', function(err, ip, type) {
+socket.on('lookup', common.mustCall(function(err, ip, type) {
assert(err instanceof Error);
assert.strictEqual(err.code, errCode);
assert.strictEqual(ip, undefined);
assert.strictEqual(type, undefined);
-});
+}));
diff --git a/test/parallel/test-net-server-pause-on-connect.js b/test/parallel/test-net-server-pause-on-connect.js
index 84cc91d56c3..59c39e8816c 100644
--- a/test/parallel/test-net-server-pause-on-connect.js
+++ b/test/parallel/test-net-server-pause-on-connect.js
@@ -34,7 +34,7 @@ const server1ConnHandler = (socket) => {
assert.fail('data event should not have happened yet');
}
- assert.strictEqual(data.toString(), msg, 'invalid data received');
+ assert.strictEqual(data.toString(), msg);
socket.end();
server1.close();
});
@@ -46,12 +46,11 @@ const server1 = net.createServer({ pauseOnConnect: true }, server1ConnHandler);
const server2ConnHandler = (socket) => {
socket.on('data', function(data) {
- assert.strictEqual(data.toString(), msg, 'invalid data received');
+ assert.strictEqual(data.toString(), msg);
socket.end();
server2.close();
- assert.strictEqual(server1Sock.bytesRead, 0,
- 'no data should have been read yet');
+ assert.strictEqual(server1Sock.bytesRead, 0);
server1Sock.resume();
stopped = false;
});
diff --git a/test/parallel/test-tls-cnnic-whitelist.js b/test/parallel/test-tls-cnnic-whitelist.js
index 80f188f3667..37a96e4eb7e 100644
--- a/test/parallel/test-tls-cnnic-whitelist.js
+++ b/test/parallel/test-tls-cnnic-whitelist.js
@@ -14,7 +14,7 @@ function loadPEM(n) {
}
const testCases = [
- { // Test 0: for the check of a cert not existed in the whitelist.
+ { // Test 0: for the check of a cert not in the whitelist.
// agent7-cert.pem is issued by the fake CNNIC root CA so that its
// hash is not listed in the whitelist.
// fake-cnnic-root-cert has the same subject name as the original