diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 2336deb7282..2f63dd29d79 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -242,6 +242,10 @@ who you are:
 $ git config --global user.name "J. Random User"
 $ git config --global user.email "j.random.user@example.com"
 ```
+Please make sure this local email is also added to your
+[GitHub email list](https://github.com/settings/emails) so that your commits
+will be properly associated with your account and you will be promoted
+to Contributor once your first commit lands.
 
 #### Step 1: Fork
 
diff --git a/benchmark/misc/punycode.js b/benchmark/misc/punycode.js
index 630aea3195f..40bcd703020 100644
--- a/benchmark/misc/punycode.js
+++ b/benchmark/misc/punycode.js
@@ -1,11 +1,14 @@
 'use strict';
 
 const common = require('../common.js');
-const icu = process.binding('icu');
+let icu;
+try {
+  icu = process.binding('icu');
+} catch (err) {}
 const punycode = require('punycode');
 
 const bench = common.createBenchmark(main, {
-  method: ['punycode', 'icu'],
+  method: ['punycode'].concat(icu !== undefined ? ['icu'] : []),
   n: [1024],
   val: [
     'افغانستا.icom.museum',
@@ -69,8 +72,11 @@ function main(conf) {
       runPunycode(n, val);
       break;
     case 'icu':
-      runICU(n, val);
-      break;
+      if (icu !== undefined) {
+        runICU(n, val);
+        break;
+      }
+      // fallthrough
     default:
       throw new Error('Unexpected method');
   }
diff --git a/common.gypi b/common.gypi
index f5727e51f2e..6278eccdd98 100644
--- a/common.gypi
+++ b/common.gypi
@@ -222,9 +222,6 @@
             'EnableFunctionLevelLinking': 'true',
             'EnableIntrinsicFunctions': 'true',
             'RuntimeTypeInfo': 'false',
-            'AdditionalOptions': [
-              '/MP', # compile across multiple CPUs
-            ],
           },
           'VCLibrarianTool': {
             'AdditionalOptions': [
@@ -257,6 +254,9 @@
         # and their sheer number drowns out other, more legitimate warnings.
         'DisableSpecificWarnings': ['4267'],
         'WarnAsError': 'false',
+        'AdditionalOptions': [
+          '/MP', # compile across multiple CPUs
+        ],
       },
       'VCLibrarianTool': {
       },
diff --git a/doc/api/async_hooks.md b/doc/api/async_hooks.md
index f993b75afff..ed8a9f2e3ca 100644
--- a/doc/api/async_hooks.md
+++ b/doc/api/async_hooks.md
@@ -636,6 +636,6 @@ constructor.
 
 [`after` callback]: #async_hooks_after_asyncid
 [`before` callback]: #async_hooks_before_asyncid
-[`destroy` callback]: #async_hooks_before_asyncid
+[`destroy` callback]: #async_hooks_destroy_asyncid
 [`init` callback]: #async_hooks_init_asyncid_type_triggerasyncid_resource
 [Hook Callbacks]: #async_hooks_hook_callbacks
diff --git a/doc/api/stream.md b/doc/api/stream.md
index 8a758d75f35..d2af5cd9337 100644
--- a/doc/api/stream.md
+++ b/doc/api/stream.md
@@ -1437,9 +1437,12 @@ user programs.
 added: v8.0.0
 -->
 
-* `err` {Error} An error.
-* `callback` {Function} A callback function that takes an optional error argument
-  which is invoked when the writable is destroyed.
+* `err` {Error} A possible error.
+* `callback` {Function} A callback function that takes an optional error
+  argument.
+
+The `_destroy()` method is called by [`writable.destroy()`][writable-destroy].
+It can be overridden by child classes but it **must not** be called directly.
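+
+A minimal sketch of a subclass overriding `_destroy()` (the `MyWritable`
+class and its use of a `resource` property are hypothetical):
+
+```js
+const { Writable } = require('stream');
+
+class MyWritable extends Writable {
+  _write(chunk, encoding, callback) {
+    // Pretend to consume the chunk.
+    callback();
+  }
+
+  _destroy(err, callback) {
+    // Release the hypothetical underlying resource, then report completion,
+    // forwarding the error (if any) that triggered the destroy.
+    this.resource = null;
+    callback(err);
+  }
+}
+
+// `destroy()` invokes `_destroy()`; user code must not call `_destroy()`
+// directly.
+new MyWritable().destroy();
+```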
 
 #### writable.\_final(callback)
 <!-- YAML
@@ -1606,9 +1609,12 @@ user programs.
 added: v8.0.0
 -->
 
-* `err` {Error} An error.
+* `err` {Error} A possible error.
 * `callback` {Function} A callback function that takes an optional error
-  argument which is invoked when the readable is destroyed.
+  argument.
+
+The `_destroy()` method is called by [`readable.destroy()`][readable-destroy].
+It can be overridden by child classes but it **must not** be called directly.
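+
+A minimal sketch of the same pattern for a readable (again, `MyReadable` is
+hypothetical):
+
+```js
+const { Readable } = require('stream');
+
+class MyReadable extends Readable {
+  _read(size) {
+    // Produce no data; signal end-of-stream immediately.
+    this.push(null);
+  }
+
+  _destroy(err, callback) {
+    // Clean up any hypothetical resources, then forward the optional error.
+    callback(err);
+  }
+}
+
+// `destroy()` invokes `_destroy()`; user code must not call `_destroy()`
+// directly.
+new MyReadable().destroy();
+```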
 
 #### readable.push(chunk[, encoding])
 <!-- YAML
@@ -2232,4 +2238,6 @@ contain multi-byte characters.
 [stream-resume]: #stream_readable_resume
 [stream-write]: #stream_writable_write_chunk_encoding_callback
 [readable-_destroy]: #stream_readable_destroy_err_callback
+[readable-destroy]: #stream_readable_destroy_error
 [writable-_destroy]: #stream_writable_destroy_err_callback
+[writable-destroy]: #stream_writable_destroy_error
diff --git a/lib/_tls_wrap.js b/lib/_tls_wrap.js
index b5ac8a5c9bc..d7e349b239c 100644
--- a/lib/_tls_wrap.js
+++ b/lib/_tls_wrap.js
@@ -455,9 +455,8 @@ TLSSocket.prototype._init = function(socket, wrap) {
   var ssl = this._handle;
 
   // lib/net.js expect this value to be non-zero if write hasn't been flushed
-  // immediately
-  // TODO(indutny): revise this solution, it might be 1 before handshake and
-  // represent real writeQueueSize during regular writes.
+  // immediately. After the handshake is done, this will represent the
+  // actual write queue size.
   ssl.writeQueueSize = 1;
 
   this.server = options.server;
diff --git a/lib/events.js b/lib/events.js
index c7bd15dc5bd..6efc29108c3 100644
--- a/lib/events.js
+++ b/lib/events.js
@@ -22,6 +22,7 @@
 'use strict';
 
 var domain;
+var spliceOne;
 
 function EventEmitter() {
   EventEmitter.init.call(this);
@@ -54,9 +55,6 @@ Object.defineProperty(EventEmitter, 'defaultMaxListeners', {
     return defaultMaxListeners;
   },
   set: function(arg) {
-    // force global console to be compiled.
-    // see https://github.com/nodejs/node/issues/4467
-    console;
     // check whether the input is a positive number (whose value is zero or
     // greater and not a NaN).
     if (typeof arg !== 'number' || arg < 0 || arg !== arg) {
@@ -77,7 +75,8 @@ EventEmitter.init = function() {
     }
   }
 
-  if (!this._events || this._events === Object.getPrototypeOf(this)._events) {
+  if (this._events === undefined ||
+      this._events === Object.getPrototypeOf(this)._events) {
     this._events = Object.create(null);
     this._eventsCount = 0;
   }
@@ -163,24 +162,23 @@ function emitMany(handler, isFn, self, args) {
   }
 }
 
-EventEmitter.prototype.emit = function emit(type) {
-  var er, handler, len, args, i, events, domain;
-  var needDomainExit = false;
-  var doError = (type === 'error');
+EventEmitter.prototype.emit = function emit(type, ...args) {
+  let doError = (type === 'error');
 
-  events = this._events;
-  if (events)
-    doError = (doError && events.error == null);
+  const events = this._events;
+  if (events !== undefined)
+    doError = (doError && events.error === undefined);
   else if (!doError)
     return false;
 
-  domain = this.domain;
+  const domain = this.domain;
 
   // If there is no 'error' event listener then throw.
   if (doError) {
-    if (arguments.length > 1)
-      er = arguments[1];
-    if (domain) {
+    let er;
+    if (args.length > 0)
+      er = args[0];
+    if (domain !== null && domain !== undefined) {
       if (!er) {
         const errors = lazyErrors();
         er = new errors.Error('ERR_UNHANDLED_ERROR');
@@ -203,37 +201,32 @@ EventEmitter.prototype.emit = function emit(type) {
     return false;
   }
 
-  handler = events[type];
+  const handler = events[type];
 
-  if (!handler)
+  if (handler === undefined)
     return false;
 
-  if (domain && this !== process) {
+  let needDomainExit = false;
+  if (domain !== null && domain !== undefined && this !== process) {
     domain.enter();
     needDomainExit = true;
   }
 
-  var isFn = typeof handler === 'function';
-  len = arguments.length;
-  switch (len) {
-    // fast cases
-    case 1:
+  const isFn = typeof handler === 'function';
+  switch (args.length) {
+    case 0:
       emitNone(handler, isFn, this);
       break;
+    case 1:
+      emitOne(handler, isFn, this, args[0]);
+      break;
     case 2:
-      emitOne(handler, isFn, this, arguments[1]);
+      emitTwo(handler, isFn, this, args[0], args[1]);
       break;
     case 3:
-      emitTwo(handler, isFn, this, arguments[1], arguments[2]);
+      emitThree(handler, isFn, this, args[0], args[1], args[2]);
       break;
-    case 4:
-      emitThree(handler, isFn, this, arguments[1], arguments[2], arguments[3]);
-      break;
-    // slower
     default:
-      args = new Array(len - 1);
-      for (i = 1; i < len; i++)
-        args[i - 1] = arguments[i];
       emitMany(handler, isFn, this, args);
   }
 
@@ -254,13 +247,13 @@ function _addListener(target, type, listener, prepend) {
   }
 
   events = target._events;
-  if (!events) {
+  if (events === undefined) {
     events = target._events = Object.create(null);
     target._eventsCount = 0;
   } else {
     // To avoid recursion in the case that type === "newListener"! Before
     // adding it to the listeners, first emit "newListener".
-    if (events.newListener) {
+    if (events.newListener !== undefined) {
       target.emit('newListener', type,
                   listener.listener ? listener.listener : listener);
 
@@ -271,7 +264,7 @@ function _addListener(target, type, listener, prepend) {
     existing = events[type];
   }
 
-  if (!existing) {
+  if (existing === undefined) {
     // Optimize the case of one listener. Don't need the extra array object.
     existing = events[type] = listener;
     ++target._eventsCount;
@@ -335,10 +328,7 @@ function onceWrapper() {
         return this.listener.call(this.target, arguments[0], arguments[1],
                                   arguments[2]);
       default:
-        const args = new Array(arguments.length);
-        for (var i = 0; i < args.length; ++i)
-          args[i] = arguments[i];
-        this.listener.apply(this.target, args);
+        this.listener.apply(this.target, arguments);
     }
   }
 }
@@ -383,11 +373,11 @@ EventEmitter.prototype.removeListener =
       }
 
       events = this._events;
-      if (!events)
+      if (events === undefined)
         return this;
 
       list = events[type];
-      if (!list)
+      if (list === undefined)
         return this;
 
       if (list === listener || list.listener === listener) {
@@ -414,13 +404,16 @@ EventEmitter.prototype.removeListener =
 
         if (position === 0)
           list.shift();
-        else
+        else {
+          if (spliceOne === undefined)
+            spliceOne = require('internal/util').spliceOne;
           spliceOne(list, position);
+        }
 
         if (list.length === 1)
           events[type] = list[0];
 
-        if (events.removeListener)
+        if (events.removeListener !== undefined)
           this.emit('removeListener', type, originalListener || listener);
       }
 
@@ -432,15 +425,15 @@ EventEmitter.prototype.removeAllListeners =
       var listeners, events, i;
 
       events = this._events;
-      if (!events)
+      if (events === undefined)
         return this;
 
       // not listening for removeListener, no need to emit
-      if (!events.removeListener) {
+      if (events.removeListener === undefined) {
         if (arguments.length === 0) {
           this._events = Object.create(null);
           this._eventsCount = 0;
-        } else if (events[type]) {
+        } else if (events[type] !== undefined) {
           if (--this._eventsCount === 0)
             this._events = Object.create(null);
           else
@@ -468,7 +461,7 @@ EventEmitter.prototype.removeAllListeners =
 
       if (typeof listeners === 'function') {
         this.removeListener(type, listeners);
-      } else if (listeners) {
+      } else if (listeners !== undefined) {
         // LIFO order
         for (i = listeners.length - 1; i >= 0; i--) {
           this.removeListener(type, listeners[i]);
@@ -479,23 +472,19 @@ EventEmitter.prototype.removeAllListeners =
     };
 
 EventEmitter.prototype.listeners = function listeners(type) {
-  var evlistener;
-  var ret;
-  var events = this._events;
+  const events = this._events;
 
-  if (!events)
-    ret = [];
-  else {
-    evlistener = events[type];
-    if (!evlistener)
-      ret = [];
-    else if (typeof evlistener === 'function')
-      ret = [evlistener.listener || evlistener];
-    else
-      ret = unwrapListeners(evlistener);
-  }
+  if (events === undefined)
+    return [];
 
-  return ret;
+  const evlistener = events[type];
+  if (evlistener === undefined)
+    return [];
+
+  if (typeof evlistener === 'function')
+    return [evlistener.listener || evlistener];
+
+  return unwrapListeners(evlistener);
 };
 
 EventEmitter.listenerCount = function(emitter, type) {
@@ -510,12 +499,12 @@ EventEmitter.prototype.listenerCount = listenerCount;
 function listenerCount(type) {
   const events = this._events;
 
-  if (events) {
+  if (events !== undefined) {
     const evlistener = events[type];
 
     if (typeof evlistener === 'function') {
       return 1;
-    } else if (evlistener) {
+    } else if (evlistener !== undefined) {
       return evlistener.length;
     }
   }
@@ -527,13 +516,6 @@ EventEmitter.prototype.eventNames = function eventNames() {
   return this._eventsCount > 0 ? Reflect.ownKeys(this._events) : [];
 };
 
-// About 1.5x faster than the two-arg version of Array#splice().
-function spliceOne(list, index) {
-  for (var i = index, k = i + 1, n = list.length; k < n; i += 1, k += 1)
-    list[i] = list[k];
-  list.pop();
-}
-
 function arrayClone(arr, n) {
   var copy = new Array(n);
   for (var i = 0; i < n; ++i)
diff --git a/lib/internal/loader/Loader.js b/lib/internal/loader/Loader.js
index 57c70188d66..b1b99814f4f 100644
--- a/lib/internal/loader/Loader.js
+++ b/lib/internal/loader/Loader.js
@@ -47,6 +47,7 @@ class Loader {
       throw new errors.TypeError('ERR_INVALID_ARG_TYPE',
                                  'parentURL', 'string');
     }
+
     const { url, format } = await this.resolver(specifier, parentURL,
                                                 ModuleRequest.resolve);
 
diff --git a/lib/internal/loader/ModuleRequest.js b/lib/internal/loader/ModuleRequest.js
index 88e48ae9d3e..72f3dd3ee57 100644
--- a/lib/internal/loader/ModuleRequest.js
+++ b/lib/internal/loader/ModuleRequest.js
@@ -88,7 +88,14 @@ exports.resolve = (specifier, parentURL) => {
     };
   }
 
-  let url = search(specifier, parentURL);
+  let url;
+  try {
+    url = search(specifier, parentURL);
+  } catch (e) {
+    if (e.message && e.message.startsWith('Cannot find module'))
+      e.code = 'MODULE_NOT_FOUND';
+    throw e;
+  }
 
   if (url.protocol !== 'file:') {
     throw new errors.Error('ERR_INVALID_PROTOCOL',
diff --git a/lib/internal/util.js b/lib/internal/util.js
index f2d4b1facd3..4d1fcb3a608 100644
--- a/lib/internal/util.js
+++ b/lib/internal/util.js
@@ -271,6 +271,13 @@ function join(output, separator) {
   return str;
 }
 
+// About 1.5x faster than the two-arg version of Array#splice().
+function spliceOne(list, index) {
+  for (var i = index, k = i + 1, n = list.length; k < n; i += 1, k += 1)
+    list[i] = list[k];
+  list.pop();
+}
+
 module.exports = {
   assertCrypto,
   cachedResult,
@@ -281,10 +288,11 @@ module.exports = {
   filterDuplicateStrings,
   getConstructorOf,
   isError,
+  join,
   normalizeEncoding,
   objectToString,
   promisify,
-  join,
+  spliceOne,
 
   // Symbol used to customize promisify conversion
   customPromisifyArgs: kCustomPromisifyArgsSymbol,
diff --git a/lib/module.js b/lib/module.js
index 92b8fbbc392..d210c916252 100644
--- a/lib/module.js
+++ b/lib/module.js
@@ -424,39 +424,28 @@ Module._load = function(request, parent, isMain) {
     debug('Module._load REQUEST %s parent: %s', request, parent.id);
   }
 
-  var filename = null;
-
-  if (isMain) {
-    try {
-      filename = Module._resolveFilename(request, parent, isMain);
-    } catch (e) {
-      // try to keep stack
-      e.stack;
-      throw e;
-    }
-    if (experimentalModules) {
-      (async () => {
-        // loader setup
-        if (!ESMLoader) {
-          ESMLoader = new Loader();
-          const userLoader = process.binding('config').userLoader;
-          if (userLoader) {
-            const hooks = await new Loader().import(userLoader);
-            ESMLoader.hook(hooks);
-          }
+  if (isMain && experimentalModules) {
+    (async () => {
+      // loader setup
+      if (!ESMLoader) {
+        ESMLoader = new Loader();
+        const userLoader = process.binding('config').userLoader;
+        if (userLoader) {
+          const hooks = await new Loader().import(userLoader);
+          ESMLoader.hook(hooks);
         }
-        await ESMLoader.import(getURLFromFilePath(filename).href);
-      })()
-      .catch((e) => {
-        console.error(e);
-        process.exit(1);
-      });
-      return;
-    }
-  } else {
-    filename = Module._resolveFilename(request, parent, isMain);
+      }
+      await ESMLoader.import(getURLFromFilePath(request).href);
+    })()
+    .catch((e) => {
+      console.error(e);
+      process.exit(1);
+    });
+    return;
   }
 
+  var filename = Module._resolveFilename(request, parent, isMain);
+
   var cachedModule = Module._cache[filename];
   if (cachedModule) {
     updateChildren(parent, cachedModule, true);
diff --git a/lib/net.js b/lib/net.js
index 53b9d33f485..5356357fc97 100644
--- a/lib/net.js
+++ b/lib/net.js
@@ -397,6 +397,14 @@ Socket.prototype.setTimeout = function(msecs, callback) {
 
 
 Socket.prototype._onTimeout = function() {
+  // If the cached `writeQueueSize` is non-zero and differs from the value
+  // returned by `updateWriteQueueSize()`, a write is still in progress,
+  // so we suppress the timeout.
+  const prevWriteQueueSize = this._handle.writeQueueSize;
+  if (prevWriteQueueSize > 0 &&
+      prevWriteQueueSize !== this._handle.updateWriteQueueSize()) {
+    this._unrefTimer();
+    return;
+  }
   debug('_onTimeout');
   this.emit('timeout');
 };
diff --git a/lib/url.js b/lib/url.js
index 72e03e0f9c0..3734a0cad66 100644
--- a/lib/url.js
+++ b/lib/url.js
@@ -28,6 +28,8 @@ const { hexTable } = require('internal/querystring');
 
 const errors = require('internal/errors');
 
+const { spliceOne } = require('internal/util');
+
 // WHATWG URL implementation provided by internal/url
 const {
   URL,
@@ -950,13 +952,6 @@ Url.prototype.parseHost = function parseHost() {
   if (host) this.hostname = host;
 };
 
-// About 1.5x faster than the two-arg version of Array#splice().
-function spliceOne(list, index) {
-  for (var i = index, k = i + 1, n = list.length; k < n; i += 1, k += 1)
-    list[i] = list[k];
-  list.pop();
-}
-
 // These characters do not need escaping:
 // ! - . _ ~
 // ' ( ) * :
diff --git a/src/module_wrap.cc b/src/module_wrap.cc
index 829248b681c..bda760369e9 100644
--- a/src/module_wrap.cc
+++ b/src/module_wrap.cc
@@ -442,6 +442,11 @@ URL resolve_directory(const URL& search, bool read_pkg_json) {
 URL Resolve(std::string specifier, const URL* base, bool read_pkg_json) {
   URL pure_url(specifier);
   if (!(pure_url.flags() & URL_FLAGS_FAILED)) {
+    // Just check for existence, without altering the URL.
+    auto check = check_file(pure_url, true);
+    if (check.failed) {
+      return URL("");
+    }
     return pure_url;
   }
   if (specifier.length() == 0) {
@@ -493,9 +498,8 @@ void ModuleWrap::Resolve(const FunctionCallbackInfo<Value>& args) {
 
   URL result = node::loader::Resolve(*specifier_utf, &url, true);
   if (result.flags() & URL_FLAGS_FAILED) {
-    std::string msg = "module ";
+    std::string msg = "Cannot find module ";
     msg += *specifier_utf;
-    msg += " not found";
     env->ThrowError(msg.c_str());
     return;
   }
diff --git a/src/node.cc b/src/node.cc
index fe0b7e808fd..aaf1e853d08 100644
--- a/src/node.cc
+++ b/src/node.cc
@@ -4924,9 +4924,9 @@ Local<Context> NewContext(Isolate* isolate,
   auto intl_key = FIXED_ONE_BYTE_STRING(isolate, "Intl");
   auto break_iter_key = FIXED_ONE_BYTE_STRING(isolate, "v8BreakIterator");
   Local<Value> intl_v;
-  Local<Object> intl;
   if (context->Global()->Get(context, intl_key).ToLocal(&intl_v) &&
-      intl_v->ToObject(context).ToLocal(&intl)) {
+      intl_v->IsObject()) {
+    Local<Object> intl = intl_v.As<Object>();
     intl->Delete(context, break_iter_key).FromJust();
   }
   return context;
diff --git a/src/stream_wrap.cc b/src/stream_wrap.cc
index 660702eb354..0107cbad2d9 100644
--- a/src/stream_wrap.cc
+++ b/src/stream_wrap.cc
@@ -104,6 +104,7 @@ LibuvStreamWrap::LibuvStreamWrap(Environment* env,
 void LibuvStreamWrap::AddMethods(Environment* env,
                                  v8::Local<v8::FunctionTemplate> target,
                                  int flags) {
+  env->SetProtoMethod(target, "updateWriteQueueSize", UpdateWriteQueueSize);
   env->SetProtoMethod(target, "setBlocking", SetBlocking);
   StreamBase::AddMethods<LibuvStreamWrap>(env, target, flags);
 }
@@ -144,11 +145,14 @@ bool LibuvStreamWrap::IsIPCPipe() {
 }
 
 
-void LibuvStreamWrap::UpdateWriteQueueSize() {
+uint32_t LibuvStreamWrap::UpdateWriteQueueSize() {
   HandleScope scope(env()->isolate());
-  Local<Integer> write_queue_size =
-      Integer::NewFromUnsigned(env()->isolate(), stream()->write_queue_size);
-  object()->Set(env()->write_queue_size_string(), write_queue_size);
+  uint32_t write_queue_size = stream()->write_queue_size;
+  object()->Set(env()->context(),
+                env()->write_queue_size_string(),
+                Integer::NewFromUnsigned(env()->isolate(),
+                                         write_queue_size)).FromJust();
+  return write_queue_size;
 }
 
 
@@ -273,6 +277,16 @@ void LibuvStreamWrap::OnRead(uv_stream_t* handle,
 }
 
 
+void LibuvStreamWrap::UpdateWriteQueueSize(
+    const FunctionCallbackInfo<Value>& args) {
+  LibuvStreamWrap* wrap;
+  ASSIGN_OR_RETURN_UNWRAP(&wrap, args.Holder());
+
+  uint32_t write_queue_size = wrap->UpdateWriteQueueSize();
+  args.GetReturnValue().Set(write_queue_size);
+}
+
+
 void LibuvStreamWrap::SetBlocking(const FunctionCallbackInfo<Value>& args) {
   LibuvStreamWrap* wrap;
   ASSIGN_OR_RETURN_UNWRAP(&wrap, args.Holder());
diff --git a/src/stream_wrap.h b/src/stream_wrap.h
index d8fbcf709a8..43df504e81b 100644
--- a/src/stream_wrap.h
+++ b/src/stream_wrap.h
@@ -84,13 +84,15 @@ class LibuvStreamWrap : public HandleWrap, public StreamBase {
   }
 
   AsyncWrap* GetAsyncWrap() override;
-  void UpdateWriteQueueSize();
+  uint32_t UpdateWriteQueueSize();
 
   static void AddMethods(Environment* env,
                          v8::Local<v8::FunctionTemplate> target,
                          int flags = StreamBase::kFlagNone);
 
  private:
+  static void UpdateWriteQueueSize(
+      const v8::FunctionCallbackInfo<v8::Value>& args);
   static void SetBlocking(const v8::FunctionCallbackInfo<v8::Value>& args);
 
   // Callbacks for libuv
diff --git a/src/tls_wrap.cc b/src/tls_wrap.cc
index 738bd040718..63e3494047a 100644
--- a/src/tls_wrap.cc
+++ b/src/tls_wrap.cc
@@ -42,6 +42,7 @@ using v8::Exception;
 using v8::Function;
 using v8::FunctionCallbackInfo;
 using v8::FunctionTemplate;
+using v8::Integer;
 using v8::Local;
 using v8::Object;
 using v8::String;
@@ -297,6 +298,7 @@ void TLSWrap::EncOut() {
 
   // No data to write
   if (BIO_pending(enc_out_) == 0) {
+    UpdateWriteQueueSize();
     if (clear_in_->Length() == 0)
       InvokeQueued(0);
     return;
@@ -551,6 +553,18 @@ bool TLSWrap::IsClosing() {
 }
 
 
+uint32_t TLSWrap::UpdateWriteQueueSize(uint32_t write_queue_size) {
+  HandleScope scope(env()->isolate());
+  if (write_queue_size == 0)
+    write_queue_size = BIO_pending(enc_out_);
+  object()->Set(env()->context(),
+                env()->write_queue_size_string(),
+                Integer::NewFromUnsigned(env()->isolate(),
+                                         write_queue_size)).FromJust();
+  return write_queue_size;
+}
+
+
 int TLSWrap::ReadStart() {
   return stream_->ReadStart();
 }
@@ -591,8 +605,12 @@ int TLSWrap::DoWrite(WriteWrap* w,
     ClearOut();
     // However, if there is any data that should be written to the socket,
     // the callback should not be invoked immediately
-    if (BIO_pending(enc_out_) == 0)
+    if (BIO_pending(enc_out_) == 0) {
+      // net.js expects writeQueueSize to be > 0 if the write isn't
+      // immediately flushed
+      UpdateWriteQueueSize(1);
       return stream_->DoWrite(w, bufs, count, send_handle);
+    }
   }
 
   // Queue callback to execute it on next tick
@@ -642,13 +660,15 @@ int TLSWrap::DoWrite(WriteWrap* w,
 
   // Try writing data immediately
   EncOut();
+  UpdateWriteQueueSize();
 
   return 0;
 }
 
 
 void TLSWrap::OnAfterWriteImpl(WriteWrap* w, void* ctx) {
-  // Intentionally empty
+  TLSWrap* wrap = static_cast<TLSWrap*>(ctx);
+  wrap->UpdateWriteQueueSize();
 }
 
 
@@ -912,6 +932,15 @@ int TLSWrap::SelectSNIContextCallback(SSL* s, int* ad, void* arg) {
 #endif  // SSL_CTRL_SET_TLSEXT_SERVERNAME_CB
 
 
+void TLSWrap::UpdateWriteQueueSize(const FunctionCallbackInfo<Value>& args) {
+  TLSWrap* wrap;
+  ASSIGN_OR_RETURN_UNWRAP(&wrap, args.Holder());
+
+  uint32_t write_queue_size = wrap->UpdateWriteQueueSize();
+  args.GetReturnValue().Set(write_queue_size);
+}
+
+
 void TLSWrap::Initialize(Local<Object> target,
                          Local<Value> unused,
                          Local<Context> context) {
@@ -938,6 +967,7 @@ void TLSWrap::Initialize(Local<Object> target,
   env->SetProtoMethod(t, "enableSessionCallbacks", EnableSessionCallbacks);
   env->SetProtoMethod(t, "destroySSL", DestroySSL);
   env->SetProtoMethod(t, "enableCertCb", EnableCertCb);
+  env->SetProtoMethod(t, "updateWriteQueueSize", UpdateWriteQueueSize);
 
   StreamBase::AddMethods<TLSWrap>(env, t, StreamBase::kFlagHasWritev);
   SSLWrap<TLSWrap>::AddMethods(env, t);
diff --git a/src/tls_wrap.h b/src/tls_wrap.h
index fe3abe04f55..99d2dc9121f 100644
--- a/src/tls_wrap.h
+++ b/src/tls_wrap.h
@@ -132,6 +132,7 @@ class TLSWrap : public AsyncWrap,
 
   AsyncWrap* GetAsyncWrap() override;
   bool IsIPCPipe() override;
+  uint32_t UpdateWriteQueueSize(uint32_t write_queue_size = 0);
 
   // Resource implementation
   static void OnAfterWriteImpl(WriteWrap* w, void* ctx);
@@ -187,6 +188,10 @@ class TLSWrap : public AsyncWrap,
   // If true - delivered EOF to the js-land, either after `close_notify`, or
   // after the `UV_EOF` on socket.
   bool eof_;
+
+ private:
+  static void UpdateWriteQueueSize(
+      const v8::FunctionCallbackInfo<v8::Value>& args);
 };
 
 }  // namespace node
diff --git a/test/es-module/test-esm-preserve-symlinks-not-found-plain.mjs b/test/es-module/test-esm-preserve-symlinks-not-found-plain.mjs
new file mode 100644
index 00000000000..bfeb71ef3a6
--- /dev/null
+++ b/test/es-module/test-esm-preserve-symlinks-not-found-plain.mjs
@@ -0,0 +1,3 @@
+// Flags: --experimental-modules --loader ./test/fixtures/es-module-loaders/not-found-assert-loader.mjs
+/* eslint-disable required-modules */
+import './not-found.js';
diff --git a/test/es-module/test-esm-preserve-symlinks-not-found.mjs b/test/es-module/test-esm-preserve-symlinks-not-found.mjs
new file mode 100644
index 00000000000..22c888028e7
--- /dev/null
+++ b/test/es-module/test-esm-preserve-symlinks-not-found.mjs
@@ -0,0 +1,3 @@
+// Flags: --experimental-modules --loader ./test/fixtures/es-module-loaders/not-found-assert-loader.mjs
+/* eslint-disable required-modules */
+import './not-found';
diff --git a/test/fixtures/es-module-loaders/not-found-assert-loader.mjs b/test/fixtures/es-module-loaders/not-found-assert-loader.mjs
new file mode 100644
index 00000000000..7718cc7c4ba
--- /dev/null
+++ b/test/fixtures/es-module-loaders/not-found-assert-loader.mjs
@@ -0,0 +1,22 @@
+import assert from 'assert';
+
+// A loader that asserts that defaultResolve will throw "not found"
+// (skipping the top-level main, of course).
+let mainLoad = true;
+export async function resolve(specifier, base, defaultResolve) {
+  if (mainLoad) {
+    mainLoad = false;
+    return defaultResolve(specifier, base);
+  }
+  try {
+    await defaultResolve(specifier, base);
+  } catch (e) {
+    assert.strictEqual(e.code, 'MODULE_NOT_FOUND');
+    return {
+      format: 'builtin',
+      url: 'fs'
+    };
+  }
+  assert.fail(
+    `Module resolution for ${specifier} should have thrown MODULE_NOT_FOUND`);
+}
diff --git a/test/parallel/test-module-main-fail.js b/test/parallel/test-module-main-fail.js
new file mode 100644
index 00000000000..a6457f33b65
--- /dev/null
+++ b/test/parallel/test-module-main-fail.js
@@ -0,0 +1,21 @@
+'use strict';
+require('../common');
+const assert = require('assert');
+const { execFileSync } = require('child_process');
+
+const entryPoints = ['iDoNotExist', 'iDoNotExist.js', 'iDoNotExist.mjs'];
+const flags = [[], ['--experimental-modules']];
+const node = process.argv[0];
+
+for (const args of flags) {
+  for (const entryPoint of entryPoints) {
+    try {
+      execFileSync(node, args.concat(entryPoint), { stdio: 'pipe' });
+    } catch (e) {
+      assert(e.toString().match(/Error: Cannot find module/));
+      continue;
+    }
+    assert.fail('Executing node with a nonexistent entry point should ' +
+                `fail. Entry point: ${entryPoint}, Flags: [${args}]`);
+  }
+}
diff --git a/test/parallel/test-module-main-preserve-symlinks-fail.js b/test/parallel/test-module-main-preserve-symlinks-fail.js
new file mode 100644
index 00000000000..b46497b6252
--- /dev/null
+++ b/test/parallel/test-module-main-preserve-symlinks-fail.js
@@ -0,0 +1,21 @@
+'use strict';
+require('../common');
+const assert = require('assert');
+const { execFileSync } = require('child_process');
+
+const entryPoints = ['iDoNotExist', 'iDoNotExist.js', 'iDoNotExist.mjs'];
+const flags = [[], ['--experimental-modules', '--preserve-symlinks']];
+const node = process.argv[0];
+
+for (const args of flags) {
+  for (const entryPoint of entryPoints) {
+    try {
+      execFileSync(node, args.concat(entryPoint));
+    } catch (e) {
+      assert(e.toString().match(/Error: Cannot find module/));
+      continue;
+    }
+    assert.fail('Executing node with a nonexistent entry point should ' +
+                `fail. Entry point: ${entryPoint}, Flags: [${args}]`);
+  }
+}
diff --git a/test/parallel/test-tls-buffersize.js b/test/parallel/test-tls-buffersize.js
new file mode 100644
index 00000000000..49848cd865a
--- /dev/null
+++ b/test/parallel/test-tls-buffersize.js
@@ -0,0 +1,43 @@
+'use strict';
+const common = require('../common');
+if (!common.hasCrypto)
+  common.skip('missing crypto');
+const assert = require('assert');
+const fixtures = require('../common/fixtures');
+const tls = require('tls');
+
+const iter = 10;
+const overhead = 30;
+
+const server = tls.createServer({
+  key: fixtures.readKey('agent2-key.pem'),
+  cert: fixtures.readKey('agent2-cert.pem')
+}, common.mustCall((socket) => {
+  socket.on('readable', common.mustCallAtLeast(() => {
+    socket.read();
+  }, 1));
+
+  socket.on('end', common.mustCall(() => {
+    server.close();
+  }));
+}));
+
+server.listen(0, common.mustCall(() => {
+  const client = tls.connect({
+    port: server.address().port,
+    rejectUnauthorized: false
+  }, common.mustCall(() => {
+    assert.strictEqual(client.bufferSize, 0);
+
+    for (let i = 1; i < iter; i++) {
+      client.write('a');
+      assert.strictEqual(client.bufferSize, i + overhead);
+    }
+
+    client.on('finish', common.mustCall(() => {
+      assert.strictEqual(client.bufferSize, 0);
+    }));
+
+    client.end();
+  }));
+}));
diff --git a/test/parallel/test-tls-delayed-attach.js b/test/parallel/test-tls-delayed-attach.js
index 9ab61156658..fc5eaaa884c 100644
--- a/test/parallel/test-tls-delayed-attach.js
+++ b/test/parallel/test-tls-delayed-attach.js
@@ -24,17 +24,17 @@ const common = require('../common');
 if (!common.hasCrypto)
   common.skip('missing crypto');
 
+const fixtures = require('../common/fixtures');
 const assert = require('assert');
 const tls = require('tls');
-const fs = require('fs');
 const net = require('net');
 
 const sent = 'hello world';
 let received = '';
 
 const options = {
-  key: fs.readFileSync(`${common.fixturesDir}/keys/agent1-key.pem`),
-  cert: fs.readFileSync(`${common.fixturesDir}/keys/agent1-cert.pem`)
+  key: fixtures.readKey('agent1-key.pem'),
+  cert: fixtures.readKey('agent1-cert.pem')
 };
 
 const server = net.createServer(function(c) {
diff --git a/test/sequential/test-http-keep-alive-large-write.js b/test/sequential/test-http-keep-alive-large-write.js
new file mode 100644
index 00000000000..2cdf539e76b
--- /dev/null
+++ b/test/sequential/test-http-keep-alive-large-write.js
@@ -0,0 +1,80 @@
+'use strict';
+const common = require('../common');
+const assert = require('assert');
+const http = require('http');
+
+// This test assesses whether long-running writes can complete, or whether
+// they time out because the socket is not aware that the backing stream is
+// still writing.
+// To simulate a slow client, we write a really large chunk and
+// then proceed through the following cycle:
+// 1) Receive first 'data' event and record currently written size
+// 2) Once we've read up to currently written size recorded above,
+//    we pause the stream and wait longer than the server timeout
+// 3) Socket.prototype._onTimeout triggers and should confirm
+//    that the backing stream is still active and writing
+// 4) Our timer fires, we resume the socket and start at 1)
+
+const minReadSize = 250000;
+const serverTimeout = common.platformTimeout(500);
+let offsetTimeout = common.platformTimeout(100);
+let serverConnectionHandle;
+let writeSize = 3000000;
+let didReceiveData = false;
+// This represents each cycle's write size, where the cycle consists
+// of `write > read > _onTimeout`.
+let currentWriteSize = 0;
+
+const server = http.createServer(common.mustCall((req, res) => {
+  const content = Buffer.alloc(writeSize, 0x44);
+
+  res.writeHead(200, {
+    'Content-Type': 'application/octet-stream',
+    'Content-Length': content.length.toString(),
+    'Vary': 'Accept-Encoding'
+  });
+
+  serverConnectionHandle = res.socket._handle;
+  res.write(content);
+  res.end();
+}));
+server.setTimeout(serverTimeout);
+server.on('timeout', () => {
+  assert.strictEqual(didReceiveData, false, 'Should not timeout');
+});
+
+server.listen(0, common.mustCall(() => {
+  http.get({
+    path: '/',
+    port: server.address().port
+  }, common.mustCall((res) => {
+    const resume = () => res.resume();
+    let receivedBufferLength = 0;
+    let firstReceivedAt;
+    res.on('data', common.mustCallAtLeast((buf) => {
+      if (receivedBufferLength === 0) {
+        currentWriteSize = Math.max(
+          minReadSize,
+          writeSize - serverConnectionHandle.writeQueueSize
+        );
+        didReceiveData = false;
+        firstReceivedAt = Date.now();
+      }
+      receivedBufferLength += buf.length;
+      if (receivedBufferLength >= currentWriteSize) {
+        didReceiveData = true;
+        writeSize = serverConnectionHandle.writeQueueSize;
+        receivedBufferLength = 0;
+        res.pause();
+        setTimeout(
+          resume,
+          serverTimeout + offsetTimeout - (Date.now() - firstReceivedAt)
+        );
+        offsetTimeout = 0;
+      }
+    }, 1));
+    res.on('end', common.mustCall(() => {
+      server.close();
+    }));
+  }));
+}));
diff --git a/test/sequential/test-https-keep-alive-large-write.js b/test/sequential/test-https-keep-alive-large-write.js
new file mode 100644
index 00000000000..88468dc03fc
--- /dev/null
+++ b/test/sequential/test-https-keep-alive-large-write.js
@@ -0,0 +1,87 @@
+'use strict';
+const common = require('../common');
+if (!common.hasCrypto)
+  common.skip('missing crypto');
+const assert = require('assert');
+const fixtures = require('../common/fixtures');
+const https = require('https');
+
+// This test assesses whether long-running writes can complete, or whether
+// they time out because the socket is not aware that the backing stream is
+// still writing.
+// To simulate a slow client, we write a really large chunk and
+// then proceed through the following cycle:
+// 1) Receive first 'data' event and record currently written size
+// 2) Once we've read up to currently written size recorded above,
+//    we pause the stream and wait longer than the server timeout
+// 3) Socket.prototype._onTimeout triggers and should confirm
+//    that the backing stream is still active and writing
+// 4) Our timer fires, we resume the socket and start at 1)
+
+const minReadSize = 250000;
+const serverTimeout = common.platformTimeout(500);
+let offsetTimeout = common.platformTimeout(100);
+let serverConnectionHandle;
+let writeSize = 2000000;
+let didReceiveData = false;
+// This represents each cycle's write size, where the cycle consists
+// of `write > read > _onTimeout`.
+let currentWriteSize = 0;
+
+const server = https.createServer({
+  key: fixtures.readKey('agent1-key.pem'),
+  cert: fixtures.readKey('agent1-cert.pem')
+}, common.mustCall((req, res) => {
+  const content = Buffer.alloc(writeSize, 0x44);
+
+  res.writeHead(200, {
+    'Content-Type': 'application/octet-stream',
+    'Content-Length': content.length.toString(),
+    'Vary': 'Accept-Encoding'
+  });
+
+  serverConnectionHandle = res.socket._handle;
+  res.write(content);
+  res.end();
+}));
+server.setTimeout(serverTimeout);
+server.on('timeout', () => {
+  assert.strictEqual(didReceiveData, false, 'Should not timeout');
+});
+
+server.listen(0, common.mustCall(() => {
+  https.get({
+    path: '/',
+    port: server.address().port,
+    rejectUnauthorized: false
+  }, common.mustCall((res) => {
+    const resume = () => res.resume();
+    let receivedBufferLength = 0;
+    let firstReceivedAt;
+    res.on('data', common.mustCallAtLeast((buf) => {
+      if (receivedBufferLength === 0) {
+        currentWriteSize = Math.max(
+          minReadSize,
+          writeSize - serverConnectionHandle.writeQueueSize
+        );
+        didReceiveData = false;
+        firstReceivedAt = Date.now();
+      }
+      receivedBufferLength += buf.length;
+      if (receivedBufferLength >= currentWriteSize) {
+        didReceiveData = true;
+        writeSize = serverConnectionHandle.writeQueueSize;
+        receivedBufferLength = 0;
+        res.pause();
+        setTimeout(
+          resume,
+          serverTimeout + offsetTimeout - (Date.now() - firstReceivedAt)
+        );
+        offsetTimeout = 0;
+      }
+    }, 1));
+    res.on('end', common.mustCall(() => {
+      server.close();
+    }));
+  }));
+}));