diff --git a/.eslintrc.yml b/.eslintrc.yml index a2ad129399833..b1702c9115eb3 100644 --- a/.eslintrc.yml +++ b/.eslintrc.yml @@ -22,6 +22,7 @@ ignorePatterns: - "src/emrun_postjs.js" - "src/worker.js" - "src/wasm_worker.js" + - "src/audio_worklet.js" - "src/wasm2js.js" - "src/webGLClient.js" - "src/webGLWorker.js" diff --git a/ChangeLog.md b/ChangeLog.md index 3a96486f7d375..707915aeaa623 100644 --- a/ChangeLog.md +++ b/ChangeLog.md @@ -40,6 +40,8 @@ See docs/process.md for more on how version tagging works. - --pre-js and --post-js files are now fed through the JS preprocesor, just like JS library files and the core runtime JS files. This means they can now contain #if/#else/#endif blocks and {{{ }}} macro blocks. (#18525) +- Added support for Wasm-based AudioWorklets for realtime audio processing + (#16449) - `-sEXPORT_ALL` can now be used to export symbols on the `Module` object when used with `-sMINIMA_RUNTIME` and `-sMODULARIZE` together. (#17911) - The llvm version that emscripten uses was updated to 17.0.0 trunk. diff --git a/emcc.py b/emcc.py index f806bfda80f67..0b741e1ae7184 100755 --- a/emcc.py +++ b/emcc.py @@ -2360,7 +2360,7 @@ def phase_linker_setup(options, state, newargs): if settings.WASM_WORKERS: # TODO: After #15982 is resolved, these dependencies can be declared in library_wasm_worker.js # instead of having to record them here. - wasm_worker_imports = ['_emscripten_wasm_worker_initialize'] + wasm_worker_imports = ['_emscripten_wasm_worker_initialize', '___set_thread_state'] settings.EXPORTED_FUNCTIONS += wasm_worker_imports building.user_requested_exports.update(wasm_worker_imports) settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE += ['_wasm_worker_initializeRuntime'] @@ -2369,6 +2369,19 @@ def phase_linker_setup(options, state, newargs): settings.WASM_WORKER_FILE = unsuffixed(os.path.basename(target)) + '.ww.js' settings.JS_LIBRARIES.append((0, shared.path_from_root('src', 'library_wasm_worker.js'))) + settings.SUPPORTS_GLOBALTHIS = feature_matrix.caniuse(feature_matrix.Feature.GLOBALTHIS) + + if settings.AUDIO_WORKLET: + if not settings.SUPPORTS_GLOBALTHIS: + exit_with_error('Must target recent enough browser versions that will support globalThis in order to target Wasm Audio Worklets!') + if settings.AUDIO_WORKLET == 1: + settings.AUDIO_WORKLET_FILE = unsuffixed(os.path.basename(target)) + '.aw.js' + settings.JS_LIBRARIES.append((0, shared.path_from_root('src', 'library_webaudio.js'))) + if not settings.MINIMAL_RUNTIME: + # MINIMAL_RUNTIME exports these manually, since this export mechanism is placed + # in global scope that is not suitable for MINIMAL_RUNTIME loader. + settings.EXPORTED_RUNTIME_METHODS += ['stackSave', 'stackAlloc', 'stackRestore'] + if settings.FORCE_FILESYSTEM and not settings.MINIMAL_RUNTIME: # when the filesystem is forced, we export by default methods that filesystem usage # may need, including filesystem usage from standalone file packager output (i.e. 
@@ -3143,6 +3156,17 @@ def phase_final_emitting(options, state, target, wasm_target, memfile):
       minified_worker = building.acorn_optimizer(worker_output, ['minifyWhitespace'], return_output=True)
       write_file(worker_output, minified_worker)
 
+  # Deploy the Audio Worklet module bootstrap file (*.aw.js)
+  if settings.AUDIO_WORKLET == 1:
+    worklet_output = os.path.join(target_dir, settings.AUDIO_WORKLET_FILE)
+    with open(worklet_output, 'w') as f:
+      f.write(shared.read_and_preprocess(shared.path_from_root('src', 'audio_worklet.js'), expand_macros=True))
+
+    # Minify the audio_worklet.js file in optimized builds
+    if (settings.OPT_LEVEL >= 1 or settings.SHRINK_LEVEL >= 1) and not settings.DEBUG_LEVEL:
+      minified_worklet = building.acorn_optimizer(worklet_output, ['minifyWhitespace'], return_output=True)
+      write_file(worklet_output, minified_worklet)
+
   # track files that will need native eols
   generated_text_files_with_native_eols = []
 
@@ -3800,11 +3824,16 @@ def modularize():
   return %(return_value)s
 }
 
+%(capture_module_function_for_audio_worklet)s
 ''' % {
     'maybe_async': async_emit,
     'EXPORT_NAME': settings.EXPORT_NAME,
     'src': src,
-    'return_value': return_value
+    'return_value': return_value,
+    # Given the async nature of how the Module function and Module object come into existence in AudioWorkletGlobalScope,
+    # store the Module function under a different variable name so that AudioWorkletGlobalScope will be able to reference
+    # it without aliasing/conflicting with the Module variable name.
+    'capture_module_function_for_audio_worklet': 'globalThis.AudioWorkletModule = Module;' if settings.AUDIO_WORKLET and settings.MODULARIZE else ''
   }
 
   if settings.MINIMAL_RUNTIME and not settings.USE_PTHREADS:
@@ -3864,14 +3893,15 @@ def module_export_name_substitution():
   logger.debug(f'Private module export name substitution with {settings.EXPORT_NAME}')
   src = read_file(final_js)
   final_js += '.module_export_name_substitution.js'
-  if settings.MINIMAL_RUNTIME and not settings.ENVIRONMENT_MAY_BE_NODE and not settings.ENVIRONMENT_MAY_BE_SHELL:
+  if settings.MINIMAL_RUNTIME and not settings.ENVIRONMENT_MAY_BE_NODE and not settings.ENVIRONMENT_MAY_BE_SHELL and not settings.AUDIO_WORKLET:
     # On the web, with MINIMAL_RUNTIME, the Module object is always provided
     # via the shell html in order to provide the .asm.js/.wasm content.
     replacement = settings.EXPORT_NAME
   else:
     replacement = "typeof %(EXPORT_NAME)s !== 'undefined' ? %(EXPORT_NAME)s : {}" % {"EXPORT_NAME": settings.EXPORT_NAME}
-  src = re.sub(r'{\s*[\'"]?__EMSCRIPTEN_PRIVATE_MODULE_EXPORT_NAME_SUBSTITUTION__[\'"]?:\s*1\s*}', replacement, src)
-  write_file(final_js, src)
+  new_src = re.sub(r'{\s*[\'"]?__EMSCRIPTEN_PRIVATE_MODULE_EXPORT_NAME_SUBSTITUTION__[\'"]?:\s*1\s*}', replacement, src)
+  assert new_src != src, 'Unable to find the Closure marker __EMSCRIPTEN_PRIVATE_MODULE_EXPORT_NAME_SUBSTITUTION__ in the source!'
+  write_file(final_js, new_src)
   shared.get_temp_files().note(final_js)
   save_intermediate('module_export_name_substitution')
 
diff --git a/site/source/docs/api_reference/index.rst b/site/source/docs/api_reference/index.rst
index d5213e90c8b6b..fa1a64bb218d3 100644
--- a/site/source/docs/api_reference/index.rst
+++ b/site/source/docs/api_reference/index.rst
@@ -25,6 +25,9 @@ high level it consists of:
 - :ref:`wasm_workers`:
   Enables writing multithreaded programs using a web-like API.
 
+- :ref:`wasm_audio_worklets`:
+  Allows programs to implement audio processing nodes that run on a dedicated
+  real-time audio processing thread.
+
 - :ref:`Module`:
   Global JavaScript object that can be used to control code execution and
   access exported methods.
 
@@ -64,4 +67,5 @@ high level it consists of:
    fiber.h
    proxying.h
    wasm_workers
+   wasm_audio_worklets
    advanced-apis
diff --git a/site/source/docs/api_reference/wasm_audio_worklets.rst b/site/source/docs/api_reference/wasm_audio_worklets.rst
new file mode 100644
index 0000000000000..43b2fe5c1a40d
--- /dev/null
+++ b/site/source/docs/api_reference/wasm_audio_worklets.rst
@@ -0,0 +1,188 @@
+.. _wasm_audio_worklets:
+
+=======================
+Wasm Audio Worklets API
+=======================
+
+The AudioWorklet extension to the `Web Audio API specification
+<https://webaudio.github.io/web-audio-api/>`_ enables web sites
+to implement custom AudioWorkletProcessor Web Audio graph node types.
+
+These custom processor nodes process audio data in real time as part of the
+audio graph processing flow, and enable developers to write low-latency
+audio processing code in JavaScript.
+
+The Emscripten Wasm Audio Worklets API is an Emscripten-specific integration
+of these AudioWorklet nodes with WebAssembly. Wasm Audio Worklets enable
+developers to implement AudioWorklet processing nodes in C/C++ code that
+compiles down to WebAssembly, rather than in JavaScript.
+
+Developing AudioWorkletProcessors in WebAssembly provides improved
+performance compared to JavaScript, and the Emscripten Wasm Audio Worklets
+runtime has been carefully developed to guarantee that no temporary
+JavaScript-level VM garbage is generated, eliminating the possibility of GC
+pauses impacting audio synthesis performance.
+
+The Audio Worklets API is based on the Wasm Workers feature. It is possible
+to also enable the ``-pthread`` option while targeting Audio Worklets, but
+the audio worklets will always run in a Wasm Worker, not in a pthread.
+
+Development Overview
+====================
+
+Authoring Wasm Audio Worklets is similar to developing Audio Worklets
+API based applications in JS (see `MDN: Using AudioWorklets
+<https://developer.mozilla.org/en-US/docs/Web/API/Web_Audio_API/Using_AudioWorklet>`_),
+with the exception that users do not manually implement the JS code of the
+AudioWorkletProcessor in the AudioWorkletGlobalScope. This is managed
+automatically by the Emscripten Wasm AudioWorklets runtime.
+
+Instead, application developers need to implement a small amount of JS <-> Wasm
+(C/C++) interop to interact with the AudioContext and AudioNodes from Wasm.
+
+Audio Worklets follow a two-layer "class type and instance" design: first,
+one defines one or more node types (or classes) called AudioWorkletProcessors,
+and then these processors are instantiated one or more times in the audio
+processing graph as AudioWorkletNodes.
+
+Once a class type is instantiated on the Web Audio graph and the graph is
+running, a C/C++ function pointer callback will be invoked for every 128
+samples of the processed audio stream that flows through the node.
+
+This callback will be executed on a dedicated separate audio processing
+thread with real-time processing priority. Each Web Audio context will
+utilize only a single audio processing thread. That is, even if there are
+multiple audio node instances (possibly from multiple different audio
+processors), they will all share the single dedicated audio thread of the
+AudioContext, rather than each running on a separate thread of its own.
+
+Note that audio worklet node processing is pull-mode and callback based:
+Audio Worklets do not allow the creation of general-purpose real-time
+prioritized threads. The audio callback code should execute as quickly as
+possible and be non-blocking. In other words, spinning on a custom
+``for(;;)`` loop is not possible.
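+
+To illustrate the shape of such a callback before walking through a complete
+example, below is a minimal sketch (the function name ``ProcessAudio`` is
+hypothetical) that copies each input through to the corresponding output. The
+128-sample data layout follows the ``AudioSampleFrame`` struct documented in
+``emscripten/webaudio.h``:
+
+.. code-block:: cpp
+
+  #include <emscripten/webaudio.h>
+  #include <string.h>
+
+  EM_BOOL ProcessAudio(int numInputs, const AudioSampleFrame *inputs,
+                       int numOutputs, AudioSampleFrame *outputs,
+                       int numParams, const AudioParamFrame *params,
+                       void *userData)
+  {
+    for(int i = 0; i < numInputs && i < numOutputs; ++i)
+      if (inputs[i].numberOfChannels == outputs[i].numberOfChannels)
+        // Channel c of a frame occupies data[c*128 .. c*128+127].
+        memcpy(outputs[i].data, inputs[i].data, inputs[i].numberOfChannels*128*sizeof(float));
+    return EM_TRUE; // Keep this node running
+  }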
+
+Programming Example
+===================
+
+To get hands-on experience with programming Wasm Audio Worklets, let's create
+a simple audio node that outputs random noise through its output channels.
+
+1. First, we will create a Web Audio context in C/C++ code. This is achieved
+via the ``emscripten_create_audio_context()`` function. In a larger application
+that integrates existing Web Audio libraries, you may already have an
+``AudioContext`` created via some other library, in which case you would instead
+register that context to be visible to WebAssembly by calling the function
+``emscriptenRegisterAudioObject()``.
+
+Then, we will instruct the Emscripten runtime to initialize a Wasm Audio Worklet
+thread scope on this context. The code to achieve these tasks looks like:
+
+.. code-block:: cpp
+
+  #include <emscripten/webaudio.h>
+
+  uint8_t audioThreadStack[4096];
+
+  int main()
+  {
+    EMSCRIPTEN_WEBAUDIO_T context = emscripten_create_audio_context(0);
+
+    emscripten_start_wasm_audio_worklet_thread_async(context, audioThreadStack, sizeof(audioThreadStack),
+                                                     &AudioThreadInitialized, 0);
+  }
+
+2. When the worklet thread context has been initialized, we are ready to define
+our own noise generator AudioWorkletProcessor node type:
+
+.. code-block:: cpp
+
+  void AudioThreadInitialized(EMSCRIPTEN_WEBAUDIO_T audioContext, EM_BOOL success, void *userData)
+  {
+    if (!success) return; // Check browser console in a debug build for detailed errors
+    WebAudioWorkletProcessorCreateOptions opts = {
+      .name = "noise-generator",
+    };
+    emscripten_create_wasm_audio_worklet_processor_async(audioContext, &opts, &AudioWorkletProcessorCreated, 0);
+  }
+
+3. After the processor has been initialized, we can instantiate and connect it
+as a node on the graph. Since on web pages audio playback can only be initiated
+in response to user input, we also register an event handler that resumes the
+audio context when the user clicks on the Canvas DOM element on the page.
+
+.. code-block:: cpp
+
+  void AudioWorkletProcessorCreated(EMSCRIPTEN_WEBAUDIO_T audioContext, EM_BOOL success, void *userData)
+  {
+    if (!success) return; // Check browser console in a debug build for detailed errors
+
+    int outputChannelCounts[1] = { 1 };
+    EmscriptenAudioWorkletNodeCreateOptions options = {
+      .numberOfInputs = 0,
+      .numberOfOutputs = 1,
+      .outputChannelCounts = outputChannelCounts
+    };
+
+    // Create node
+    EMSCRIPTEN_AUDIO_WORKLET_NODE_T wasmAudioWorklet = emscripten_create_wasm_audio_worklet_node(audioContext,
+                                            "noise-generator", &options, &GenerateNoise, 0);
+
+    // Connect it to audio context destination
+    EM_ASM({emscriptenGetAudioObject($0).connect(emscriptenGetAudioObject($1).destination)},
+      wasmAudioWorklet, audioContext);
+
+    // Resume context on mouse click
+    emscripten_set_click_callback("canvas", (void*)audioContext, 0, OnCanvasClick);
+  }
+
+4. The code to resume the audio context on click looks like this:
+
+.. code-block:: cpp
+
+  EM_BOOL OnCanvasClick(int eventType, const EmscriptenMouseEvent *mouseEvent, void *userData)
+  {
+    EMSCRIPTEN_WEBAUDIO_T audioContext = (EMSCRIPTEN_WEBAUDIO_T)userData;
+    if (emscripten_audio_context_state(audioContext) != AUDIO_CONTEXT_STATE_RUNNING) {
+      emscripten_resume_audio_context_sync(audioContext);
+    }
+    return EM_FALSE;
+  }
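+
+As an aside, the processor registered in step 2 declared no audio parameters.
+If the node should also expose Web Audio AudioParams (see the synchronization
+section below), they can be declared at registration time. The following
+sketch is illustrative only; the single volume parameter is an assumption made
+for demonstration and is not part of the example above:
+
+.. code-block:: cpp
+
+  WebAudioParamDescriptor volumeParam = {
+    .defaultValue = 1.0f,
+    .minValue = 0.0f,
+    .maxValue = 1.0f,
+    .automationRate = WEBAUDIO_PARAM_A_RATE // sample-accurate automation
+  };
+
+  WebAudioWorkletProcessorCreateOptions opts = {
+    .name = "noise-generator",
+    .numAudioParams = 1,
+    .audioParamDescriptors = &volumeParam
+  };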
+5. Finally, we can implement the audio callback that generates the noise:
+
+.. code-block:: cpp
+
+  #include <emscripten/emscripten.h> // for emscripten_random()
+
+  EM_BOOL GenerateNoise(int numInputs, const AudioSampleFrame *inputs,
+                        int numOutputs, AudioSampleFrame *outputs,
+                        int numParams, const AudioParamFrame *params,
+                        void *userData)
+  {
+    for(int i = 0; i < numOutputs; ++i)
+      for(int j = 0; j < 128*outputs[i].numberOfChannels; ++j)
+        outputs[i].data[j] = emscripten_random() * 0.2 - 0.1; // Warning: scale down audio volume by factor of 0.2, raw noise can be really loud otherwise
+
+    return EM_TRUE; // Keep the graph output going
+  }
+
+And that's it! Compile the code with the linker flags ``-sAUDIO_WORKLET=1
+-sWASM_WORKERS=1`` to target AudioWorklets.
+
+Synchronizing audio thread with the main thread
+===============================================
+
+The Wasm Audio Worklets API builds on top of the Emscripten Wasm Workers
+feature: the Wasm Audio Worklet thread is modeled as if it were a Wasm Worker
+thread.
+
+To synchronize information between an Audio Worklet Node and other threads in
+the application, there are three options:
+
+1. Leverage the Web Audio "AudioParams" model. Each Audio Worklet Processor
+type is instantiated with a custom-defined set of audio parameters that can
+affect the audio computation with sample-accurate precision. These parameters
+are passed to the audio processing function in the ``params`` array.
+
+The main browser thread that created the Web Audio context can adjust the
+values of these parameters whenever desired. See `MDN function: setValueAtTime
+<https://developer.mozilla.org/en-US/docs/Web/API/AudioParam/setValueAtTime>`_.
+
+2. Data can be shared with the Audio Worklet thread using GCC/Clang lock-free
+atomic operations, Emscripten atomic operations, and the Wasm Worker API
+thread synchronization primitives. See :ref:`wasm_workers` for more
+information.
+
+3. Utilize the ``emscripten_audio_worklet_post_function_*()`` family of event
+passing functions. These functions operate similarly to the
+``emscripten_wasm_worker_post_function_*()`` family. Posting functions enables
+a ``postMessage()`` style of communication, where the audio worklet thread and
+the main browser thread can send messages (function call dispatches) to each
+other.
+
+
+More Examples
+=============
+
+See the directory ``tests/webaudio/`` for more examples of using the Web Audio
+API and Wasm AudioWorklets.
diff --git a/src/audio_worklet.js b/src/audio_worklet.js
new file mode 100644
index 0000000000000..c24d6f47a45f4
--- /dev/null
+++ b/src/audio_worklet.js
@@ -0,0 +1,179 @@
+// This file is the main bootstrap script for Wasm Audio Worklets loaded in an Emscripten application.
+// Build with the -sAUDIO_WORKLET=1 linker flag to enable targeting Audio Worklets.
+
+// AudioWorkletGlobalScope does not have an onmessage/postMessage() functionality at the global scope, which
+// means that after creating an AudioWorkletGlobalScope and loading this script into it, we cannot
+// postMessage() information into it like one would do with Web Workers.
+
+// Instead, we must create an AudioWorkletProcessor class, then instantiate a Web Audio graph node from it
+// on the main thread. Using its message port and the node constructor's
+// "processorOptions" field, we can share the necessary bootstrap information from the main thread to
+// the AudioWorkletGlobalScope.
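+
+// createWasmAudioWorkletProcessor() below constructs the AudioWorkletProcessor subclass that
+// marshals audio buffers between JS and Wasm for each 128-sample render quantum. The format used
+// on the Wasm stack mirrors the C-side structs in emscripten/webaudio.h: each AudioSampleFrame is
+// an 8-byte pair of u32 fields (numberOfChannels, data pointer), and each audio channel is 128 f32
+// samples, i.e. 512 bytes - hence the constants 8 and 512 in the stack size math in process().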
+ +function createWasmAudioWorkletProcessor(audioParams) { + class WasmAudioWorkletProcessor extends AudioWorkletProcessor { + constructor(args) { + super(); + + // Copy needed stack allocation functions from the Module object + // to global scope, these will be accessed in hot paths, so maybe + // they'll be a bit faster to access directly, rather than referencing + // them as properties of the Module object. + globalThis.stackAlloc = Module['stackAlloc']; + globalThis.stackSave = Module['stackSave']; + globalThis.stackRestore = Module['stackRestore']; + globalThis.HEAPU32 = Module['HEAPU32']; + globalThis.HEAPF32 = Module['HEAPF32']; + + // Capture the Wasm function callback to invoke. + let opts = args.processorOptions; + this.callbackFunction = Module['wasmTable'].get(opts['cb']); + this.userData = opts['ud']; + } + + static get parameterDescriptors() { + return audioParams; + } + + process(inputList, outputList, parameters) { + // Marshal all inputs and parameters to the Wasm memory on the thread stack, + // then perform the wasm audio worklet call, + // and finally marshal audio output data back. + + let numInputs = inputList.length, + numOutputs = outputList.length, + numParams = 0, i, j, k, dataPtr, + stackMemoryNeeded = (numInputs + numOutputs) * 8, + oldStackPtr = stackSave(), + inputsPtr, outputsPtr, outputDataPtr, paramsPtr, + didProduceAudio, paramArray; + + // Calculate how much stack space is needed. + for(i of inputList) stackMemoryNeeded += i.length * 512; + for(i of outputList) stackMemoryNeeded += i.length * 512; + for(i in parameters) stackMemoryNeeded += parameters[i].byteLength + 8, ++numParams; + + // Allocate the necessary stack space. + inputsPtr = stackAlloc(stackMemoryNeeded); + + // Copy input audio descriptor structs and data to Wasm + k = inputsPtr >> 2; + dataPtr = inputsPtr + numInputs * 8; + for(i of inputList) { + // Write the AudioSampleFrame struct instance + HEAPU32[k++] = i.length; + HEAPU32[k++] = dataPtr; + // Marshal the input audio sample data for each audio channel of this input + for(j of i) { + HEAPF32.set(j, dataPtr>>2); + dataPtr += 512; + } + } + + // Copy output audio descriptor structs to Wasm + outputsPtr = dataPtr; + k = outputsPtr >> 2; + outputDataPtr = (dataPtr += numOutputs * 8) >> 2; + for(i of outputList) { + // Write the AudioSampleFrame struct instance + HEAPU32[k++] = i.length; + HEAPU32[k++] = dataPtr; + // Reserve space for the output data + dataPtr += 512 * i.length; + } + + // Copy parameters descriptor structs and data to Wasm + paramsPtr = dataPtr; + k = paramsPtr >> 2; + dataPtr += numParams * 8; + for(i = 0; paramArray = parameters[i++];) { + // Write the AudioParamFrame struct instance + HEAPU32[k++] = paramArray.length; + HEAPU32[k++] = dataPtr; + // Marshal the audio parameters array + HEAPF32.set(paramArray, dataPtr>>2); + dataPtr += paramArray.length*4; + } + + // Call out to Wasm callback to perform audio processing + if (didProduceAudio = this.callbackFunction(numInputs, inputsPtr, numOutputs, outputsPtr, numParams, paramsPtr, this.userData)) { + // Read back the produced audio data to all outputs and their channels. + // (A garbage-free function TypedArray.copy(dstTypedArray, dstOffset, srcTypedArray, srcOffset, count) would sure be handy.. 
+ // but web does not have one, so manually copy all bytes in) + for(i of outputList) { + for(j of i) { + for(k = 0; k < 128; ++k) { + j[k] = HEAPF32[outputDataPtr++]; + } + } + } + } + + stackRestore(oldStackPtr); + + // Return 'true' to tell the browser to continue running this processor. (Returning 1 or any other truthy value won't work in Chrome) + return !!didProduceAudio; + } + } + return WasmAudioWorkletProcessor; +} + +// Specify a worklet processor that will be used to receive messages to this AudioWorkletGlobalScope. +// We never connect this initial AudioWorkletProcessor to the audio graph to do any audio processing. +class BootstrapMessages extends AudioWorkletProcessor { + constructor(arg) { + super(); + // Initialize the global Emscripten Module object that contains e.g. the Wasm Module and Memory objects. + // After this we are ready to load in the main application JS script, which the main thread will addModule() + // to this scope. + globalThis.Module = arg['processorOptions']; +#if !MINIMAL_RUNTIME + // Default runtime relies on an injected instantiateWasm() function to initialize the Wasm Module. + globalThis.Module['instantiateWasm'] = (info, receiveInstance) => { + var instance = new WebAssembly.Instance(Module['wasm'], info); + receiveInstance(instance, Module['wasm']); + return instance.exports; + }; +#endif +#if WEBAUDIO_DEBUG + console.log('AudioWorklet global scope looks like this:'); + console.dir(globalThis); +#endif + // Listen to messages from the main thread. These messages will ask this scope to create the real + // AudioWorkletProcessors that call out to Wasm to do audio processing. + let p = globalThis['messagePort'] = this.port; + p.onmessage = (msg) => { + let d = msg.data; + if (d['_wpn']) { // '_wpn' is short for 'Worklet Processor Node', using an identifier that will never conflict with user messages +#if MODULARIZE + // Instantiate the MODULARIZEd Module function, which is stored for us under the special global + // name AudioWorkletModule in MODULARIZE+AUDIO_WORKLET builds. + if (globalThis.AudioWorkletModule) { + AudioWorkletModule(Module); // This populates the Module object with all the Wasm properties + delete globalThis.AudioWorkletModule; // We have now instantiated the Module function, can discard it from global scope + } +#endif + // Register a real AudioWorkletProcessor that will actually do audio processing. + registerProcessor(d['_wpn'], createWasmAudioWorkletProcessor(d['audioParams'])); +#if WEBAUDIO_DEBUG + console.log(`Registered a new WasmAudioWorkletProcessor "${d['_wpn']}" with AudioParams: ${d['audioParams']}`); +#endif + // Post a Wasm Call message back telling that we have now registered the AudioWorkletProcessor class, + // and should trigger the user onSuccess callback of the emscripten_create_wasm_audio_worklet_processor_async() call. + p.postMessage({'_wsc': d['callback'], 'x': [d['contextHandle'], 1/*EM_TRUE*/, d['userData']] }); // "WaSm Call" + } else if (d['_wsc']) { // '_wsc' is short for 'wasm call', using an identifier that will never conflict with user messages + Module['wasmTable'].get(d['_wsc'])(...d['x']); + }; + } + } + + // No-op, not doing audio processing in this processor. It is just for receiving bootstrap messages. + // However browsers require it to still be present. It should never be called because we never add a + // node to the graph with this processor, although it does look like Chrome does still call this function. + process() { + // keep this function a no-op. 
Chrome redundantly wants to call this even though this processor is never added to the graph. + } +}; + +// Register the dummy processor that will just receive messages. +registerProcessor("message", BootstrapMessages); diff --git a/src/closure-externs/minimal_runtime_worker_externs.js b/src/closure-externs/minimal_runtime_worker_externs.js index 246fa488fca7d..78d326b2f32a8 100644 --- a/src/closure-externs/minimal_runtime_worker_externs.js +++ b/src/closure-externs/minimal_runtime_worker_externs.js @@ -7,6 +7,7 @@ // These externs are needed for MINIMAL_RUNTIME + USE_PTHREADS // This file should go away in the future when worker.js is refactored to live inside the JS module. +/** @suppress {duplicate} */ var ENVIRONMENT_IS_PTHREAD; /** @suppress {duplicate} */ var wasmMemory; diff --git a/src/library.js b/src/library.js index efc00e2716a42..45f853b94b2e3 100644 --- a/src/library.js +++ b/src/library.js @@ -2358,7 +2358,7 @@ mergeInto(LibraryManager.library, { " };\n" + "} else " + #endif -#if USE_PTHREADS +#if USE_PTHREADS && !AUDIO_WORKLET // Pthreads need their clocks synchronized to the execution of the main thread, so, when using them, // make sure to adjust all timings to the respective time origins. "_emscripten_get_now = () => performance.timeOrigin + performance.now();\n", @@ -2368,9 +2368,15 @@ mergeInto(LibraryManager.library, { " _emscripten_get_now = dateNow;\n" + "} else " + #endif -#if MIN_IE_VERSION <= 9 || MIN_FIREFOX_VERSION <= 14 || MIN_CHROME_VERSION <= 23 || MIN_SAFARI_VERSION <= 80400 // https://caniuse.com/#feat=high-resolution-time +#if MIN_IE_VERSION <= 9 || MIN_FIREFOX_VERSION <= 14 || MIN_CHROME_VERSION <= 23 || MIN_SAFARI_VERSION <= 80400 || AUDIO_WORKLET // https://caniuse.com/#feat=high-resolution-time +// AudioWorkletGlobalScope does not have performance.now() (https://github.com/WebAudio/web-audio-api/issues/2527), so if building with +// Audio Worklets enabled, do a dynamic check for its presence. "if (typeof performance != 'undefined' && performance.now) {\n" + +#if USE_PTHREADS + " _emscripten_get_now = () => performance.timeOrigin + performance.now();\n" + +#else " _emscripten_get_now = () => performance.now();\n" + +#endif "} else {\n" + " _emscripten_get_now = Date.now;\n" + "}", diff --git a/src/library_pthread.js b/src/library_pthread.js index aabdb01a099bd..82edf2a555f13 100644 --- a/src/library_pthread.js +++ b/src/library_pthread.js @@ -87,7 +87,11 @@ var LibraryPThread = { #if ASSERTIONS PThread.debugInit(); #endif - if (ENVIRONMENT_IS_PTHREAD) { + if (ENVIRONMENT_IS_PTHREAD +#if AUDIO_WORKLET + || ENVIRONMENT_IS_AUDIO_WORKLET +#endif + ) { PThread.initWorker(); } else { PThread.initMainThread(); diff --git a/src/library_wasm_worker.js b/src/library_wasm_worker.js index 52e69de20b6b4..ef2d6df293950 100644 --- a/src/library_wasm_worker.js +++ b/src/library_wasm_worker.js @@ -70,22 +70,33 @@ mergeInto(LibraryManager.library, { #endif // Run the C side Worker initialization for stack and TLS. _emscripten_wasm_worker_initialize(m['sb'], m['sz']); +#if USE_PTHREADS + // Record that this Wasm Worker supports synchronous blocking in emscripten_futex_wake(). + ___set_thread_state(/*thread_ptr=*/0, /*is_main_thread=*/0, /*is_runtime_thread=*/0, /*supports_wait=*/0); +#endif #if STACK_OVERFLOW_CHECK >= 2 // Fix up stack base. 
(TLS frame is created at the bottom address end of the stack) // See https://github.com/emscripten-core/emscripten/issues/16496 ___set_stack_limits(_emscripten_stack_get_base(), _emscripten_stack_get_end()); #endif - // The Wasm Worker runtime is now up, so we can start processing - // any postMessage function calls that have been received. Drop the temp - // message handler that queued any pending incoming postMessage function calls ... - removeEventListener('message', __wasm_worker_appendToQueue); - // ... then flush whatever messages we may have already gotten in the queue, - // and clear __wasm_worker_delayedMessageQueue to undefined ... - __wasm_worker_delayedMessageQueue = __wasm_worker_delayedMessageQueue.forEach(__wasm_worker_runPostMessage); - // ... and finally register the proper postMessage handler that immediately - // dispatches incoming function calls without queueing them. - addEventListener('message', __wasm_worker_runPostMessage); +#if AUDIO_WORKLET + // Audio Worklets do not have postMessage()ing capabilities. + if (typeof AudioWorkletGlobalScope === 'undefined') { +#endif + // The Wasm Worker runtime is now up, so we can start processing + // any postMessage function calls that have been received. Drop the temp + // message handler that queued any pending incoming postMessage function calls ... + removeEventListener('message', __wasm_worker_appendToQueue); + // ... then flush whatever messages we may have already gotten in the queue, + // and clear __wasm_worker_delayedMessageQueue to undefined ... + __wasm_worker_delayedMessageQueue = __wasm_worker_delayedMessageQueue.forEach(__wasm_worker_runPostMessage); + // ... and finally register the proper postMessage handler that immediately + // dispatches incoming function calls without queueing them. + addEventListener('message', __wasm_worker_runPostMessage); +#if AUDIO_WORKLET + } +#endif }, #if WASM_WORKERS == 2 diff --git a/src/library_webaudio.js b/src/library_webaudio.js new file mode 100644 index 0000000000000..bf2aedebbb7b8 --- /dev/null +++ b/src/library_webaudio.js @@ -0,0 +1,328 @@ +#if AUDIO_WORKLET && !WASM_WORKERS +#error "Building with -sAUDIO_WORKLET also requires enabling -sWASM_WORKERS!" +#endif +#if AUDIO_WORKLET && TEXTDECODER == 2 +#error "-sAUDIO_WORKLET does not support -sTEXTDECODER=2 since TextDecoder is not available in AudioWorkletGlobalScope! Use e.g. -sTEXTDECODER=1 when building with -sAUDIO_WORKLET" +#endif +#if AUDIO_WORKLET && SINGLE_FILE +#error "-sAUDIO_WORKLET does not support -sSINGLE_FILE" +#endif + +let LibraryWebAudio = { + $EmAudio: {}, + $EmAudioCounter: 0, + + // Call this function from JavaScript to register a Wasm-side handle to an AudioContext that + // you have already created manually without calling emscripten_create_audio_context(). + // Note: To let that AudioContext be garbage collected later, call the function + // emscriptenDestroyAudioContext() to unbind it from Wasm. + $emscriptenRegisterAudioObject__deps: ['$EmAudio', '$EmAudioCounter'], + $emscriptenRegisterAudioObject: function(object) { +#if ASSERTIONS + assert(object, 'Called emscriptenRegisterAudioObject() with a null object handle!'); +#endif + EmAudio[++EmAudioCounter] = object; +#if WEBAUDIO_DEBUG + console.log(`Registered new WebAudio object ${object} with ID ${EmAudioCounter}`); +#endif + return EmAudioCounter; + }, + + // Call this function from JavaScript to destroy a Wasm-side handle to an AudioContext. 
+  // After calling this function, it is no longer possible to reference this AudioContext
+  // from Wasm code - and the GC can reclaim it after all references to it are cleared.
+  $emscriptenDestroyAudioContext: 'emscripten_destroy_audio_context',
+
+  // Call this function from JavaScript to get the Web Audio object corresponding to the given
+  // Wasm handle ID.
+  $emscriptenGetAudioObject: function(objectHandle) {
+    return EmAudio[objectHandle];
+  },
+
+  // emscripten_create_audio_context() does not itself use the emscriptenGetAudioObject() function, but
+  // it is marked as a dependency because the user cannot utilize the created context unless they call
+  // emscriptenGetAudioObject() on it on the JS side to connect it to the graph. Declaring the dependency
+  // here avoids the user having to manually export the function on the command line.
+  emscripten_create_audio_context__deps: ['$emscriptenRegisterAudioObject', '$emscriptenGetAudioObject'],
+  emscripten_create_audio_context: function(options) {
+    let ctx = window.AudioContext || window.webkitAudioContext;
+#if ASSERTIONS
+    if (!ctx) console.error('emscripten_create_audio_context failed! Web Audio is not supported.');
+#endif
+    options >>= 2;
+
+    let opts = options ? {
+      latencyHint: HEAPU32[options] ? UTF8ToString(HEAPU32[options]) : void 0,
+      sampleRate: HEAP32[options+1] || void 0
+    } : void 0;
+
+#if WEBAUDIO_DEBUG
+    console.log(`Creating new WebAudio context with parameters:`);
+    console.dir(opts);
+#endif
+
+    return ctx && emscriptenRegisterAudioObject(new ctx(opts));
+  },
+
+  emscripten_resume_audio_context_async: function(contextHandle, callback, userData) {
+    function cb(state) {
+#if WEBAUDIO_DEBUG
+      console.log(`emscripten_resume_audio_context_async() callback: New audio state="${EmAudio[contextHandle].state}", ID=${state}`);
+#endif
+      {{{ makeDynCall('viii', 'callback') }}}(contextHandle, state, userData);
+    }
+#if WEBAUDIO_DEBUG
+    console.log(`emscripten_resume_audio_context_async() resuming...`);
+#endif
+    EmAudio[contextHandle].resume().then(() => { cb(1/*running*/) }).catch(() => { cb(0/*suspended*/) });
+  },
+
+  emscripten_resume_audio_context_sync: function(contextHandle) {
+#if ASSERTIONS
+    assert(EmAudio[contextHandle], `Called emscripten_resume_audio_context_sync() on a nonexisting context handle ${contextHandle}`);
+    assert(EmAudio[contextHandle] instanceof (window.AudioContext || window.webkitAudioContext), `Called emscripten_resume_audio_context_sync() on a context handle ${contextHandle} that is not an AudioContext, but of type ${typeof EmAudio[contextHandle]}`);
+#endif
+#if WEBAUDIO_DEBUG
+    console.log(`AudioContext.resume() on WebAudio context with ID ${contextHandle}`);
+#endif
+    EmAudio[contextHandle].resume();
+  },
+
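+  // Returns the current state of the AudioContext as an index matching the
+  // AUDIO_CONTEXT_STATE_* constants declared in emscripten/webaudio.h
+  // (0=suspended, 1=running, 2=closed, 3=interrupted).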
+  emscripten_audio_context_state: function(contextHandle) {
+#if ASSERTIONS
+    assert(EmAudio[contextHandle], `Called emscripten_audio_context_state() on a nonexisting context handle ${contextHandle}`);
+    assert(EmAudio[contextHandle] instanceof (window.AudioContext || window.webkitAudioContext), `Called emscripten_audio_context_state() on a context handle ${contextHandle} that is not an AudioContext, but of type ${typeof EmAudio[contextHandle]}`);
+#endif
+    return ['suspended', 'running', 'closed', 'interrupted'].indexOf(EmAudio[contextHandle].state);
+  },
+
+  emscripten_destroy_audio_context__sig: 'vi',
+  emscripten_destroy_audio_context: function(contextHandle) {
+#if ASSERTIONS
+    assert(EmAudio[contextHandle], `Called emscripten_destroy_audio_context() on an already freed context handle ${contextHandle}`);
+    assert(EmAudio[contextHandle] instanceof (window.AudioContext || window.webkitAudioContext), `Called emscripten_destroy_audio_context() on a context handle ${contextHandle} that is not an AudioContext, but of type ${typeof EmAudio[contextHandle]}`);
+#endif
+#if WEBAUDIO_DEBUG
+    console.log(`Destroyed WebAudio context with ID ${contextHandle}`);
+#endif
+    EmAudio[contextHandle].suspend();
+    delete EmAudio[contextHandle];
+  },
+
+  emscripten_destroy_web_audio_node: function(objectHandle) {
+#if ASSERTIONS
+    assert(EmAudio[objectHandle], `Called emscripten_destroy_web_audio_node() on a nonexisting/already freed object handle ${objectHandle}`);
+    assert(EmAudio[objectHandle].disconnect, `Called emscripten_destroy_web_audio_node() on a handle ${objectHandle} that is not a Web Audio Node, but of type ${typeof EmAudio[objectHandle]}`);
+#endif
+#if WEBAUDIO_DEBUG
+    console.log(`Destroyed Web Audio Node with ID ${objectHandle}`);
+#endif
+    // Explicitly disconnect the node from the Web Audio graph before letting it GC,
+    // to work around browser bugs such as https://bugs.webkit.org/show_bug.cgi?id=222098#c23
+    EmAudio[objectHandle].disconnect();
+    delete EmAudio[objectHandle];
+  },
+
+#if AUDIO_WORKLET
+  emscripten_start_wasm_audio_worklet_thread_async__deps: [
+    'wasm_workers_id',
+    '$_EmAudioDispatchProcessorCallback'],
+  emscripten_start_wasm_audio_worklet_thread_async: function(contextHandle, stackLowestAddress, stackSize, callback, userData) {
+#if !AUDIO_WORKLET
+    abort('emscripten_start_wasm_audio_worklet_thread_async() requires building with -sAUDIO_WORKLET=1 enabled!');
+#endif
+
+#if ASSERTIONS
+    assert(contextHandle, `Called emscripten_start_wasm_audio_worklet_thread_async() with a null Web Audio Context handle!`);
+    assert(EmAudio[contextHandle], `Called emscripten_start_wasm_audio_worklet_thread_async() with a nonexisting/already freed Web Audio Context handle ${contextHandle}!`);
+    assert(EmAudio[contextHandle] instanceof (window.AudioContext || window.webkitAudioContext), `Called emscripten_start_wasm_audio_worklet_thread_async() on a context handle ${contextHandle} that is not an AudioContext, but of type ${typeof EmAudio[contextHandle]}`);
+#endif
+
+    let audioContext = EmAudio[contextHandle],
+      audioWorklet = audioContext.audioWorklet;
+
+#if ASSERTIONS
+    assert(stackLowestAddress != 0, 'AudioWorklets require a dedicated stack space for audio data marshalling between Wasm and JS!');
+    assert(stackLowestAddress % 16 == 0, `AudioWorklet stack should be aligned to 16 bytes! (was ${stackLowestAddress} == ${stackLowestAddress%16} mod 16) Use e.g. memalign(16, stackSize) to align the stack!`);
+    assert(stackSize != 0, 'AudioWorklets require a dedicated stack space for audio data marshalling between Wasm and JS!');
+    assert(stackSize % 16 == 0, `AudioWorklet stack size should be a multiple of 16 bytes! (was ${stackSize} == ${stackSize%16} mod 16)`);
+    assert(!audioContext.audioWorkletInitialized, 'emscripten_start_wasm_audio_worklet_thread_async() was already called for AudioContext ' + contextHandle + '! Only call this function once per AudioContext!');
+    audioContext.audioWorkletInitialized = 1;
+#endif
+
+#if WEBAUDIO_DEBUG
+    console.log(`emscripten_start_wasm_audio_worklet_thread_async() adding the audio worklet bootstrap script...`);
+#endif
+
+    let audioWorkletCreationFailed = () => {
+#if WEBAUDIO_DEBUG
+      console.error(`emscripten_start_wasm_audio_worklet_thread_async() addModule() failed!`);
+#endif
+      {{{ makeDynCall('viii', 'callback') }}}(contextHandle, 0/*EM_FALSE*/, userData);
+    };
+
+    // Does the browser not support AudioWorklets?
+    if (!audioWorklet) {
+#if WEBAUDIO_DEBUG
+      if (location.protocol == 'http:') {
+        console.error(`AudioWorklets are not supported. This is possibly due to running the page over the insecure http:// protocol. Try running over https://, or debug via a localhost-based server, which should also allow AudioWorklets to function.`);
+      } else {
+        console.error(`AudioWorklets are not supported by the current browser.`);
+      }
+#endif
+      return audioWorkletCreationFailed();
+    }
+
+    // TODO: In MINIMAL_RUNTIME builds, read this file off of a preloaded Blob, and/or embed from a string like with WASM_WORKERS==2 mode.
+    audioWorklet.addModule('{{{ TARGET_BASENAME }}}.aw.js').then(() => {
+#if WEBAUDIO_DEBUG
+      console.log(`emscripten_start_wasm_audio_worklet_thread_async() addModule() of the audio worklet bootstrap script completed`);
+#endif
+      audioWorklet.bootstrapMessage = new AudioWorkletNode(audioContext, 'message', {
+        processorOptions: {
+          '$ww': _wasm_workers_id++, // Assign the loaded AudioWorkletGlobalScope a Wasm Worker ID so that it can utilize its own TLS slots, and so that it is recognized as not being the main browser thread.
+#if MINIMAL_RUNTIME
+          'wasm': Module['wasm'],
+          'mem': wasmMemory,
+#else
+          'wasm': wasmModule,
+          'wasmMemory': wasmMemory,
+#endif
+          'sb': stackLowestAddress, // sb = stack base
+          'sz': stackSize, // sz = stack size
+        }
+      });
+      audioWorklet.bootstrapMessage.port.onmessage = _EmAudioDispatchProcessorCallback;
+
+      // AudioWorklets do not have an importScripts() function like Web Workers do (and AudioWorkletGlobalScope does not allow dynamic import() either),
+      // so instead the main thread must load all JS code into the worklet scope. Send the application main JS script to the audio worklet.
+ return audioWorklet.addModule( +#if MINIMAL_RUNTIME + Module['js'] +#else + Module['mainScriptUrlOrBlob'] || _scriptDir +#endif + ); + }).then(() => { +#if WEBAUDIO_DEBUG + console.log(`emscripten_start_wasm_audio_worklet_thread_async() addModule() of main application JS completed`); +#endif + {{{ makeDynCall('viii', 'callback') }}}(contextHandle, 1/*EM_TRUE*/, userData); + }).catch(audioWorkletCreationFailed); + }, + + $_EmAudioDispatchProcessorCallback: function(e) { + let data = e.data, wasmCall = data['_wsc']; // '_wsc' is short for 'wasm call', trying to use an identifier name that will never conflict with user code + wasmCall && getWasmTableEntry(wasmCall)(...data['x']); + }, + + emscripten_create_wasm_audio_worklet_processor_async: function(contextHandle, options, callback, userData) { +#if ASSERTIONS + assert(contextHandle, `Called emscripten_create_wasm_audio_worklet_processor_async() with a null Web Audio Context handle!`); + assert(EmAudio[contextHandle], `Called emscripten_create_wasm_audio_worklet_processor_async() with a nonexisting/already freed Web Audio Context handle ${contextHandle}!`); + assert(EmAudio[contextHandle] instanceof (window.AudioContext || window.webkitAudioContext), `Called emscripten_create_wasm_audio_worklet_processor_async() on a context handle ${contextHandle} that is not an AudioContext, but of type ${typeof EmAudio[contextHandle]}`); +#endif + + options >>= 2; + let audioParams = [], + numAudioParams = HEAPU32[options+1], + audioParamDescriptors = HEAPU32[options+2] >> 2, + i = 0; + + while(numAudioParams--) { + audioParams.push({ + name: i++, + defaultValue: HEAPF32[audioParamDescriptors++], + minValue: HEAPF32[audioParamDescriptors++], + maxValue: HEAPF32[audioParamDescriptors++], + automationRate: ['a','k'][HEAPU32[audioParamDescriptors++]] + '-rate', + }); + } + +#if WEBAUDIO_DEBUG + console.log(`emscripten_create_wasm_audio_worklet_processor_async() creating a new AudioWorklet processor with name ${UTF8ToString(HEAPU32[options])}`); +#endif + + EmAudio[contextHandle].audioWorklet.bootstrapMessage.port.postMessage({ + _wpn: UTF8ToString(HEAPU32[options]), // '_wpn' == 'Worklet Processor Name', use a deliberately mangled name so that this field won't accidentally be mixed with user submitted messages. + audioParams: audioParams, + contextHandle: contextHandle, + callback: callback, + userData: userData + }); + }, + + emscripten_create_wasm_audio_worklet_node: function(contextHandle, name, options, callback, userData) { +#if ASSERTIONS + assert(contextHandle, `Called emscripten_create_wasm_audio_worklet_node() with a null Web Audio Context handle!`); + assert(EmAudio[contextHandle], `Called emscripten_create_wasm_audio_worklet_node() with a nonexisting/already freed Web Audio Context handle ${contextHandle}!`); + assert(EmAudio[contextHandle] instanceof (window.AudioContext || window.webkitAudioContext), `Called emscripten_create_wasm_audio_worklet_node() on a context handle ${contextHandle} that is not an AudioContext, but of type ${typeof EmAudio[contextHandle]}`); +#endif + options >>= 2; + + function readChannelCountArray(heapIndex, numOutputs) { + let channelCounts = []; + while(numOutputs--) channelCounts.push(HEAPU32[heapIndex++]); + return channelCounts; + } + + let opts = options ? { + numberOfInputs: HEAP32[options], + numberOfOutputs: HEAP32[options+1], + outputChannelCount: HEAPU32[options+2] ? 
readChannelCountArray(HEAPU32[options+2]>>2, HEAP32[options+1]) : void 0, + processorOptions: { 'cb': callback, 'ud': userData } + } : void 0; + +#if WEBAUDIO_DEBUG + console.log(`Creating AudioWorkletNode "${UTF8ToString(name)}" on context=${contextHandle} with options:`); + console.dir(opts); +#endif + return emscriptenRegisterAudioObject(new AudioWorkletNode(EmAudio[contextHandle], UTF8ToString(name), opts)); + }, +#endif // ~AUDIO_WORKLET + + emscripten_current_thread_is_audio_worklet: function() { + return typeof AudioWorkletGlobalScope !== 'undefined'; + }, + + emscripten_audio_worklet_post_function_v__sig: 'vip', + emscripten_audio_worklet_post_function_v: function(audioContext, funcPtr) { + (audioContext ? EmAudio[audioContext].audioWorklet.bootstrapMessage.port : globalThis['messagePort']).postMessage({'_wsc': funcPtr, 'x': [] }); // "WaSm Call" + }, + + emscripten_audio_worklet_post_function_1__sig: 'vipd', + emscripten_audio_worklet_post_function_1: function(audioContext, funcPtr, arg0) { + (audioContext ? EmAudio[audioContext].audioWorklet.bootstrapMessage.port : globalThis['messagePort']).postMessage({'_wsc': funcPtr, 'x': [arg0] }); // "WaSm Call" + }, + + emscripten_audio_worklet_post_function_vi: 'emscripten_audio_worklet_post_function_1', + emscripten_audio_worklet_post_function_vd: 'emscripten_audio_worklet_post_function_1', + + emscripten_audio_worklet_post_function_2__sig: 'vipdd', + emscripten_audio_worklet_post_function_2: function(audioContext, funcPtr, arg0, arg1) { + (audioContext ? EmAudio[audioContext].audioWorklet.bootstrapMessage.port : globalThis['messagePort']).postMessage({'_wsc': funcPtr, 'x': [arg0, arg1] }); // "WaSm Call" + }, + emscripten_audio_worklet_post_function_vii: 'emscripten_audio_worklet_post_function_2', + emscripten_audio_worklet_post_function_vdd: 'emscripten_audio_worklet_post_function_2', + + emscripten_audio_worklet_post_function_3__sig: 'vipddd', + emscripten_audio_worklet_post_function_3: function(audioContext, funcPtr, arg0, arg1, arg2) { + (audioContext ? EmAudio[audioContext].audioWorklet.bootstrapMessage.port : globalThis['messagePort']).postMessage({'_wsc': funcPtr, 'x': [arg0, arg1, arg2] }); // "WaSm Call" + }, + emscripten_audio_worklet_post_function_viii: 'emscripten_audio_worklet_post_function_3', + emscripten_audio_worklet_post_function_vddd: 'emscripten_audio_worklet_post_function_3', + + emscripten_audio_worklet_post_function_sig__deps: ['$readAsmConstArgs'], + emscripten_audio_worklet_post_function_sig__sig: 'vippp', + emscripten_audio_worklet_post_function_sig: function(audioContext, funcPtr, sigPtr, varargs) { +#if ASSERTIONS + assert(audioContext >= 0); + assert(funcPtr); + assert(sigPtr); + assert(UTF8ToString(sigPtr)[0] != 'v', 'Do NOT specify the return argument in the signature string for a call to emscripten_audio_worklet_post_function_sig(), just pass the function arguments.'); + assert(varargs); +#endif + (audioContext ? 
EmAudio[audioContext].audioWorklet.bootstrapMessage.port : globalThis['messagePort']).postMessage({'_wsc': funcPtr, 'x': readAsmConstArgs(sigPtr, varargs) }); + } +}; + +mergeInto(LibraryManager.library, LibraryWebAudio); diff --git a/src/postamble_minimal.js b/src/postamble_minimal.js index 2a536e23b70f0..efc9381c86a3c 100644 --- a/src/postamble_minimal.js +++ b/src/postamble_minimal.js @@ -200,6 +200,24 @@ WebAssembly.instantiate(Module['wasm'], imports).then(function(output) { assert(wasmTable); #endif +#if AUDIO_WORKLET + // If we are in the audio worklet environment, we can only access the Module object + // and not the global scope of the main JS script. Therefore we need to export + // all functions that the audio worklet scope needs onto the Module object. + Module['wasmTable'] = wasmTable; +#if ASSERTIONS + // In ASSERTIONS-enabled builds, the following symbols have gotten read-only getters + // saved to the Module. Remove those getters so we can manually export the stack + // functions here. + delete Module['stackSave']; + delete Module['stackAlloc']; + delete Module['stackRestore']; +#endif + Module['stackSave'] = stackSave; + Module['stackAlloc'] = stackAlloc; + Module['stackRestore'] = stackRestore; +#endif + #if !IMPORTED_MEMORY wasmMemory = asm['memory']; #if ASSERTIONS diff --git a/src/preamble.js b/src/preamble.js index fcb076e908b3f..31f5e86ece99c 100644 --- a/src/preamble.js +++ b/src/preamble.js @@ -1004,6 +1004,13 @@ function createWasm() { #endif #endif +#if AUDIO_WORKLET + // If we are in the audio worklet environment, we can only access the Module object + // and not the global scope of the main JS script. Therefore we need to export + // all functions that the audio worklet scope needs onto the Module object. + Module['wasmTable'] = wasmTable; +#endif + #if hasExportedSymbol('__wasm_call_ctors') addOnInit(Module['asm']['__wasm_call_ctors']); #endif diff --git a/src/preamble_minimal.js b/src/preamble_minimal.js index ac15d52ecb204..7d992ae1aad4d 100644 --- a/src/preamble_minimal.js +++ b/src/preamble_minimal.js @@ -10,6 +10,12 @@ global.maybeExport = function(x) { return MODULARIZE && EXPORT_ALL ? `Module['${x}'] = ` : ''; }; + // Export to the AudioWorkletGlobalScope the needed variables to access + // the heap. AudioWorkletGlobalScope is unable to access global JS vars + // in the compiled main JS file. + global.maybeExportIfAudioWorklet = function(x) { + return (MODULARIZE && EXPORT_ALL) || AUDIO_WORKLET ? 
`Module['${x}'] = ` : ''; + }; null; }}} @@ -71,19 +77,19 @@ function updateMemoryViews() { assert(b instanceof SharedArrayBuffer, 'requested a shared WebAssembly.Memory but the returned buffer is not a SharedArrayBuffer, indicating that while the browser has SharedArrayBuffer it does not have WebAssembly threads support - you may need to set a flag'); #endif #if SUPPORT_BIG_ENDIAN - {{{ maybeExport('HEAP_DATA_VIEW') }}}HEAP_DATA_VIEW = new DataView(b); -#endif - {{{ maybeExport('HEAP8') }}}HEAP8 = new Int8Array(b); - {{{ maybeExport('HEAP16') }}}HEAP16 = new Int16Array(b); - {{{ maybeExport('HEAP32') }}}HEAP32 = new Int32Array(b); - {{{ maybeExport('HEAPU8') }}}HEAPU8 = new Uint8Array(b); - {{{ maybeExport('HEAPU16') }}}HEAPU16 = new Uint16Array(b); - {{{ maybeExport('HEAPU32') }}}HEAPU32 = new Uint32Array(b); - {{{ maybeExport('HEAPF32') }}}HEAPF32 = new Float32Array(b); - {{{ maybeExport('HEAPF64') }}}HEAPF64 = new Float64Array(b); + {{{ maybeExport('HEAP_DATA_VIEW') }}} HEAP_DATA_VIEW = new DataView(b); +#endif + {{{ maybeExport('HEAP8') }}} HEAP8 = new Int8Array(b); + {{{ maybeExport('HEAP16') }}} HEAP16 = new Int16Array(b); + {{{ maybeExport('HEAP32') }}} HEAP32 = new Int32Array(b); + {{{ maybeExport('HEAPU8') }}} HEAPU8 = new Uint8Array(b); + {{{ maybeExport('HEAPU16') }}} HEAPU16 = new Uint16Array(b); + {{{ maybeExportIfAudioWorklet('HEAPU32') }}} HEAPU32 = new Uint32Array(b); + {{{ maybeExportIfAudioWorklet('HEAPF32') }}} HEAPF32 = new Float32Array(b); + {{{ maybeExport('HEAPF64') }}} HEAPF64 = new Float64Array(b); #if WASM_BIGINT - {{{ maybeExport('HEAP64') }}}HEAP64 = new BigInt64Array(b); - {{{ maybeExport('HEAPU64') }}}HEAPU64 = new BigUint64Array(b); + {{{ maybeExport('HEAP64') }}} HEAP64 = new BigInt64Array(b); + {{{ maybeExport('HEAPU64') }}} HEAPU64 = new BigUint64Array(b); #endif } diff --git a/src/runtime_debug.js b/src/runtime_debug.js index f5b79e31cd320..89cebe542b536 100644 --- a/src/runtime_debug.js +++ b/src/runtime_debug.js @@ -76,7 +76,7 @@ function missingLibrarySymbol(sym) { } }); } - // Any symbol that is not included from the JS libary is also (by definttion) + // Any symbol that is not included from the JS libary is also (by definition) // not exported on the Module object. unexportedRuntimeSymbol(sym); } diff --git a/src/settings.js b/src/settings.js index c236c919cc5d1..b9c88e5f08040 100644 --- a/src/settings.js +++ b/src/settings.js @@ -1533,6 +1533,15 @@ var USE_PTHREADS = false; // [compile+link] - affects user code at compile and system libraries at link. var WASM_WORKERS = 0; +// If true, enables targeting Wasm Web Audio AudioWorklets. Check out the +// full documentation in site/source/docs/api_reference/wasm_audio_worklets.rst +// [link] +var AUDIO_WORKLET = 0; + +// If true, enables deep debugging of Web Audio backend. +// [link] +var WEBAUDIO_DEBUG = 0; + // In web browsers, Workers cannot be created while the main browser thread // is executing JS/Wasm code, but the main thread must regularly yield back // to the browser event loop for Worker initialization to occur. 
diff --git a/src/settings_internal.js b/src/settings_internal.js index 5e83803ca0c57..2ad8c815a20ec 100644 --- a/src/settings_internal.js +++ b/src/settings_internal.js @@ -137,6 +137,9 @@ var PTHREAD_WORKER_FILE = ''; // name of the file containing the Wasm Worker *.ww.js, if relevant var WASM_WORKER_FILE = ''; +// name of the file containing the Audio Worklet *.aw.js, if relevant +var AUDIO_WORKLET_FILE = ''; + // Base URL the source mapfile, if relevant var SOURCE_MAP_BASE = ''; @@ -166,6 +169,9 @@ var MINIFY_ASMJS_EXPORT_NAMES = true; // Internal: represents a browser version that is not supported at all. var TARGET_NOT_SUPPORTED = 0x7FFFFFFF; +// Used to track whether target environment supports the 'globalThis' attribute. +var SUPPORTS_GLOBALTHIS = false; + // Wasm backend symbols that are considered system symbols and don't // have the normal C symbol name mangled applied (== prefix with an underscore) // (Also implicily on this list is any function that starts with string "dynCall_") diff --git a/src/shell.js b/src/shell.js index 6f74e47aac3f2..96653cfc90fea 100644 --- a/src/shell.js +++ b/src/shell.js @@ -34,6 +34,8 @@ var /** @type {{ }} */ Module; if (!Module) /** @suppress{checkTypes}*/Module = {"__EMSCRIPTEN_PRIVATE_MODULE_EXPORT_NAME_SUBSTITUTION__":1}; +#elif AUDIO_WORKLET +var Module = globalThis.Module || (typeof {{{ EXPORT_NAME }}} != 'undefined' ? {{{ EXPORT_NAME }}} : {}); #else var Module = typeof {{{ EXPORT_NAME }}} != 'undefined' ? {{{ EXPORT_NAME }}} : {}; #endif // USE_CLOSURE_COMPILER @@ -90,6 +92,10 @@ var quit_ = (status, toThrow) => { // Determine the runtime environment we are in. You can customize this by // setting the ENVIRONMENT setting at compile time (see settings.js). +#if AUDIO_WORKLET +var ENVIRONMENT_IS_AUDIO_WORKLET = typeof AudioWorkletGlobalScope !== 'undefined'; +#endif + #if ENVIRONMENT && !ENVIRONMENT.includes(',') var ENVIRONMENT_IS_WEB = {{{ ENVIRONMENT === 'web' }}}; #if USE_PTHREADS && ENVIRONMENT_MAY_BE_NODE @@ -107,7 +113,11 @@ var ENVIRONMENT_IS_WORKER = typeof importScripts == 'function'; // N.b. Electron.js environment is simultaneously a NODE-environment, but // also a web environment. 
 var ENVIRONMENT_IS_NODE = typeof process == 'object' && typeof process.versions == 'object' && typeof process.versions.node == 'string';
+#if AUDIO_WORKLET
+var ENVIRONMENT_IS_SHELL = !ENVIRONMENT_IS_WEB && !ENVIRONMENT_IS_NODE && !ENVIRONMENT_IS_WORKER && !ENVIRONMENT_IS_AUDIO_WORKLET;
+#else
 var ENVIRONMENT_IS_SHELL = !ENVIRONMENT_IS_WEB && !ENVIRONMENT_IS_NODE && !ENVIRONMENT_IS_WORKER;
+#endif
 #endif // ENVIRONMENT
 
 #if ASSERTIONS
@@ -416,6 +426,9 @@ if (ENVIRONMENT_IS_WEB || ENVIRONMENT_IS_WORKER) {
   setWindowTitle = (title) => document.title = title;
 } else
 #endif // ENVIRONMENT_MAY_BE_WEB || ENVIRONMENT_MAY_BE_WORKER
+#if AUDIO_WORKLET && ASSERTIONS
+if (!ENVIRONMENT_IS_AUDIO_WORKLET)
+#endif
 {
 #if ASSERTIONS
   throw new Error('environment detection error');
@@ -491,7 +504,11 @@ assert(typeof Module['TOTAL_MEMORY'] == 'undefined', 'Module.TOTAL_MEMORY has be
 #endif
 
 #if USE_PTHREADS
-assert(ENVIRONMENT_IS_WEB || ENVIRONMENT_IS_WORKER || ENVIRONMENT_IS_NODE, 'Pthreads do not work in this environment yet (need Web Workers, or an alternative to them)');
+assert(
+#if AUDIO_WORKLET
+  ENVIRONMENT_IS_AUDIO_WORKLET ||
+#endif
+  ENVIRONMENT_IS_WEB || ENVIRONMENT_IS_WORKER || ENVIRONMENT_IS_NODE, 'Pthreads do not work in this environment yet (need Web Workers, or an alternative to them)');
 #endif // USE_PTHREADS
 
 #if !ENVIRONMENT_MAY_BE_WEB
diff --git a/src/shell_minimal.js b/src/shell_minimal.js
index f1a14cef39fa7..ac481128cd9b7 100644
--- a/src/shell_minimal.js
+++ b/src/shell_minimal.js
@@ -5,17 +5,32 @@
  */
 
 #if USE_CLOSURE_COMPILER
+
 // if (!Module)` is crucial for Closure Compiler here as it will
 // otherwise replace every `Module` occurrence with the object below
 var /** @type{Object} */ Module;
-if (!Module) /** @suppress{checkTypes}*/Module = {"__EMSCRIPTEN_PRIVATE_MODULE_EXPORT_NAME_SUBSTITUTION__":1};
-#elif ENVIRONMENT_MAY_BE_NODE || ENVIRONMENT_MAY_BE_SHELL
+if (!Module) /** @suppress{checkTypes}*/Module =
+#if AUDIO_WORKLET
+  globalThis.{{{ EXPORT_NAME }}} ||
+#endif
+  {"__EMSCRIPTEN_PRIVATE_MODULE_EXPORT_NAME_SUBSTITUTION__":1};
+
+#elif !MODULARIZE && (ENVIRONMENT_MAY_BE_NODE || ENVIRONMENT_MAY_BE_SHELL)
+
 // When running on the web we expect Module to be defined externally, in the
 // HTML. Otherwise we must define it here before its first use
-var Module = typeof {{{ EXPORT_NAME }}} != 'undefined' ? {{{ EXPORT_NAME }}} : {};
+var Module =
+#if SUPPORTS_GLOBALTHIS
+  // As a small code size optimization, we can use 'globalThis' to refer to the global scope Module variable.
+  globalThis.{{{ EXPORT_NAME }}} || {};
+#else
+  // Otherwise do a good old typeof check.
+  typeof {{{ EXPORT_NAME }}} != 'undefined' ? {{{ EXPORT_NAME }}} : {};
+#endif
+
 #else
 var Module = {{{ EXPORT_NAME }}};
-#endif // USE_CLOSURE_COMPILER
+#endif
 
 #if MODULARIZE && EXPORT_READY_PROMISE
 // Set up the promise that indicates the Module is initialized
@@ -37,6 +52,10 @@ var ENVIRONMENT_IS_NODE = typeof process == 'object';
 var ENVIRONMENT_IS_SHELL = typeof read == 'function';
 #endif
 
+#if AUDIO_WORKLET
+var ENVIRONMENT_IS_AUDIO_WORKLET = typeof AudioWorkletGlobalScope !== 'undefined';
+#endif
+
 #if ASSERTIONS || USE_PTHREADS
 #if !ENVIRONMENT_MAY_BE_NODE && !ENVIRONMENT_MAY_BE_SHELL
 var ENVIRONMENT_IS_WEB = true
diff --git a/system/include/emscripten/webaudio.h b/system/include/emscripten/webaudio.h
new file mode 100644
index 0000000000000..f2f6e6de003eb
--- /dev/null
+++ b/system/include/emscripten/webaudio.h
@@ -0,0 +1,156 @@
+/*
+ * Copyright 2022 The Emscripten Authors. All rights reserved.
+ * Emscripten is available under two separate licenses, the MIT license and the
+ * University of Illinois/NCSA Open Source License. Both these licenses can be
+ * found in the LICENSE file.
+ */
+
+#pragma once
+
+#include <emscripten/html5.h>
+#include <emscripten/emscripten.h>
+
+#include <stdint.h>
+#include <stddef.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef int EMSCRIPTEN_WEBAUDIO_T;
+
+typedef struct EmscriptenWebAudioCreateAttributes
+{
+  const char *latencyHint; // Specify one of "balanced", "interactive" or "playback"
+  uint32_t sampleRate; // E.g. 44100 or 48000
+} EmscriptenWebAudioCreateAttributes;
+
+// Creates a new Web Audio AudioContext, and returns a handle to it.
+EMSCRIPTEN_WEBAUDIO_T emscripten_create_audio_context(const EmscriptenWebAudioCreateAttributes *options);
+
+typedef int AUDIO_CONTEXT_STATE;
+#define AUDIO_CONTEXT_STATE_SUSPENDED 0
+#define AUDIO_CONTEXT_STATE_RUNNING 1
+#define AUDIO_CONTEXT_STATE_CLOSED 2
+#define AUDIO_CONTEXT_STATE_INTERRUPTED 3
+
+typedef void (*EmscriptenResumeAudioContextCallback)(EMSCRIPTEN_WEBAUDIO_T audioContext, AUDIO_CONTEXT_STATE state, void *userData);
+
+// Resumes the given AudioContext. The specified callback will fire when the AudioContext has completed resuming. Call this function
+// inside a user event handler (mousedown, button click, etc.).
+void emscripten_resume_audio_context_async(EMSCRIPTEN_WEBAUDIO_T audioContext, EmscriptenResumeAudioContextCallback callback, void *userData);
+
+// Synchronously attempts to resume the given AudioContext.
+void emscripten_resume_audio_context_sync(EMSCRIPTEN_WEBAUDIO_T audioContext);
+
+// Returns the current AudioContext state.
+AUDIO_CONTEXT_STATE emscripten_audio_context_state(EMSCRIPTEN_WEBAUDIO_T audioContext);
+
+typedef void (*EmscriptenStartWebAudioWorkletCallback)(EMSCRIPTEN_WEBAUDIO_T audioContext, EM_BOOL success, void *userData);
+
+// Calls .suspend() on the given AudioContext and releases the JS object table
+// reference to the given audio context. The specified handle is invalid
+// after calling this function.
+void emscripten_destroy_audio_context(EMSCRIPTEN_WEBAUDIO_T audioContext);
+
+// Disconnects the given audio node from its audio graph, and then releases
+// the JS object table reference to the given audio node. The specified handle
+// is invalid after calling this function.
+void emscripten_destroy_web_audio_node(EMSCRIPTEN_WEBAUDIO_T objectHandle);
+
+// Creates a Wasm AudioWorklet thread. Call this function once at application startup to establish an AudioWorkletGlobalScope for your app.
+// After the scope has been initialized, the given callback will fire.
+// audioContext: The Web Audio context object to initialize the Wasm AudioWorklet thread on. Each AudioContext can have only one AudioWorklet
+//               thread running, so do not call this function multiple times on the same AudioContext.
+// stackLowestAddress: The base address for the thread's stack. Must be aligned to 16 bytes. Use e.g. memalign(16, 1024) to allocate a 1KB stack for the thread.
+// stackSize: The size of the thread's stack. Must be a multiple of 16 bytes.
+// callback: The callback function that will be run when thread creation either succeeds or fails.
+// userData: A custom userdata pointer to pass to the callback function.
+void emscripten_start_wasm_audio_worklet_thread_async(EMSCRIPTEN_WEBAUDIO_T audioContext, void *stackLowestAddress, uint32_t stackSize, EmscriptenStartWebAudioWorkletCallback callback, void *userData); + +typedef int WEBAUDIO_PARAM_AUTOMATION_RATE; +#define WEBAUDIO_PARAM_A_RATE 0 +#define WEBAUDIO_PARAM_K_RATE 1 + +typedef struct WebAudioParamDescriptor +{ + float defaultValue; // Default == 0.0 + float minValue; // Default = -3.4028235e38; + float maxValue; // Default = 3.4028235e38; + WEBAUDIO_PARAM_AUTOMATION_RATE automationRate; // Either WEBAUDIO_PARAM_A_RATE or WEBAUDIO_PARAM_K_RATE. Default = WEBAUDIO_PARAM_A_RATE +} WebAudioParamDescriptor; + +typedef struct WebAudioWorkletProcessorCreateOptions +{ + const char *name; // The name of the AudioWorkletProcessor that is being created. + + int numAudioParams; + const WebAudioParamDescriptor *audioParamDescriptors; +} WebAudioWorkletProcessorCreateOptions; + +typedef void (*EmscriptenWorkletProcessorCreatedCallback)(EMSCRIPTEN_WEBAUDIO_T audioContext, EM_BOOL success, void *userData); + +// Creates a new AudioWorkletProcessor with the given name and specified set of control parameters. +void emscripten_create_wasm_audio_worklet_processor_async(EMSCRIPTEN_WEBAUDIO_T audioContext, const WebAudioWorkletProcessorCreateOptions *options, EmscriptenWorkletProcessorCreatedCallback callback, void *userData); + +typedef int EMSCRIPTEN_AUDIO_WORKLET_NODE_T; + +typedef struct AudioSampleFrame +{ + const int numberOfChannels; + // An array of numberOfChannels*128 elements, where data[channelIndex*128+i] holds the i'th sample of channel channelIndex. + float *data; +} AudioSampleFrame; + +typedef struct AudioParamFrame +{ + // Specifies the length of the input array data (in float elements). This is guaranteed to be either + // 1 or 128, depending on whether the audio parameter changed during this frame. + int length; + // An array of length specified in 'length'. + float *data; +} AudioParamFrame; + +typedef EM_BOOL (*EmscriptenWorkletNodeProcessCallback)(int numInputs, const AudioSampleFrame *inputs, int numOutputs, AudioSampleFrame *outputs, int numParams, const AudioParamFrame *params, void *userData); + +typedef struct EmscriptenAudioWorkletNodeCreateOptions +{ + // How many audio nodes does this node take inputs from? Default=1 + int numberOfInputs; + // How many audio nodes does this node output to? Default=1 + int numberOfOutputs; + // For each output, specifies the number of audio channels (1=mono/2=stereo/etc.) for that output. Default = an array of ones (one mono channel per output). + int *outputChannelCounts; +} EmscriptenAudioWorkletNodeCreateOptions; + +// Instantiates the given AudioWorkletProcessor as an AudioWorkletNode, which continuously calls the specified processCallback() function on the browser's audio thread to perform audio processing. +EMSCRIPTEN_AUDIO_WORKLET_NODE_T emscripten_create_wasm_audio_worklet_node(EMSCRIPTEN_WEBAUDIO_T audioContext, const char *name, const EmscriptenAudioWorkletNodeCreateOptions *options, EmscriptenWorkletNodeProcessCallback processCallback, void *userData); + +// Returns EM_TRUE if the current thread is executing a Wasm AudioWorklet, EM_FALSE otherwise. +// Note that calling this function can be relatively slow as it incurs a Wasm->JS transition, +// so avoid calling it in hot paths.
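+//
+// For example (illustrative sketch, not part of the patch), code shared between the main
+// thread and the audio thread can branch on this:
+//
+//   if (!emscripten_current_thread_is_audio_worklet()) {
+//     // not on the realtime audio thread: heavier, non-realtime work is acceptable here
+//   }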
+EM_BOOL emscripten_current_thread_is_audio_worklet(void); + +#define EMSCRIPTEN_AUDIO_MAIN_THREAD 0 + +/* emscripten_audio_worklet_post_function_*: Post a pointer to a C/C++ function to be executed either + on the Audio Worklet thread of the given Web Audio context, or on the main thread. Notes: + - If running inside an Audio Worklet thread, specify ID EMSCRIPTEN_AUDIO_MAIN_THREAD (== 0) to pass a message + from the audio worklet to the main thread. + - When specifying a non-zero ID, the Audio Context denoted by the ID must have been created by the calling thread. + - Passing messages between the audio thread and the main thread with this family of functions is relatively slow and has + a high latency cost compared to direct coordination using atomics and synchronization primitives like + mutexes. Additionally these functions will generate garbage on the JS heap. + Therefore avoid using these functions where performance is critical. */ +void emscripten_audio_worklet_post_function_v(EMSCRIPTEN_WEBAUDIO_T id, void (*funcPtr)(void)); +void emscripten_audio_worklet_post_function_vi(EMSCRIPTEN_WEBAUDIO_T id, void (*funcPtr)(int), int arg0); +void emscripten_audio_worklet_post_function_vii(EMSCRIPTEN_WEBAUDIO_T id, void (*funcPtr)(int, int), int arg0, int arg1); +void emscripten_audio_worklet_post_function_viii(EMSCRIPTEN_WEBAUDIO_T id, void (*funcPtr)(int, int, int), int arg0, int arg1, int arg2); +void emscripten_audio_worklet_post_function_vd(EMSCRIPTEN_WEBAUDIO_T id, void (*funcPtr)(double), double arg0); +void emscripten_audio_worklet_post_function_vdd(EMSCRIPTEN_WEBAUDIO_T id, void (*funcPtr)(double, double), double arg0, double arg1); +void emscripten_audio_worklet_post_function_vddd(EMSCRIPTEN_WEBAUDIO_T id, void (*funcPtr)(double, double, double), double arg0, double arg1, double arg2); +void emscripten_audio_worklet_post_function_sig(EMSCRIPTEN_WEBAUDIO_T id, void *funcPtr, const char *sig, ...); + +#ifdef __cplusplus +} // ~extern "C" +#endif diff --git a/system/lib/pthread/emscripten_futex_wait.c b/system/lib/pthread/emscripten_futex_wait.c index 5b60d7e6ba91e..505f19cac2a15 100644 --- a/system/lib/pthread/emscripten_futex_wait.c +++ b/system/lib/pthread/emscripten_futex_wait.c @@ -126,18 +126,12 @@ int emscripten_futex_wait(volatile void *addr, uint32_t val, double max_wait_ms) int ret; emscripten_conditional_set_current_thread_status(EM_THREAD_STATUS_RUNNING, EM_THREAD_STATUS_WAITFUTEX); - // For the main browser thread we can't use + // For the main browser thread and audio worklets we can't use // __builtin_wasm_memory_atomic_wait32 so we have busy wait instead. if (!_emscripten_thread_supports_atomics_wait()) { - if (emscripten_is_main_browser_thread()) { - ret = futex_wait_main_browser_thread(addr, val, max_wait_ms); - emscripten_conditional_set_current_thread_status(EM_THREAD_STATUS_WAITFUTEX, EM_THREAD_STATUS_RUNNING); - return ret; - } else { - // TODO: handle non-main threads that also don't support `atomic.wait`. - // For example AudioWorklet. - assert(0); - } + ret = futex_wait_main_browser_thread(addr, val, max_wait_ms); + emscripten_conditional_set_current_thread_status(EM_THREAD_STATUS_WAITFUTEX, EM_THREAD_STATUS_RUNNING); + return ret; } // -1 (or any negative number) means wait indefinitely.
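For reference, the busy-wait fallback performed by futex_wait_main_browser_thread() (its body is outside this diff) amounts to roughly the following sketch; the helper name busy_wait_futex and all details below are illustrative, not part of the patch:

    #include <emscripten/emscripten.h> // assumed here for emscripten_get_now()
    #include <errno.h>
    #include <stdint.h>

    // Poll the futex word until its value changes or the timeout expires. Used on
    // threads (main browser thread, audio worklets) that may not block in atomics.wait.
    static int busy_wait_futex(volatile void *addr, uint32_t val, double max_wait_ms) {
      double end = emscripten_get_now() + max_wait_ms;
      do {
        if (*(volatile uint32_t*)addr != val)
          return 0; // the value changed: treat as woken
      } while (max_wait_ms < 0 || emscripten_get_now() < end); // negative: wait indefinitely
      return -ETIMEDOUT;
    }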
diff --git a/system/lib/pthread/thread_profiler.c b/system/lib/pthread/thread_profiler.c index 48fcd225bfcdc..5598ed0fb3f5e 100644 --- a/system/lib/pthread/thread_profiler.c +++ b/system/lib/pthread/thread_profiler.c @@ -34,6 +34,7 @@ static void set_status_conditional(int expectedStatus, int newStatus) { return; } pthread_t thread = pthread_self(); + if (!thread) return; // AudioWorklets do not have a pthread block, but if the user calls emscripten_futex_wait() in an AudioWorklet, it will call here via emscripten_set_current_thread_status(). int prevStatus = thread->profilerBlock->threadStatus; if (prevStatus != newStatus && (prevStatus == expectedStatus || expectedStatus == -1)) { diff --git a/test/browser_reporting.js b/test/browser_reporting.js index ee271fe3997e3..a132a641a47a0 100644 --- a/test/browser_reporting.js +++ b/test/browser_reporting.js @@ -9,7 +9,7 @@ function reportResultToServer(result, sync, port) { reportErrorToServer("excessive reported results, sending " + result + ", test will fail"); } reportResultToServer.reported = true; - if (typeof ENVIRONMENT_IS_NODE !== 'undefined' && ENVIRONMENT_IS_NODE) { + if ((typeof ENVIRONMENT_IS_NODE !== 'undefined' && ENVIRONMENT_IS_NODE) || (typeof ENVIRONMENT_IS_AUDIO_WORKLET !== 'undefined' && ENVIRONMENT_IS_AUDIO_WORKLET)) { out('RESULT: ' + result); } else { var xhr = new XMLHttpRequest(); diff --git a/test/test_browser.py b/test/test_browser.py index 8a3d2b12e5725..8100ccb674f9f 100644 --- a/test/test_browser.py +++ b/test/test_browser.py @@ -4391,7 +4391,7 @@ def test_small_js_flags(self): size = os.path.getsize('test.js') print('size:', size) # Note that this size includes test harness additions (for reporting the result, etc.). - self.assertLess(abs(size - 5059), 100) + self.assertLess(abs(size - 5100), 100) # Tests that it is possible to initialize and render WebGL content in a pthread by using OffscreenCanvas. # -DTEST_CHAINED_WEBGL_CONTEXT_PASSING: Tests that it is possible to transfer WebGL canvas in a chain from main thread -> thread 1 -> thread 2 and then init and render WebGL content there.
@@ -5485,6 +5485,28 @@ def test_pthread_key_recreation(self): def test_full_js_library_strict(self): self.btest_exit(test_file('hello_world.c'), args=['-sINCLUDE_FULL_LIBRARY', '-sSTRICT_JS']) + # Tests the AudioWorklet demo + @parameterized({ + 'default': ([],), + 'with_fs': (['--preload-file', test_file('hello_world.c') + '@/'],), + 'closure': (['--closure', '1', '-Oz'],), + 'asyncify': (['-sASYNCIFY'],), + 'pthreads': (['-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=2'],), + 'pthreads_and_closure': (['-sUSE_PTHREADS', '--closure', '1', '-Oz'],), + 'minimal_runtime': (['-sMINIMAL_RUNTIME'],), + 'minimal_runtime_pthreads_and_closure': (['-sMINIMAL_RUNTIME', '-sUSE_PTHREADS', '--closure', '1', '-Oz'],), + }) + def test_audio_worklet(self, args): + self.btest_exit(test_file('webaudio/audioworklet.c'), args=['-sAUDIO_WORKLET', '-sWASM_WORKERS'] + args) + + # Tests that posting functions between the main thread and the audio worklet thread works + @parameterized({ + 'default': ([],), + 'closure': (['--closure', '1', '-Oz'],), + }) + def test_audio_worklet_post_function(self, args): + self.btest(test_file('webaudio/audioworklet_post_function.c'), args=['-sAUDIO_WORKLET', '-sWASM_WORKERS'] + args, expected='1') + class emrun(RunnerCore): def test_emrun_info(self): diff --git a/test/test_interactive.py b/test/test_interactive.py index 96cbfbc74cc3e..e3124777f35a7 100644 --- a/test/test_interactive.py +++ b/test/test_interactive.py @@ -11,7 +11,7 @@ raise Exception('do not run this file directly; do something like: test/runner.py interactive') from common import parameterized -from common import BrowserCore, test_file +from common import BrowserCore, test_file, also_with_minimal_runtime from tools.shared import WINDOWS from tools.utils import which @@ -256,3 +256,27 @@ def test_emscripten_hide_mouse(self): def test_webgl_offscreen_canvas_in_two_pthreads(self): for args in [['-sOFFSCREENCANVAS_SUPPORT', '-DTEST_OFFSCREENCANVAS=1'], ['-sOFFSCREEN_FRAMEBUFFER']]: self.btest('gl_in_two_pthreads.cpp', expected='1', args=args + ['-sUSE_PTHREADS', '-lGL', '-sGL_DEBUG', '-sPROXY_TO_PTHREAD']) + + # Tests creating a Web Audio context using the Emscripten library_webaudio.js feature. + @also_with_minimal_runtime + def test_web_audio(self): + self.btest('webaudio/create_webaudio.c', expected='0', args=['-lwebaudio.js']) + + # Tests simple AudioWorklet noise generation + @also_with_minimal_runtime + def test_audio_worklet(self): + self.btest('webaudio/audioworklet.c', expected='0', args=['-sAUDIO_WORKLET', '-sWASM_WORKERS', '--preload-file', test_file('hello_world.c') + '@/']) + self.btest('webaudio/audioworklet.c', expected='0', args=['-sAUDIO_WORKLET', '-sWASM_WORKERS', '-sUSE_PTHREADS']) + + # Tests AudioWorklet with emscripten_futex_wake(). + @also_with_minimal_runtime + def test_audio_worklet_emscripten_futex_wake(self): + self.btest('webaudio/audioworklet_emscripten_futex_wake.cpp', expected='0', args=['-sAUDIO_WORKLET', '-sWASM_WORKERS', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=2']) + + # Tests a second AudioWorklet example: sine wave tone generator. + def test_audio_worklet_tone_generator(self): + self.btest('webaudio/tone_generator.c', expected='0', args=['-sAUDIO_WORKLET', '-sWASM_WORKERS']) + + # Tests that the AUDIO_WORKLET+MINIMAL_RUNTIME+MODULARIZE combination works together.
+ def test_audio_worklet_modularize(self): + self.btest('webaudio/audioworklet.c', expected='0', args=['-sAUDIO_WORKLET', '-sWASM_WORKERS', '-sMINIMAL_RUNTIME', '-sMODULARIZE']) diff --git a/test/webaudio/audioworklet.c b/test/webaudio/audioworklet.c new file mode 100644 index 0000000000000..712dd64969c22 --- /dev/null +++ b/test/webaudio/audioworklet.c @@ -0,0 +1,125 @@ +#include <emscripten/webaudio.h> +#include <assert.h> +#include <stdlib.h> +#include <time.h> + +/* Steps to use Wasm-based AudioWorklets: + 1. Create a Web Audio AudioContext, either manually in JS code (registering it via emscriptenRegisterAudioObject()), or by calling emscripten_create_audio_context() (shown in this sample) + 2. Initialize a Wasm AudioWorklet scope on the audio context by calling emscripten_start_wasm_audio_worklet_thread_async(). This shares the Wasm Module, Memory, etc. to the AudioWorklet scope, + and establishes the stack space for the Audio Worklet. + This needs to be called exactly once during the page's lifetime. There is no mechanism in Web Audio to shut down/uninitialize the scope. + 3. Create one or more Audio Worklet Processors with the desired name and AudioParam configuration. + 4. Instantiate Web Audio graph nodes from the worklet processors created above, specifying the desired input-output configurations and Wasm-side function callbacks to call for each node. + 5. Add the graph nodes to the Web Audio graph, and the audio callbacks should begin to fire. +*/ + +#ifdef REPORT_RESULT // This is defined when running in Emscripten test harness. You can strip these out in your own project. +_Thread_local int testTlsVariable = 1; +int lastTlsVariableValueInAudioThread = 1; +#endif + +// This function will be called for every fixed 128 samples of audio to be processed. +EM_BOOL ProcessAudio(int numInputs, const AudioSampleFrame *inputs, int numOutputs, AudioSampleFrame *outputs, int numParams, const AudioParamFrame *params, void *userData) +{ +#ifdef REPORT_RESULT + assert(testTlsVariable == lastTlsVariableValueInAudioThread); + ++testTlsVariable; + lastTlsVariableValueInAudioThread = testTlsVariable; + assert(emscripten_current_thread_is_audio_worklet()); +#endif + + // Produce noise in all output channels. + for(int i = 0; i < numOutputs; ++i) + for(int j = 0; j < 128*outputs[i].numberOfChannels; ++j) + outputs[i].data[j] = (rand() / (float)RAND_MAX * 2.0f - 1.0f) * 0.3f; + + // We generated audio and want to keep this processor going. Return EM_FALSE here to shut down. + return EM_TRUE; +} + +EM_JS(void, InitHtmlUi, (EMSCRIPTEN_WEBAUDIO_T audioContext, EMSCRIPTEN_AUDIO_WORKLET_NODE_T audioWorkletNode), { + audioContext = emscriptenGetAudioObject(audioContext); + audioWorkletNode = emscriptenGetAudioObject(audioWorkletNode); + + // Add a button on the page to toggle playback as a response to user click. + let startButton = document.createElement('button'); + startButton.innerHTML = 'Toggle playback'; + document.body.appendChild(startButton); + + startButton.onclick = () => { + if (audioContext.state != 'running') { + audioContext.resume(); + // Connect the audio worklet node to the graph. + audioWorkletNode.connect(audioContext.destination); + } else { + audioContext.suspend(); + } + }; +}); + +#ifdef REPORT_RESULT +EM_BOOL main_thread_tls_access(double time, void *userData) +{ + // Try to mess with the TLS variable on the main thread, with the expectation that it should not change + // the TLS value on the AudioWorklet thread.
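+  // (The main thread and the audio worklet thread each have their own TLS block, so
+  // writes made here must never become visible to ProcessAudio() above.)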
+ testTlsVariable = (int)time; + if (lastTlsVariableValueInAudioThread >= 100) + { + REPORT_RESULT(0); + return EM_FALSE; + } + return EM_TRUE; +} +#endif + +// This callback will fire after the Audio Worklet Processor has finished being added to the Worklet global scope. +void AudioWorkletProcessorCreated(EMSCRIPTEN_WEBAUDIO_T audioContext, EM_BOOL success, void *userData) +{ + if (!success) return; + + // Specify the input and output node configurations for the Wasm Audio Worklet. A simple setup with single mono output channel here, and no inputs. + int outputChannelCounts[1] = { 1 }; + + EmscriptenAudioWorkletNodeCreateOptions options = { + .numberOfInputs = 0, + .numberOfOutputs = 1, + .outputChannelCounts = outputChannelCounts + }; + + // Instantiate the noise-generator Audio Worklet Processor. + EMSCRIPTEN_AUDIO_WORKLET_NODE_T wasmAudioWorklet = emscripten_create_wasm_audio_worklet_node(audioContext, "noise-generator", &options, &ProcessAudio, 0); + +#ifdef REPORT_RESULT + emscripten_set_timeout_loop(main_thread_tls_access, 10, 0); +#endif + + InitHtmlUi(audioContext, wasmAudioWorklet); +} + +// This callback will fire when the Wasm Module has been shared to the AudioWorklet global scope, and is now ready to begin adding Audio Worklet Processors. +void WebAudioWorkletThreadInitialized(EMSCRIPTEN_WEBAUDIO_T audioContext, EM_BOOL success, void *userData) +{ + if (!success) return; + + WebAudioWorkletProcessorCreateOptions opts = { + .name = "noise-generator", + }; + emscripten_create_wasm_audio_worklet_processor_async(audioContext, &opts, AudioWorkletProcessorCreated, 0); +} + +// Define a global stack space for the AudioWorkletGlobalScope. Note that all AudioWorkletProcessors and/or AudioWorkletNodes on the given Audio Context all share the same AudioWorkletGlobalScope, +// i.e. they all run on the same audio thread (multiple nodes/processors do not each get their own thread). Hence one stack is enough. +uint8_t wasmAudioWorkletStack[4096]; + +int main() +{ + srand(time(NULL)); + + assert(!emscripten_current_thread_is_audio_worklet()); + + // Create an audio context + EMSCRIPTEN_WEBAUDIO_T context = emscripten_create_audio_context(0 /* use default constructor options */); + + // and kick off Audio Worklet scope initialization, which shares the Wasm Module and Memory to the AudioWorklet scope and initializes its stack. + emscripten_start_wasm_audio_worklet_thread_async(context, wasmAudioWorkletStack, sizeof(wasmAudioWorkletStack), WebAudioWorkletThreadInitialized, 0); +} diff --git a/test/webaudio/audioworklet_emscripten_futex_wake.cpp b/test/webaudio/audioworklet_emscripten_futex_wake.cpp new file mode 100644 index 0000000000000..ecce7b50496ca --- /dev/null +++ b/test/webaudio/audioworklet_emscripten_futex_wake.cpp @@ -0,0 +1,79 @@ +#include <emscripten/webaudio.h> +#include <emscripten/threading.h> +#include <emscripten/html5.h> +#include <stdio.h> +#include <assert.h> + +// Tests that +// - _emscripten_thread_supports_atomics_wait() returns false in a Wasm Audio Worklet. +// - emscripten_futex_wake() does not crash in a Wasm Audio Worklet. +// - emscripten_futex_wait() does not crash in a Wasm Audio Worklet. +// - emscripten_get_now() does not crash in a Wasm Audio Worklet.
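+//
+// (Audio worklets run on the browser's real-time audio rendering thread, which is not
+// allowed to block, so atomics.wait is unavailable there and emscripten_futex_wait()
+// takes the busy-wait fallback added to emscripten_futex_wait.c earlier in this patch.)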
+ +int futexLocation = 0; +int testSuccess = 0; + +extern "C" int _emscripten_thread_supports_atomics_wait(void); + +EM_BOOL ProcessAudio(int numInputs, const AudioSampleFrame *inputs, int numOutputs, AudioSampleFrame *outputs, int numParams, const AudioParamFrame *params, void *userData) +{ + int supportsAtomicWait = _emscripten_thread_supports_atomics_wait(); + printf("supportsAtomicWait: %d\n", supportsAtomicWait); + assert(!supportsAtomicWait); + emscripten_futex_wake(&futexLocation, 1); + printf("%f\n", emscripten_get_now()); + + emscripten_futex_wait(&futexLocation, 1, /*maxWaitMs=*/2); + testSuccess = 1; + + return EM_FALSE; +} + +EM_JS(void, InitHtmlUi, (EMSCRIPTEN_WEBAUDIO_T audioContext, EMSCRIPTEN_AUDIO_WORKLET_NODE_T audioWorkletNode), { + audioContext = emscriptenGetAudioObject(audioContext); + audioWorkletNode = emscriptenGetAudioObject(audioWorkletNode); + let startButton = document.createElement('button'); + startButton.innerHTML = 'Start playback'; + document.body.appendChild(startButton); + + startButton.onclick = () => { + audioWorkletNode.connect(audioContext.destination); + audioContext.resume(); + }; +}); + +EM_BOOL PollTestSuccess(double, void *) +{ + if (testSuccess) + { + printf("Test success!\n"); +#ifdef REPORT_RESULT + REPORT_RESULT(0); +#endif + return EM_FALSE; + } + return EM_TRUE; +} + +void AudioWorkletProcessorCreated(EMSCRIPTEN_WEBAUDIO_T audioContext, EM_BOOL success, void *userData) +{ + int outputChannelCounts[1] = { 1 }; + EmscriptenAudioWorkletNodeCreateOptions options = { .numberOfInputs = 0, .numberOfOutputs = 1, .outputChannelCounts = outputChannelCounts }; + EMSCRIPTEN_AUDIO_WORKLET_NODE_T wasmAudioWorklet = emscripten_create_wasm_audio_worklet_node(audioContext, "noise-generator", &options, &ProcessAudio, 0); + InitHtmlUi(audioContext, wasmAudioWorklet); +} + +void WebAudioWorkletThreadInitialized(EMSCRIPTEN_WEBAUDIO_T audioContext, EM_BOOL success, void *userData) +{ + WebAudioWorkletProcessorCreateOptions opts = { .name = "noise-generator" }; + emscripten_create_wasm_audio_worklet_processor_async(audioContext, &opts, AudioWorkletProcessorCreated, 0); +} + +uint8_t wasmAudioWorkletStack[4096]; + +int main() +{ + emscripten_set_timeout_loop(PollTestSuccess, 10, 0); + EMSCRIPTEN_WEBAUDIO_T context = emscripten_create_audio_context(0); + emscripten_start_wasm_audio_worklet_thread_async(context, wasmAudioWorkletStack, sizeof(wasmAudioWorkletStack), WebAudioWorkletThreadInitialized, 0); +} diff --git a/test/webaudio/audioworklet_post_function.c b/test/webaudio/audioworklet_post_function.c new file mode 100644 index 0000000000000..403ddbb1bbf87 --- /dev/null +++ b/test/webaudio/audioworklet_post_function.c @@ -0,0 +1,44 @@ +#include <emscripten/webaudio.h> +#include <stdio.h> +#include <assert.h> + +// This test showcases posting messages (function calls) between the main thread and the Audio Worklet thread +// using the emscripten_audio_worklet_post_function_*() API. + +// This event will fire on the main thread. +void MessageReceivedOnMainThread(int d, int e, int f) +{ + printf("MessageReceivedOnMainThread: d=%d, e=%d, f=%d\n", d, e, f); + assert(!emscripten_current_thread_is_audio_worklet()); + assert(d == 1 && e == 2 && f == 3); +#ifdef REPORT_RESULT + REPORT_RESULT(1); // test succeeded, we were able to post a message from the main thread to the audio thread and back! +#endif +} + +// This event will fire on the audio worklet thread.
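+// (It is posted to the worklet from the main thread by WebAudioWorkletThreadInitialized() below.)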
+void MessageReceivedInAudioWorkletThread(int a, int b) +{ + printf("MessageReceivedInAudioWorkletThread: a=%d, b=%d\n", a, b); + assert(emscripten_current_thread_is_audio_worklet()); + assert(a == 42 && b == 9000); + emscripten_audio_worklet_post_function_viii(EMSCRIPTEN_AUDIO_MAIN_THREAD, MessageReceivedOnMainThread, /*d=*/1, /*e=*/2, /*f=*/3); +} + +// This callback will fire when the audio worklet thread has been initialized. +void WebAudioWorkletThreadInitialized(EMSCRIPTEN_WEBAUDIO_T audioContext, EM_BOOL success, void *userData) +{ + printf("WebAudioWorkletThreadInitialized\n"); + emscripten_audio_worklet_post_function_vii(audioContext, MessageReceivedInAudioWorkletThread, /*a=*/42, /*b=*/9000); +} + +uint8_t wasmAudioWorkletStack[4096]; + +int main() +{ + // Create an audio context + EMSCRIPTEN_WEBAUDIO_T context = emscripten_create_audio_context(0 /* use default constructor options */); + + // and kick off Audio Worklet scope initialization, which shares the Wasm Module and Memory to the AudioWorklet scope and initializes its stack. + emscripten_start_wasm_audio_worklet_thread_async(context, wasmAudioWorkletStack, sizeof(wasmAudioWorkletStack), WebAudioWorkletThreadInitialized, 0); +} diff --git a/test/webaudio/create_webaudio.c b/test/webaudio/create_webaudio.c new file mode 100644 index 0000000000000..2f5da5504d26b --- /dev/null +++ b/test/webaudio/create_webaudio.c @@ -0,0 +1,34 @@ +#include <emscripten/webaudio.h> + +// This code shows a simple example of how to create a Web Audio context from C/C++ code using the webaudio.h API, +// and how to add a pure sine wave tone generator to it. + +int main() +{ + EMSCRIPTEN_WEBAUDIO_T context = emscripten_create_audio_context(0 /* use default constructor options */); + + // Illustrate how this handle can be passed to JS code (e.g. to a JS library function, EM_ASM or an EM_JS block) + EM_ASM({ + var audioContext = emscriptenGetAudioObject($0); + + var oscillator = audioContext.createOscillator(); + oscillator.connect(audioContext.destination); + oscillator.start(); + + // Add a button on the page to toggle playback as a response to user click. + var startButton = document.createElement('button'); + startButton.innerHTML = 'Toggle playback'; + document.body.appendChild(startButton); + + startButton.onclick = () => { + if (audioContext.state != 'running') { + audioContext.resume(); +#ifdef REPORT_RESULT + __ReportResult(0, 0); +#endif + } else { + audioContext.suspend(); + } + }; + }, context); +} diff --git a/test/webaudio/tone_generator.c b/test/webaudio/tone_generator.c new file mode 100644 index 0000000000000..e19915658987e --- /dev/null +++ b/test/webaudio/tone_generator.c @@ -0,0 +1,149 @@ +#include <emscripten/webaudio.h> +#include <emscripten/em_math.h> + +// This program tests that sharing the WebAssembly Memory works between the audio generator thread and the main browser UI thread. +// Two sliders, frequency and volume, can be adjusted on the HTML page, and the audio thread generates a sine wave tone based on +// these parameters. + +// Implement a smooth transition between the UI values and the values that the audio callback is actually processing, to avoid crackling when the user adjusts the sliders.
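+// Per 128-sample render quantum, the audio thread moves 5% of the remaining distance
+// towards the UI-set targets: x = 0.95*x + 0.05*target (see ProcessAudio() below).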
+float targetToneFrequency = 440.0f; // [shared variable between main thread and audio thread] +float targetVolume = 0.3f; // [shared variable between main thread and audio thread] + +#define SAMPLE_RATE 48000 +#define PI 3.14159265359 + +float phase = 0.f; // [local variable to the audio thread] +float phaseIncrement = 440 * 2.f * PI / SAMPLE_RATE; // [local variable to the audio thread] +float currentVolume = 0.3; // [local variable to the audio thread] + +#ifdef REPORT_RESULT // This is defined when running in Emscripten test harness. You can strip these out in your own project. +volatile int audioProcessedCount = 0; +#endif + +// This function will be called for every fixed 128 samples of audio to be processed. +EM_BOOL ProcessAudio(int numInputs, const AudioSampleFrame *inputs, int numOutputs, AudioSampleFrame *outputs, int numParams, const AudioParamFrame *params, void *userData) +{ +#ifdef REPORT_RESULT + ++audioProcessedCount; +#endif + + // Interpolate towards the target frequency and volume values. + float targetPhaseIncrement = targetToneFrequency * 2.f * PI / SAMPLE_RATE; + phaseIncrement = phaseIncrement * 0.95f + 0.05f * targetPhaseIncrement; + currentVolume = currentVolume * 0.95f + 0.05f * targetVolume; + + // Produce a sine wave tone of desired frequency to all output channels. + for(int o = 0; o < numOutputs; ++o) + for(int i = 0; i < 128; ++i) + { + float s = emscripten_math_sin(phase); + phase += phaseIncrement; + for(int ch = 0; ch < outputs[o].numberOfChannels; ++ch) + outputs[o].data[ch*128 + i] = s * currentVolume; + } + + // Range reduce to keep precision around zero. + phase = emscripten_math_fmod(phase, 2.f * PI); + + // We generated audio and want to keep this processor going. Return EM_FALSE here to shut down. + return EM_TRUE; +} + +#ifdef REPORT_RESULT +EM_BOOL observe_test_end(double time, void *userData) +{ + if (audioProcessedCount >= 100) + { + REPORT_RESULT(0); + return EM_FALSE; + } + return EM_TRUE; +} +#endif + +// This callback will fire after the Audio Worklet Processor has finished being added to the Worklet global scope. +void AudioWorkletProcessorCreated(EMSCRIPTEN_WEBAUDIO_T audioContext, EM_BOOL success, void *userData) +{ + if (!success) return; + + // Specify the input and output node configurations for the Wasm Audio Worklet. A simple setup with single mono output channel here, and no inputs. + int outputChannelCounts[1] = { 1 }; + + EmscriptenAudioWorkletNodeCreateOptions options = { + .numberOfInputs = 0, + .numberOfOutputs = 1, + .outputChannelCounts = outputChannelCounts + }; + + // Instantiate the noise-generator Audio Worklet Processor. + EMSCRIPTEN_AUDIO_WORKLET_NODE_T wasmAudioWorklet = emscripten_create_wasm_audio_worklet_node(audioContext, "tone-generator", &options, &ProcessAudio, 0); + + EM_ASM({ + let audioContext = emscriptenGetAudioObject($0); + + // Add a button on the page to toggle playback as a response to user click. + let startButton = document.createElement('button'); + startButton.innerHTML = 'Toggle playback'; + document.body.appendChild(startButton); + + startButton.onclick = () => { + if (audioContext.state != 'running') { + audioContext.resume(); + let audioWorkletNode = emscriptenGetAudioObject($1); + + // Connect the audio worklet node to the graph. 
+ audioWorkletNode.connect(audioContext.destination); + } else { + audioContext.suspend(); + } + }; + }, audioContext, wasmAudioWorklet); + +#ifdef REPORT_RESULT + emscripten_set_timeout_loop(observe_test_end, 10, 0); +#endif +} + +// This callback will fire when the Wasm Module has been shared to the AudioWorklet global scope, and is now ready to begin adding Audio Worklet Processors. +void WebAudioWorkletThreadInitialized(EMSCRIPTEN_WEBAUDIO_T audioContext, EM_BOOL success, void *userData) +{ + if (!success) return; + + WebAudioWorkletProcessorCreateOptions opts = { + .name = "tone-generator", + }; + emscripten_create_wasm_audio_worklet_processor_async(audioContext, &opts, AudioWorkletProcessorCreated, 0); +} + +// Define a global stack space for the AudioWorkletGlobalScope. Note that all AudioWorkletProcessors and/or AudioWorkletNodes on the given Audio Context all share the same AudioWorkletGlobalScope, +// i.e. they all run on the same audio thread (multiple nodes/processors do not each get their own thread). Hence one stack is enough. +uint8_t wasmAudioWorkletStack[4096]; + +int main() +{ + // Add UI sliders to the page to adjust the pitch and volume of the tone. + EM_ASM({ + let div = document.createElement('div'); + div.innerHTML = 'Choose frequency: <input type="range" min="20" max="10000" value="440" id="pitch"> <span id="pitchValue">440</span><br>' +
+ 'Choose volume: <input type="range" min="0" max="100" value="30" id="volume"> <span id="volumeValue">30%</span><br>
'; + document.body.appendChild(div); + document.querySelector('#pitch').oninput = (e) => { + document.querySelector('#pitchValue').innerHTML = HEAPF32[$0>>2] = parseInt(e.target.value); + }; + document.querySelector('#volume').oninput = (e) => { + HEAPF32[$1>>2] = parseInt(e.target.value) / 100; + document.querySelector('#volumeValue').innerHTML = parseInt(e.target.value) + '%'; + }; + }, &targetToneFrequency, &targetVolume); + + // Create an audio context + EmscriptenWebAudioCreateAttributes attrs = { + .latencyHint = "interactive", + .sampleRate = SAMPLE_RATE + }; + + EMSCRIPTEN_WEBAUDIO_T context = emscripten_create_audio_context(&attrs); + + // and kick off Audio Worklet scope initialization, which shares the Wasm Module and Memory to the AudioWorklet scope and initializes its stack. + emscripten_start_wasm_audio_worklet_thread_async(context, wasmAudioWorkletStack, sizeof(wasmAudioWorkletStack), WebAudioWorkletThreadInitialized, 0); +} diff --git a/tools/feature_matrix.py b/tools/feature_matrix.py index b514a4412c765..9530d5df56c26 100644 --- a/tools/feature_matrix.py +++ b/tools/feature_matrix.py @@ -21,6 +21,7 @@ class Feature(IntEnum): MUTABLE_GLOBALS = auto() JS_BIGINT_INTEGRATION = auto() THREADS = auto() + GLOBALTHIS = auto() default_features = {Feature.SIGN_EXT, Feature.MUTABLE_GLOBALS} @@ -56,6 +57,13 @@ class Feature(IntEnum): 'firefox': 79, 'safari': 140100, }, + Feature.GLOBALTHIS: { + 'chrome': 71, + 'edge': 79, + 'firefox': 65, + 'safari': 120100, + # 'node': 120000 + }, } diff --git a/tools/file_packager.py b/tools/file_packager.py index 4ed3ea50b6fa1..9ee8e4d2e90c9 100755 --- a/tools/file_packager.py +++ b/tools/file_packager.py @@ -587,9 +587,8 @@ def generate_js(data_target, data_files, metadata): Module.expectedDataFileDownloads++; (function() { - // When running as a pthread, FS operations are proxied to the main thread, so we don't need to - // fetch the .data bundle on the worker - if (Module['ENVIRONMENT_IS_PTHREAD']) return; + // Do not attempt to redownload the virtual filesystem data when in a pthread or a Wasm Worker context. + if (Module['ENVIRONMENT_IS_PTHREAD'] || Module['$ww']) return; var loadPackage = function(metadata) {\n''' code = ''' diff --git a/tools/js_manipulation.py b/tools/js_manipulation.py index cf9fc90d0abb6..307018f880fcd 100644 --- a/tools/js_manipulation.py +++ b/tools/js_manipulation.py @@ -44,7 +44,7 @@ def add_files_pre_js(pre_js_list, files_pre_js): utils.write_file(pre, ''' // All the pre-js content up to here must remain later on, we need to run // it. - if (Module['ENVIRONMENT_IS_PTHREAD']) Module['preRun'] = []; + if (Module['ENVIRONMENT_IS_PTHREAD'] || Module['$ww']) Module['preRun'] = []; var necessaryPreJSTasks = Module['preRun'].slice(); ''') utils.write_file(post, '''