Merge pull request webrtc#1423 from dogben/bbx-audio
Add new sample for audio processing using insertable streams
Showing 3 changed files with 226 additions and 0 deletions.
src/content/insertable-streams/audio-processing/index.html (74 additions, 0 deletions)
@@ -0,0 +1,74 @@
<!DOCTYPE html>
<!--
 *  Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree.
-->
<html>
<head>

  <meta charset="utf-8">
  <meta name="description" content="WebRTC code samples">
  <meta name="viewport" content="width=device-width, user-scalable=yes, initial-scale=1, maximum-scale=1">
  <meta itemprop="description" content="Client-side WebRTC code samples">
  <meta itemprop="image" content="../../../images/webrtc-icon-192x192.png">
  <meta itemprop="name" content="WebRTC code samples">
  <meta name="mobile-web-app-capable" content="yes">
  <meta id="theme-color" name="theme-color" content="#ffffff">

  <base target="_blank">

  <title>Insertable Streams - Audio</title>

  <link rel="icon" sizes="192x192" href="../../../images/webrtc-icon-192x192.png">
  <link href="//fonts.googleapis.com/css?family=Roboto:300,400,500,700" rel="stylesheet" type="text/css">
  <link rel="stylesheet" href="../../../css/main.css">

</head>

<body>

<div id="container">

  <h1><a href="//webrtc.github.io/samples/" title="WebRTC samples homepage">WebRTC samples</a>
    <span>Audio processing with insertable streams</span>
  </h1>

  <p>This sample shows how to perform processing on an audio stream using the experimental
    <a href="https://github.com/w3c/mediacapture-insertable-streams">insertable streams</a> API.
    It applies a low-pass filter in real time to audio recorded from a microphone and plays it
    back.
  </p>
  <audio id="audioOutput" controls></audio>

  <div>
    <button type="button" id="startButton">Start</button>
    <button type="button" id="stopButton" disabled>Stop</button>
  </div>
  <p class="warning">Warning: if you're not using headphones, pressing Start will cause feedback.</p>

  <p>View the console to see logging. The <code>audio</code>, <code>processor</code>,
    <code>generator</code>, <code>transformer</code>, <code>stream</code> and
    <code>processedStream</code> variables are in global scope, so you can inspect them from the
    console. You may also adjust the amount of filtering by assigning to <code>cutoff</code>.</p>

  <p>
    <b>Note</b>: This sample uses an experimental API that has not yet been standardized. As
    of 2021-02-09, this API is available in Chrome M90 if the experimental code is enabled on
    the command line with
    <code>--enable-blink-features=WebCodecs,MediaStreamInsertableStreams</code>.
  </p>
  <a href="https://github.com/webrtc/samples/tree/gh-pages/src/content/insertable-streams/audio-processing"
     title="View source for this page on GitHub" id="viewSource">View source on GitHub</a>

</div>

<script src="https://webrtc.github.io/adapter/adapter-latest.js"></script>
<script src="js/main.js" async></script>

<script src="../../../js/lib/ga.js"></script>

</body>
</html>
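
The page text above summarizes what main.js (shown next) does: a MediaStreamTrackProcessor exposes the microphone track as a ReadableStream of audio frames, a TransformStream rewrites each frame, and a MediaStreamTrackGenerator turns the result back into a playable track. A condensed sketch of just that chain, with an identity transform and no error handling — not part of the commit, and it assumes a context (such as the DevTools console) where top-level await works:

```js
// Minimal sketch of the insertable-streams audio chain used by this sample
// (identity transform; abort and error handling omitted for brevity).
const stream = await navigator.mediaDevices.getUserMedia({audio: true});
const processor = new MediaStreamTrackProcessor(stream.getAudioTracks()[0]);
const generator = new MediaStreamTrackGenerator('audio');
const transformer = new TransformStream({
  transform(frame, controller) {
    controller.enqueue(frame);  // a real transform would edit frame.buffer here
  }
});
processor.readable.pipeThrough(transformer).pipeTo(generator.writable);
// The generator is itself a MediaStreamTrack, so it can feed a MediaStream.
document.querySelector('audio').srcObject = new MediaStream([generator]);
```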
src/content/insertable-streams/audio-processing/js/main.js (151 additions, 0 deletions)
@@ -0,0 +1,151 @@
/*
 *  Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree.
 */

'use strict';

/* global MediaStreamTrackProcessor, MediaStreamTrackGenerator */
if (typeof MediaStreamTrackProcessor === 'undefined' ||
    typeof MediaStreamTrackGenerator === 'undefined') {
  alert(
      'Your browser does not support the experimental MediaStreamTrack API ' +
      'for Insertable Streams of Media. See the note at the bottom of the ' +
      'page.');
}

try {
  new MediaStreamTrackGenerator('audio');
  console.log('Audio insertable streams supported');
} catch (e) {
  alert(
      'Your browser does not support insertable audio streams. See the note ' +
      'at the bottom of the page.');
}

// Put variables in global scope to make them available to the browser console.

// Audio element
let audio;

// Buttons
let startButton;
let stopButton;

// Transformation chain elements
let processor;
let generator;
let transformer;

// Stream from getUserMedia
let stream;
// Output from the transform
let processedStream;

// Adjust this value to increase/decrease the amount of filtering.
// eslint-disable-next-line prefer-const
let cutoff = 100;

// An AbortController used to stop the transform.
let abortController;

// Initialize on page load.
async function init() {
  audio = document.getElementById('audioOutput');
  startButton = document.getElementById('startButton');
  stopButton = document.getElementById('stopButton');

  startButton.onclick = start;
  stopButton.onclick = stop;
}

const constraints = window.constraints = {
  audio: true,
  video: false
};
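
The commit requests plain `audio: true`. As a hedged aside (not something this commit does), the standard MediaTrackConstraints members could ask the browser to skip its own audio processing, so the low-pass filter would hear rawer microphone input:

```js
// Hypothetical variation, not part of the sample: disable the browser's
// built-in processing before applying our own filter.
const rawConstraints = {
  audio: {
    echoCancellation: false,   // standard MediaTrackConstraints members
    noiseSuppression: false,
    autoGainControl: false
  },
  video: false
};
// stream = await navigator.mediaDevices.getUserMedia(rawConstraints);
```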

// Returns a low-pass transform function for use with TransformStream.
function lowPassFilter() {
  let lastValuePerChannel = undefined;
  return (frame, controller) => {
    const rc = 1.0 / (cutoff * 2 * Math.PI);
    const dt = 1.0 / frame.buffer.sampleRate;
    const alpha = dt / (rc + dt);
    const nChannels = frame.buffer.numberOfChannels;
    if (!lastValuePerChannel) {
      console.log(`Audio stream has ${nChannels} channels.`);
      lastValuePerChannel = Array(nChannels).fill(0);
    }
    for (let c = 0; c < nChannels; c++) {
      const samples = frame.buffer.getChannelData(c);
      let lastValue = lastValuePerChannel[c];

      // Apply low-pass filter to samples.
      for (let i = 0; i < samples.length; ++i) {
        lastValue = lastValue + alpha * (samples[i] - lastValue);
        samples[i] = lastValue;
      }

      frame.buffer.copyToChannel(samples, c);
      lastValuePerChannel[c] = lastValue;
    }
    controller.enqueue(frame);
  };
}
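
For reference, the transform above is a classic single-pole RC low-pass (exponential smoothing): alpha = dt / (RC + dt) with RC = 1 / (2π · cutoff), and y[i] = y[i−1] + alpha · (x[i] − y[i−1]). A self-contained sketch of the same arithmetic on a synthetic tone (assumed 48 kHz rate and the default 100 Hz cutoff; illustrative only, not sample code):

```js
// Standalone version of the same single-pole IIR filter.
function filterOnce(input, sampleRate, cutoffHz) {
  const rc = 1.0 / (cutoffHz * 2 * Math.PI);
  const dt = 1.0 / sampleRate;
  const alpha = dt / (rc + dt);  // smoothing factor in (0, 1)
  const output = new Float32Array(input.length);
  let last = 0;
  for (let i = 0; i < input.length; ++i) {
    last = last + alpha * (input[i] - last);  // y[i] = y[i-1] + α(x[i] − y[i-1])
    output[i] = last;
  }
  return output;
}

// A 1 kHz tone, far above the 100 Hz cutoff, comes out strongly attenuated.
const rate = 48000;
const tone = Float32Array.from({length: rate / 10},
    (_, i) => Math.sin(2 * Math.PI * 1000 * i / rate));
const filtered = filterOnce(tone, rate, 100);
console.log('peak after filtering:', Math.max(...filtered.map(Math.abs)));
```

At 48 kHz and a 100 Hz cutoff, alpha ≈ 0.013, and an ideal RC filter passes a 1 kHz tone at roughly a tenth of its amplitude (gain ≈ 1/√(1 + (f/fc)²)) — hence the muffled playback the page warns about.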

async function start() {
  startButton.disabled = true;
  try {
    stream = await navigator.mediaDevices.getUserMedia(constraints);
  } catch (error) {
    console.error('navigator.mediaDevices.getUserMedia error:', error.name, error.message);
    // Bail out: `stream` is undefined if getUserMedia failed.
    startButton.disabled = false;
    return;
  }
  const audioTracks = stream.getAudioTracks();
  console.log('Using audio device: ' + audioTracks[0].label);
  stream.oninactive = () => {
    console.log('Stream ended');
  };

  processor = new MediaStreamTrackProcessor(audioTracks[0]);
  generator = new MediaStreamTrackGenerator('audio');
  const source = processor.readable;
  const sink = generator.writable;
  transformer = new TransformStream({transform: lowPassFilter()});
  abortController = new AbortController();
  const signal = abortController.signal;
  const promise = source.pipeThrough(transformer, {signal}).pipeTo(sink);
  promise.catch((e) => {
    if (signal.aborted) {
      console.log('Shutting down streams after abort.');
    } else {
      console.error('Error from stream transform:', e);
    }
    source.cancel(e);
    sink.abort(e);
  });

  processedStream = new MediaStream();
  processedStream.addTrack(generator);
  audio.srcObject = processedStream;
  stopButton.disabled = false;
  await audio.play();
}

async function stop() {
  stopButton.disabled = true;
  audio.pause();
  audio.srcObject = null;
  stream.getTracks().forEach(track => {
    track.stop();
  });
  abortController.abort();
  abortController = null;
  startButton.disabled = false;
}

window.onload = init;
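
Since the page deliberately leaves `audio`, `processor`, `generator`, `transformer`, `stream`, and `processedStream` in global scope, the running pipeline can be inspected and tweaked live. Because `lowPassFilter` recomputes `alpha` on every frame, assigning to `cutoff` takes effect immediately. An illustrative console session (values are arbitrary):

```js
// In the DevTools console while the sample is running:
cutoff = 1000;            // raise the corner frequency: much less muffling
console.log(processor.readable, generator.writable);  // the chain's two ends
abortController.abort();  // stops just the transform; the Stop button also
                          // stops the microphone tracks
```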