Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

chore: adding sample app for the media core #15

Closed
wants to merge 1 commit into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions package.json
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,7 @@
"@types/sdp-transform": "^2.4.5",
"@typescript-eslint/eslint-plugin": "^4.28.2",
"@typescript-eslint/parser": "^4.28.2",
"@web/dev-server": "^0.1.35",
"chai": "^4.3.4",
"cspell": "^5.6.6",
"eslint": "^7.29.0",
Expand Down Expand Up @@ -79,6 +80,7 @@
"build": "run-s clean compile",
"clean": "npm run transpile:clean && npm run docs:clean",
"compile": "rollup -c ./rollup.config.js",
"start": "web-dev-server --config web-dev-server.config.js",
"docs": "npm run docs:clean && npm run docs:extract && npm run docs:generate",
"docs:clean": "rimraf ./docs",
"docs:extract": "api-extractor run -c ./api-extractor.json",
Expand Down
5 changes: 5 additions & 0 deletions rollup.config.js
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,11 @@ export default [
sourcemap: !production,
plugins: [terser()],
},
{
format: 'umd',
name: 'webrtcCore',
file: './samples/bundle.js',
},
],
plugins,
watch: false,
Expand Down
285 changes: 285 additions & 0 deletions samples/app.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,285 @@
// MediaDeviceInfo.kind values used to classify enumerated devices.
const kindOfDevices = {
  AUDIO_INPUT: 'audioinput',
  AUDIO_OUTPUT: 'audiooutput',
  VIDEO_INPUT: 'videoinput',
};

// Media helpers pulled from the UMD bundle global `webrtcCore`
// (exposed by the rollup UMD output added in this change).
const {
  getMicrophones,
  getCameras,
  getSpeakers,
  createCameraTrack,
  createMicrophoneTrack,
  createDisplayTrack,
  staticVideoEncoderConfig,
} = webrtcCore;

// Preview elements for the local camera and screen-share streams.
const videoElement = document.querySelector('video#localVideo');
const screenshareElement = document.querySelector('video#localScreenshare');

// Device-selection dropdowns, rebuilt together by gotDevices().
const audioInputSelect = document.querySelector('select#audioSource');
const audioOutputSelect = document.querySelector('select#audioOutput');
const videoInputSelect = document.querySelector('select#videoSource');
const selectors = [audioInputSelect, audioOutputSelect, videoInputSelect];

// Currently-active local tracks, set once tracks are created.
let localAudioTrack;
let cameraTrack;

// Shared rejection handler for media promises: logs the failure and swallows it.
/**
 * Log a media-related error to the console.
 *
 * @param error - Error raised by a rejected media promise.
 */
function handleError(error) {
  const { message, name } = error;

  // eslint-disable-next-line no-console
  console.log('webrtcCore Media error: ', message, name);
}

// Creates an <option> element whose value is the given device's id.
/**
 * Build a select-box option for a media device.
 *
 * @param device - MediaDeviceInfo-like object exposing a deviceId.
 * @returns The new <option> element; the caller sets its label text.
 */
function buildSelectOption(device) {
  const option = document.createElement('option');

  option.value = device.deviceId;

  return option;
}

// Populates the microphone dropdown with one option per audio-input device.
/**
 * Build the dropdown options for microphones.
 *
 * @param audioInputDevices - Array of audio-input MediaDeviceInfo objects.
 */
function buildAudioInputSelection(audioInputDevices) {
  for (const device of audioInputDevices) {
    const option = buildSelectOption(device);

    option.text = device.label || `Microphone ${audioInputSelect.length + 1}`;
    audioInputSelect.appendChild(option);
  }
}

// This function is for building dropdowns of Speakers
/**
 * Build the dropdown options for speakers (audio-output devices).
 *
 * @param audioOutputDevices - Array of audio-output MediaDeviceInfo objects.
 */
function buildAudioOutputSelection(audioOutputDevices) {
  audioOutputDevices.forEach((audioOutputDevice) => {
    const childOption = buildSelectOption(audioOutputDevice);

    // Fallback label numbers speakers by THIS select's current size; the
    // original mistakenly counted the microphone select (audioInputSelect).
    childOption.text = audioOutputDevice.label || `Speaker ${audioOutputSelect.length + 1}`;
    audioOutputSelect.appendChild(childOption);
  });
}

// This function is for building dropdowns of Cameras
/**
 * Build the dropdown options for cameras (video-input devices).
 *
 * @param videoInputDevices - Array of video-input MediaDeviceInfo objects.
 */
function buildVideoInputSelection(videoInputDevices) {
  videoInputDevices.forEach((videoInputDevice) => {
    const childOption = buildSelectOption(videoInputDevice);

    // Fallback label numbers cameras by THIS select's current size; the
    // original mistakenly counted the microphone select (audioInputSelect).
    childOption.text = videoInputDevice.label || `Camera ${videoInputSelect.length + 1}`;
    videoInputSelect.appendChild(childOption);
  });
}

// Shows the screen-share stream in the local screenshare preview box.
/**
 * Attach a content (display) track to the screenshare <video> element.
 *
 * @param contentTrack - Display track wrapping a MediaStreamTrack.
 */
function buildLocalScreenshare(contentTrack) {
  const track = contentTrack.getMediaStreamTrack();

  screenshareElement.srcObject = new MediaStream([track]);
}

// Rebuilds every device dropdown, preserving each previously selected value
// when the corresponding device is still present.
/**
 * Populate all device selection dropdowns.
 *
 * @param devices - Device lists in the order [microphones, cameras, speakers].
 */
function gotDevices(devices) {
  // Handles being called several times to update labels. Preserve values.
  const previousValues = selectors.map((selector) => selector.value);

  // Empty every dropdown before rebuilding it.
  for (const selector of selectors) {
    while (selector.firstChild) {
      selector.removeChild(selector.firstChild);
    }
  }

  const [microphones, cameras, speakers] = devices;

  buildAudioInputSelection(microphones);
  buildVideoInputSelection(cameras);
  buildAudioOutputSelection(speakers);

  // Restore each previous selection if an option with that value survived.
  selectors.forEach((selector, index) => {
    const options = [...selector.childNodes];

    if (options.some((option) => option.value === previousValues[index])) {
      // eslint-disable-next-line no-param-reassign
      selector.value = previousValues[index];
    }
  });
}

// Wires freshly created local tracks into the preview elements, then starts
// device enumeration so the dropdowns can be (re)built with real labels.
/**
 * Handle the tracks created by start().
 *
 * @param tracks - Tracks in the order [camera, microphone, display].
 * @returns Promise resolving to [microphones, cameras, speakers] lists.
 */
function gotTracks([localVideo, localAudio, localContent]) {
  // FIXME(review): buildLocalVideo is not defined anywhere in this file, so
  // calling gotTracks throws a ReferenceError — confirm where the local
  // camera-preview helper is supposed to come from.
  buildLocalVideo(localVideo);
  buildLocalScreenshare(localContent);
  localAudioTrack = localAudio;

  return Promise.all([getMicrophones(), getCameras(), getSpeakers()]);
}

/**
 * Enumerate microphones, cameras and speakers in parallel, then populate the
 * device dropdowns via gotDevices().
 */
async function gatherDevices() {
  const devices = await Promise.all([getMicrophones(), getCameras(), getSpeakers()]);

  gotDevices(devices);
}

// This function is starting point of app. It creates the local camera,
// microphone and display tracks, then runs the preview/device pipeline.
/**
 * Start the sample app by creating all local tracks.
 *
 * @param root0 - Options bag.
 * @param root0.audioPayload - Options forwarded to createMicrophoneTrack.
 * @param root0.videoPayload - Options forwarded to createCameraTrack.
 * @param root0.contentPayload - Options forwarded to createDisplayTrack.
 * @returns Promise that settles once tracks and device lists are handled.
 */
function start({ audioPayload = '', videoPayload = '', contentPayload = '' }) {
  // Order matters: gotTracks destructures [camera, microphone, display].
  const trackPromises = [
    createCameraTrack(videoPayload),
    createMicrophoneTrack(audioPayload),
    createDisplayTrack(contentPayload),
  ];

  // The original discarded the resolved tracks; route them through
  // gotTracks (previews) and gotDevices (dropdowns) as the stated intent
  // ("get all tracks and then run next operation") describes.
  return Promise.all(trackPromises).then(gotTracks).then(gotDevices).catch(handleError);
}

// This function is for changing audio input device on dropdown change and
// clicking the button updateAudio.
/**
 * Re-create the microphone track for the currently selected audio input
 * device and start playing it.
 */
// eslint-disable-next-line @typescript-eslint/no-unused-vars
async function setAudioInputDevice() {
  const audioPayload = {
    ID: audioInputSelect.value,
    kind: kindOfDevices.AUDIO_INPUT,
  };

  const audioTrack = await createMicrophoneTrack({ microphoneDeviceid: audioPayload.ID });

  audioTrack.play();
}

// This function is for changing video input device on dropdown change and
// clicking the button updateVideo.
/**
 * Re-create the camera track for the currently selected video input device,
 * applying the encoder config chosen in the #resolution dropdown, and start
 * playing it in the local preview element.
 */
// eslint-disable-next-line @typescript-eslint/no-unused-vars
async function setVideoInputDevice() {
  const videoPayload = {
    ID: videoInputSelect.value,
    kind: kindOfDevices.VIDEO_INPUT,
  };

  // Map the selected resolution label to its static encoder configuration.
  const resolution = document.getElementById('resolution').value;
  const encoderConfig = staticVideoEncoderConfig[resolution];

  cameraTrack = await createCameraTrack({ cameraDeviceId: videoPayload.ID, encoderConfig });

  cameraTrack.play(videoElement);
}

// Handler for the "apply resolution" button.
/**
 * Apply the encoder configuration selected in the #resolution dropdown to
 * the currently running camera track.
 */
async function applyResolution() {
  // Guard: there is no camera track until setVideoInputDevice() has run;
  // the original threw a TypeError when clicked too early.
  if (!cameraTrack) {
    return;
  }

  const resolution = document.getElementById('resolution').value;
  const constraint = staticVideoEncoderConfig[resolution];

  await cameraTrack.setEncoderConfig(constraint);
}

/*
  Background Noise Reduction (BNR) methods starts
*/

// Buttons driving the BNR demo below.
const listenToAudioBtn = document.getElementById('listenToAudio');
const enableBnrBtn = document.getElementById('enableBnrBtn');
const disableBnrBtn = document.getElementById('disableBnrBtn');

// Audio element that plays either the raw or the BNR-processed stream.
const bnrAudioOutput = document.getElementById('bnr-audio');

// Raw microphone stream captured by toggleAudioListen().
let rawAudioStream;
// Whether the demo is currently playing microphone audio.
let isListening = false;

/**
 * Method to toggle audio listening for BNR effect
 * called as part of clicking #listenToAudio button.
 */
const toggleAudioListen = async () => {
  if (!isListening) {
    // Disable the button while getUserMedia is pending to avoid re-entry.
    listenToAudioBtn.setAttribute('disabled', true);

    rawAudioStream = await navigator.mediaDevices.getUserMedia({ audio: true });
    bnrAudioOutput.srcObject = rawAudioStream;

    listenToAudioBtn.innerText = 'Stop listening to Audio';
    listenToAudioBtn.removeAttribute('disabled');

    enableBnrBtn.removeAttribute('disabled');

    isListening = true;
  } else {
    listenToAudioBtn.innerText = 'Start listening to Audio';

    enableBnrBtn.setAttribute('disabled', true);
    disableBnrBtn.setAttribute('disabled', true);
    bnrAudioOutput.srcObject = null;

    // Release the microphone: the original only detached the element from
    // the stream, leaving the capture (and the mic indicator) running.
    rawAudioStream?.getTracks().forEach((track) => track.stop());

    isListening = false;
  }
};

/**
 * Method to enableBNR
 * called as part of clicking #enableBnrBtn button.
 */
const enableBNR = async () => {
  const [audiotrack] = rawAudioStream.getAudioTracks();

  // NOTE(review): `mediaMethods` is not declared anywhere in this file —
  // presumably a global exposed by another bundle; confirm before shipping.
  const bnrAudioTrack = await mediaMethods.Effects.BNR.enableBNR(audiotrack);

  // Play the processed track instead of the raw microphone stream.
  bnrAudioOutput.srcObject = new MediaStream([bnrAudioTrack]);

  enableBnrBtn.setAttribute('disabled', true);
  disableBnrBtn.removeAttribute('disabled');
};

/**
 * Method to disableBNR
 * called as part of clicking #disableBnrBtn button.
 */
const disableBNR = () => {
  // NOTE(review): relies on the undeclared `mediaMethods` global — confirm.
  const bnrDisabledAudioTrack = mediaMethods.Effects.BNR.disableBNR();

  // Switch playback back to the unprocessed track.
  bnrAudioOutput.srcObject = new MediaStream([bnrDisabledAudioTrack]);

  disableBnrBtn.setAttribute('disabled', true);
  enableBnrBtn.removeAttribute('disabled');
};
Loading