Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
76 commits
Select commit Hold shift + click to select a range
c9a6600
amplify-chatbot initial import
Aug 4, 2020
cc5d16b
Use interface and comment out test
Aug 4, 2020
c6e33e1
Merge remote-tracking branch 'upstream/main' into ui-components/chatbot
Aug 4, 2020
1434414
Expose additional css variables and add icon variant to button
Aug 4, 2020
a2c5396
Merge remote-tracking branch 'upstream/main' into ui-components/chatbot
Aug 4, 2020
49f7e36
Update snapshot
Aug 4, 2020
6c35728
Clean up code
Aug 4, 2020
bd8a99c
Remove unused test case
Aug 4, 2020
5f404e8
Add snapshot testing
Aug 11, 2020
b09348f
Apply comments from @ashika01
Aug 12, 2020
a865263
Rename --icon-color to --icon-fill
Aug 12, 2020
82155ff
Remove unused class css
Aug 12, 2020
d504ff8
Update css for compatibility with existing components
Aug 12, 2020
e686aa3
Set default height
Aug 12, 2020
dc4dbb1
Integrate Interactions text message
Aug 12, 2020
2a8d052
Update snapshots
Aug 12, 2020
0189195
Simplify code
Aug 12, 2020
5d8e16c
Add audiorecorder and integrate voice chat
Aug 13, 2020
7aadabc
Use interface over type
Aug 13, 2020
1a07f47
Reorder functions and add byte descriptions
Aug 13, 2020
346a4cc
Add loading animation
Aug 17, 2020
7b0e4cb
Update interaction types
Aug 17, 2020
9e9163f
Scroll to bottom
Aug 17, 2020
655761c
set methods private
Aug 17, 2020
7d03591
Rename css class
Aug 17, 2020
f59e7ea
Update snapshot
Aug 17, 2020
52e73ee
Add error handling and reorder functions
Aug 18, 2020
ce066a1
Refactor error handling
Aug 18, 2020
5a9a0f8
Refactor chatbot functions
Aug 19, 2020
399dc3b
Cleanup
Aug 19, 2020
d8ef45d
Update snapshot
Aug 19, 2020
41aca35
Expose width css variable from amplify-button
Aug 19, 2020
8eb2a44
px to rem
Aug 19, 2020
9271bb1
Expose width and height variable; Control height at top level
Aug 19, 2020
9389a64
Add header slot
Aug 19, 2020
2dfb93e
Add listening animation
Aug 20, 2020
3c59258
Cleanup
Aug 20, 2020
343af4b
Update angular module
Aug 20, 2020
ad464d7
Move visualization to helper and downsample data array
Aug 20, 2020
2eab9bc
Separate animation scss
Aug 20, 2020
8fe2de1
Remove console.logs
Aug 20, 2020
499bdb9
Control width / height at host; expose message colors
Aug 20, 2020
9579c7e
Use I18n with strings
Aug 20, 2020
b4b42e3
Fix typo
Aug 20, 2020
8869e26
Use enum for chat state
Aug 21, 2020
3fa948d
Revert width back to 100%
Aug 21, 2020
93baeb8
Rename updateProps to validateProps
Aug 21, 2020
3f7a81c
Separate out interaction enum strings
Aug 21, 2020
89e1260
Move MIME type string to constants file
Aug 21, 2020
1b394de
Use async/await pattern in recorder.ts
Aug 21, 2020
cfdfd20
Check isBrowser and add silence props
Aug 21, 2020
a5ebb9f
Separate init from recorder for async control
Aug 21, 2020
d0618e2
Remove fieldId
Aug 21, 2020
bd89fa8
Add try catch around Interactions.send
Aug 21, 2020
84199c7
Remove requestId
Aug 21, 2020
185e7ab
Update snapshot
Aug 21, 2020
ac6d02e
Expose Interactions types
Aug 21, 2020
e5a1d88
Remove duplicate logic
Aug 21, 2020
aee7e65
Use enum to describe where the message is from
Aug 21, 2020
2e5acc5
Clean up css and set enum value
Aug 21, 2020
29ef508
Add slot description
Aug 21, 2020
4bed1db
Simplify import
Aug 21, 2020
4eb9e9a
Default noop to visualizer
Aug 21, 2020
aa55a32
Comment AudioRecorder and separate constants
Aug 21, 2020
9d2afab
Update snapshot
Aug 21, 2020
7cbbf86
Reorder css
Aug 23, 2020
3bbd620
Enable conversationModeOn prop
Aug 24, 2020
ee0dee0
Merge branch 'ui-components/chatbot-staging' of https://github.com/aw…
Aug 24, 2020
0acd8f8
Update packages/amplify-ui-components/src/common/audio-control/helper.ts
wlee221 Aug 24, 2020
3d058f6
Move error strings to translations
Aug 24, 2020
bd8d6dd
Remove trailing comma
Aug 24, 2020
cf3ef0f
Wrap audioContext resume with error logger
Aug 24, 2020
c073c3f
Try catch `resume` and make startRecording async
Aug 24, 2020
a59bbf9
Merge branch 'ui-components/chatbot-staging' of https://github.com/aw…
Aug 24, 2020
378e921
Merge branch 'main' into ui-components/chatbot-staging
Aug 24, 2020
212d4a5
Use callback based decode for safari
Aug 24, 2020
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions packages/amplify-ui-angular/src/amplify-module.ts
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@ import {
AmplifyAuthenticator,
AmplifyAuthFields,
AmplifyButton,
AmplifyChatbot,
AmplifyCheckbox,
AmplifyCodeField,
AmplifyConfirmSignIn,
Expand Down Expand Up @@ -65,6 +66,7 @@ const DECLARATIONS = [
AmplifyAuthenticator,
AmplifyAuthFields,
AmplifyButton,
AmplifyChatbot,
AmplifyCheckbox,
AmplifyCodeField,
AmplifyConfirmSignIn,
Expand Down
11 changes: 9 additions & 2 deletions packages/amplify-ui-components/src/common/Translations.ts
Original file line number Diff line number Diff line change
Expand Up @@ -109,5 +109,12 @@ export enum AuthStrings {
SIGN_UP_FAILED = 'Sign Up Failed',
}

type Translations = AuthErrorStrings | AuthStrings;
export const Translations = { ...AuthStrings, ...AuthErrorStrings };
// User-facing strings for the Interactions (chatbot) components.
// These pass through I18n, so consumers can override any of them by key.
export enum InteractionsStrings {
  CHATBOT_TITLE = 'ChatBot Lex',
  TEXT_INPUT_PLACEHOLDER = 'Write a message',
  CHAT_DISABLED_ERROR = 'Error: Either voice or text must be enabled for the chatbot',
  NO_BOT_NAME_ERROR = 'Error: Bot Name must be provided to ChatBot',
}

// Union of every translatable string key exposed by this package.
type Translations = AuthErrorStrings | AuthStrings | InteractionsStrings;
// Runtime lookup object merging all string enums; spread order matters only if keys ever collide (later wins).
export const Translations = { ...AuthStrings, ...AuthErrorStrings, ...InteractionsStrings };
127 changes: 127 additions & 0 deletions packages/amplify-ui-components/src/common/audio-control/helper.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,127 @@
import { RECORDER_EXPORT_MIME_TYPE } from './settings';

/**
 * Concatenates a list of recorded audio chunks into one Float32Array of the given total length.
 */
const mergeBuffers = (bufferArray: Float32Array[], recLength: number) => {
  const merged = new Float32Array(recLength);
  let writeIndex = 0;
  for (const chunk of bufferArray) {
    merged.set(chunk, writeIndex);
    writeIndex += chunk.length;
  }
  return merged;
};

/**
 * Downsamples audio to the desired export sample rate by averaging each window of
 * source samples that maps onto one output sample.
 *
 * @param buffer - raw audio samples at `recordSampleRate`
 * @param recordSampleRate - sample rate the audio was captured at
 * @param exportSampleRate - target sample rate (expected to be <= recordSampleRate)
 */
const downsampleBuffer = (buffer: Float32Array, recordSampleRate: number, exportSampleRate: number) => {
  if (exportSampleRate === recordSampleRate) {
    return buffer;
  }
  const sampleRateRatio = recordSampleRate / exportSampleRate;
  const newLength = Math.round(buffer.length / sampleRateRatio);
  const result = new Float32Array(newLength);
  let offsetResult = 0;
  let offsetBuffer = 0;
  while (offsetResult < result.length) {
    const nextOffsetBuffer = Math.round((offsetResult + 1) * sampleRateRatio);
    let accum = 0;
    let count = 0;
    for (let i = offsetBuffer; i < nextOffsetBuffer && i < buffer.length; i++) {
      accum += buffer[i];
      count++;
    }
    // Guard the empty-window case (possible when exportSampleRate > recordSampleRate),
    // which previously produced NaN from 0 / 0.
    result[offsetResult] = count > 0 ? accum / count : 0;
    offsetResult++;
    offsetBuffer = nextOffsetBuffer;
  }
  return result;
};

/**
 * Converts raw float audio samples (clamped to [-1, 1]) into 16-bit PCM,
 * written little-endian into the view starting at `offset`.
 */
const floatTo16BitPCM = (output: DataView, offset: number, input: Float32Array) => {
  input.forEach((sample, index) => {
    const clamped = Math.min(1, Math.max(-1, sample));
    const pcm = clamped < 0 ? clamped * 0x8000 : clamped * 0x7fff;
    output.setInt16(offset + index * 2, pcm, true);
  });
};

/**
 * Writes each character of `text` as a consecutive single byte (its char code)
 * starting at `offset`. Intended for the ASCII chunk IDs of a WAV header.
 */
const writeString = (view: DataView, offset: number, text: string) => {
  text.split('').forEach((char, index) => {
    view.setUint8(offset + index, char.charCodeAt(0));
  });
};

/**
 * Encodes raw PCM audio into a 16-bit mono WAV file layout.
 *
 * A PCM WAV file is a RIFF container with three parts: the 12-byte RIFF header,
 * the "fmt " subchunk describing the audio format, and the "data" subchunk
 * holding the samples. We precompute their sizes, then write top to bottom so
 * the byte offset (first numeric argument) increases strictly.
 *
 * @param samples - raw audio samples in the [-1, 1] float range
 * @param exportSampleRate - sample rate to declare in the header; if omitted,
 *   0 is written (callers in this module always provide it)
 */
const encodeWAV = (samples: Float32Array, exportSampleRate?: number) => {
  const audioSize = samples.length * 2; // 16-bit samples, so 2 bytes per sample.
  const fmtSize = 24; // Byte size of the fmt subchunk, including its 8-byte header.
  const dataSize = 8 + audioSize; // Byte size of the data subchunk: raw audio plus its 8-byte header.

  const totalByteSize = 12 + fmtSize + dataSize; // Whole file, including the 12-byte RIFF header.

  // DataView to write the byte values into.
  const buffer = new ArrayBuffer(totalByteSize);
  const view = new DataView(buffer);

  // DataView would coerce undefined to 0 anyway; make that explicit so the types are honest.
  const sampleRate = exportSampleRate ?? 0;

  // RIFF header
  writeString(view, 0, 'RIFF'); // At offset 0, write the letters "RIFF".
  // Chunk size = everything after this field: the 4 "WAVE" bytes plus both subchunks.
  // (The previous value omitted the 4 "WAVE" bytes, understating the size by 4.)
  view.setUint32(4, 4 + fmtSize + dataSize, true);
  writeString(view, 8, 'WAVE'); // At offset 8, write the format type "WAVE".

  // fmt subchunk
  writeString(view, 12, 'fmt '); // chunkId 'fmt '
  view.setUint32(16, fmtSize - 8, true); // fmt body size (16 for PCM): fmtSize minus its 8-byte header.
  view.setUint16(20, 1, true); // Audio format code, which is 1 for PCM.
  view.setUint16(22, 1, true); // Number of audio channels. We use mono, ie 1.
  view.setUint32(24, sampleRate, true); // Sample rate of the audio file.
  view.setUint32(28, sampleRate * 2, true); // Data rate (bytes/second): 2 bytes per sample * sampleRate.
  view.setUint16(32, 2, true); // Block align: bytes per sample frame across all channels, ie. 2 bytes.
  view.setUint16(34, 16, true); // Bits per sample, ie. 16 bits.

  // data subchunk
  writeString(view, 36, 'data'); // chunkId 'data'
  view.setUint32(40, audioSize, true); // Audio byte size.
  floatTo16BitPCM(view, 44, samples); // Raw PCM values start at byte 44.
  return view;
};

/**
* Given arrays of raw pcm audio, downsamples the audio to desired sample rate and encodes it to a wav audio file.
*
* @param recBuffer {Float32Array[]} - 2d float array containing the recorded raw audio
* @param recLength {number} - total length of recorded audio
* @param recordSampleRate {number} - sample rate of the recorded audio
* @param exportSampleRate {number} - desired sample rate of the exported file
*/
export const exportBuffer = (
recBuffer: Float32Array[],
recLength: number,
recordSampleRate: number,
exportSampleRate: number,
) => {
const mergedBuffers = mergeBuffers(recBuffer, recLength);
const downsampledBuffer = downsampleBuffer(mergedBuffers, recordSampleRate, exportSampleRate);
const encodedWav = encodeWAV(downsampledBuffer, exportSampleRate);
const audioBlob = new Blob([encodedWav], {
type: RECORDER_EXPORT_MIME_TYPE,
});
return audioBlob;
};
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
export * from './recorder';
export * from './helper';
export * from './visualizer';
210 changes: 210 additions & 0 deletions packages/amplify-ui-components/src/common/audio-control/recorder.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,210 @@
import { exportBuffer } from './helper';
import { browserOrNode, Logger } from '@aws-amplify/core';
import {
DEFAULT_EXPORT_SAMPLE_RATE,
FFT_MAX_DECIBELS,
FFT_MIN_DECIBELS,
FFT_SIZE,
FFT_SMOOTHING_TIME_CONSTANT,
} from './settings';

/** Thresholds used to decide when the speaker has gone silent. */
interface SilenceDetectionConfig {
  time: number; // duration of continuous quiet, in ms, before silence is reported
  amplitude: number; // normalized amplitude (samples are scaled to [-1, 1]) below which a sample counts as quiet
}

/** Called once silence has been detected for the configured duration. */
type SilenceHandler = () => void;
/** Receives FFT time-domain data on every audio-process tick, for rendering visualizations. */
type Visualizer = (dataArray: Uint8Array, bufferLength: number) => void;
// Module-scoped logger; Web Audio errors (e.g. failed resume()) are reported through it.
const logger = new Logger('AudioRecorder');

/**
 * Records microphone audio via the Web Audio API, detects silence, feeds a
 * visualizer with time-domain data, and exports the recording as a WAV blob.
 *
 * Usage: construct with silence thresholds, `await init()` once, then use
 * `startRecording` / `stopRecording` / `exportWAV`.
 */
export class AudioRecorder {
  private options: SilenceDetectionConfig;
  private audioContext: AudioContext;
  // Set by init(); every public method no-ops while this is falsy.
  private audioSupported: boolean;

  private analyserNode: AnalyserNode;
  private onSilence: SilenceHandler;
  private visualizer: Visualizer;

  // input mic stream is stored in a buffer
  private streamBuffer: Float32Array[] = [];
  private streamBufferLength = 0;

  // recording props
  private start: number; // timestamp (ms) of the most recent non-silent sample
  private recording = false;

  constructor(options: SilenceDetectionConfig) {
    this.options = options;
  }

  /**
   * This must be called first to enable audio context and request microphone access.
   * Once access granted, it connects all the necessary audio nodes to the context so
   * that it can begin recording or playing.
   *
   * Rejects with the string 'Audio is not supported' outside a browser or when
   * microphone access is denied/unavailable.
   */
  async init() {
    if (!browserOrNode().isBrowser) {
      this.audioSupported = false;
      return Promise.reject('Audio is not supported');
    }
    // Safari exposes the constructor under the webkit prefix.
    window.AudioContext = window.AudioContext || (window as any).webkitAudioContext;
    this.audioContext = new AudioContext();
    try {
      const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
      this.audioSupported = true;
      // Await node setup so init() doesn't resolve before the audio graph is
      // connected (previously setupAudioNodes was fired without awaiting it,
      // so a caller could start recording against unconnected nodes).
      await this.setupAudioNodes(stream);
    } catch {
      this.audioSupported = false;
      return Promise.reject('Audio is not supported');
    }
  }

  /**
   * Connects the microphone stream to the analyser and script-processor nodes
   * after a successful `init`.
   */
  private async setupAudioNodes(stream: MediaStream) {
    try {
      // resume() can reject on some browsers (e.g. outside a user gesture);
      // log and continue, since recording may still work once resumed later.
      await this.audioContext.resume();
    } catch (err) {
      logger.error(err);
    }
    const sourceNode = this.audioContext.createMediaStreamSource(stream);
    const processorNode = this.audioContext.createScriptProcessor(4096, 1, 1);

    processorNode.onaudioprocess = audioProcessingEvent => {
      if (!this.recording) return;
      const stream = audioProcessingEvent.inputBuffer.getChannelData(0);
      this.streamBuffer.push(new Float32Array(stream)); // set to a copy of the stream
      this.streamBufferLength += stream.length;
      this.analyse();
    };

    const analyserNode = this.audioContext.createAnalyser();
    analyserNode.minDecibels = FFT_MIN_DECIBELS;
    analyserNode.maxDecibels = FFT_MAX_DECIBELS;
    analyserNode.smoothingTimeConstant = FFT_SMOOTHING_TIME_CONSTANT;

    sourceNode.connect(analyserNode);
    analyserNode.connect(processorNode);
    processorNode.connect(sourceNode.context.destination);

    this.analyserNode = analyserNode;
  }

  /**
   * Start recording audio and listen for silence.
   *
   * @param onSilence {SilenceHandler} - called whenever silence is detected
   * @param visualizer {Visualizer} - called with audio data on each audio process to be used for visualization.
   */
  public async startRecording(onSilence?: SilenceHandler, visualizer?: Visualizer) {
    if (this.recording || !this.audioSupported) return;
    this.onSilence = onSilence || function() {};
    this.visualizer = visualizer || function() {};

    const context = this.audioContext;
    try {
      await context.resume();
    } catch (err) {
      logger.error(err);
    }
    this.start = Date.now();
    this.recording = true;
  }

  /**
   * Pause recording. Buffered audio is kept so it can still be exported.
   */
  public stopRecording() {
    if (!this.audioSupported) return;
    this.recording = false;
  }

  /**
   * Pause recording and clear audio buffer.
   */
  public clear() {
    this.stopRecording();
    this.streamBufferLength = 0;
    this.streamBuffer = [];
  }

  /**
   * Plays given audioStream with audioContext.
   *
   * Uses the callback form of decodeAudioData for Safari compatibility.
   *
   * @param buffer {Uint8Array} - audioStream to be played
   * @returns a promise that resolves when playback finishes, or undefined if
   *   the buffer is empty / audio is unsupported.
   */
  public play(buffer: Uint8Array) {
    if (!buffer || !this.audioSupported) return;
    const myBlob = new Blob([buffer]);

    return new Promise((res, rej) => {
      const fileReader = new FileReader();
      fileReader.onload = () => {
        const playbackSource = this.audioContext.createBufferSource();

        const successCallback = (buf: AudioBuffer) => {
          playbackSource.buffer = buf;
          playbackSource.connect(this.audioContext.destination);
          playbackSource.onended = () => {
            return res();
          };
          playbackSource.start(0);
        };
        const errorCallback = err => {
          return rej(err);
        };

        this.audioContext.decodeAudioData(fileReader.result as ArrayBuffer, successCallback, errorCallback);
      };
      fileReader.onerror = () => rej();
      fileReader.readAsArrayBuffer(myBlob);
    });
  }

  /**
   * Called after each audioProcess. Checks for silence and gives FFT time-domain
   * data to the visualizer.
   */
  private analyse() {
    if (!this.audioSupported) return;
    const analyser = this.analyserNode;
    analyser.fftSize = FFT_SIZE;

    const bufferLength = analyser.fftSize;
    const dataArray = new Uint8Array(bufferLength);
    const amplitude = this.options.amplitude;
    const time = this.options.time;

    analyser.getByteTimeDomainData(dataArray);
    this.visualizer(dataArray, bufferLength);

    for (let i = 0; i < bufferLength; i++) {
      // Normalize byte values (0..255, midpoint 128) to the range [-1, 1].
      const curr_value_time = dataArray[i] / 128 - 1.0;
      if (curr_value_time > amplitude || curr_value_time < -1 * amplitude) {
        // Loud enough: reset the silence clock.
        this.start = Date.now();
      }
    }
    const newtime = Date.now();
    const elapsedTime = newtime - this.start;
    if (elapsedTime > time) {
      this.onSilence();
    }
  }

  /**
   * Encodes recorded buffer to a wav file and exports it to a blob. The internal
   * buffer is cleared afterwards.
   *
   * @param exportSampleRate {number} - desired sample rate of the exported buffer
   */
  public async exportWAV(exportSampleRate: number = DEFAULT_EXPORT_SAMPLE_RATE) {
    if (!this.audioSupported) return;
    const recordSampleRate = this.audioContext.sampleRate;
    const blob = exportBuffer(this.streamBuffer, this.streamBufferLength, recordSampleRate, exportSampleRate);
    this.clear();
    return blob;
  }
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
// AudioRecorder settings
// MIME type assigned to the Blob produced by exportBuffer.
// NOTE(review): the exported data is a WAV file — 'audio/wav' may be more accurate; confirm downstream consumers before changing.
export const RECORDER_EXPORT_MIME_TYPE = 'application/octet-stream';
// Sample rate (Hz) used by exportWAV when the caller does not specify one.
export const DEFAULT_EXPORT_SAMPLE_RATE = 16000;

export const FFT_SIZE = 2048; // window size in samples for Fast Fourier Transform (FFT)
export const FFT_MAX_DECIBELS = -10; // maximum power value in the scaling range for the FFT analysis data
export const FFT_MIN_DECIBELS = -90; // minimum power value in the scaling range for the FFT analysis data
export const FFT_SMOOTHING_TIME_CONSTANT = 0.85; // averaging constant with the last analysis frame
Loading