feat: add
QinZhen001 committed May 15, 2024
1 parent 37e9105 commit c05bb2f
Showing 15 changed files with 231 additions and 171 deletions.
@@ -9,21 +9,14 @@ import { useEffect, useRef } from "react"
let frequencyData: Uint8Array
let bufferLength = 0
let analyser: AnalyserNode
const context = new AudioContext()
// It holds in-memory audio data, typically stored in an ArrayBuffer object
const dataSourceNode = context.createBufferSource()

export const AudioApi = () => {
const ref = useRef<HTMLInputElement>(null)
const canvasRef = useRef<HTMLCanvasElement>(null)

function createAnalyser(context: AudioContext, dataSource: AudioBufferSourceNode): AnalyserNode {
console.log("context", context)
const analyser = context.createAnalyser()
// The size of the FFT (Fast Fourier Transform) used for frequency-domain analysis.
analyser.fftSize = 512
dataSource.connect(analyser)
analyser.connect(context.destination)
return analyser
}

function drawBar() {
requestAnimationFrame(drawBar)
analyser.getByteFrequencyData(frequencyData)
@@ -52,22 +45,22 @@ export const AudioApi = () => {
const file = event.target?.files[0]
const reader = new FileReader()
reader.readAsArrayBuffer(file)
reader.onload = (evt) => {
console.log("evt", evt)
// @ts-ignore
const encodedBuffer = evt.currentTarget?.result
const context = new AudioContext()
context.decodeAudioData(encodedBuffer, (decodedBuffer) => {
const dataSource = context.createBufferSource() // AudioBufferSourceNode
dataSource.buffer = decodedBuffer
console.log("dataSource", dataSource)
analyser = createAnalyser(context, dataSource)
bufferLength = analyser.frequencyBinCount
console.log("analyser", analyser)
frequencyData = new Uint8Array(bufferLength)
dataSource.start()
drawBar()
})
reader.onload = async (evt: any) => {
const audioArrayBuffer = evt.target.result // ArrayBuffer
console.log("audioData", audioArrayBuffer)
// Decode an audio file into an AudioBuffer
const decodeAudioBuffer = await context.decodeAudioData(audioArrayBuffer)
dataSourceNode.buffer = decodeAudioBuffer
analyser = context.createAnalyser()
// The size of the FFT (Fast Fourier Transform) used for frequency-domain analysis.
// The FFT is an algorithm that converts a time-domain signal (such as audio) into the frequency domain.
analyser.fftSize = 512
dataSourceNode.connect(analyser)
analyser.connect(context.destination)
bufferLength = analyser.frequencyBinCount
frequencyData = new Uint8Array(bufferLength)
dataSourceNode.start()
drawBar()
}
}
}
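The body of drawBar is collapsed in the hunk above. For reference, a minimal sketch of the kind of canvas renderer it implies — assuming canvasRef points at a mounted canvas element; the bar color and spacing here are illustrative, not taken from the commit:

function drawBar() {
  requestAnimationFrame(drawBar)
  // copy the current frequency spectrum into the module-level buffer
  analyser.getByteFrequencyData(frequencyData)
  const canvas = canvasRef.current
  if (!canvas) return
  const ctx = canvas.getContext("2d")!
  ctx.clearRect(0, 0, canvas.width, canvas.height)
  const barWidth = canvas.width / bufferLength
  for (let i = 0; i < bufferLength; i++) {
    // frequencyData[i] is 0-255; scale it to the canvas height
    const barHeight = (frequencyData[i] / 255) * canvas.height
    ctx.fillStyle = "#4f9dff"
    ctx.fillRect(i * barWidth, canvas.height - barHeight, barWidth - 1, barHeight)
  }
}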
3 changes: 3 additions & 0 deletions src/pages/web-media/audio-buffer/index.css
@@ -0,0 +1,3 @@
.item {
padding: 4px;
}
68 changes: 68 additions & 0 deletions src/pages/web-media/audio-buffer/index.tsx
@@ -0,0 +1,68 @@
import { useEffect } from "react"

import "./index.css"

const audioContext = new AudioContext()
let audioBuffer: AudioBuffer

const AudioBuffer = () => {
const onClickClose = () => {
audioContext.close()
}

const onClickCreateBuffer = () => {
// Stereo
const channels = 2
// Create a 2-second audio clip whose sample rate matches the AudioContext.
// audioContext.sampleRate defaults to 44100
const frameCount = audioContext.sampleRate * 2.0
audioBuffer = audioContext.createBuffer(channels, frameCount, audioContext.sampleRate)
// Fill the buffer with data
for (let channel = 0; channel < channels; channel++) {
// Fill with white noise (random values between -1.0 and 1.0)
// getChannelData returns the actual sample data contained in the AudioBuffer
const nowBuffering = audioBuffer.getChannelData(channel)
for (let i = 0; i < frameCount; i++) {
// Math.random() is in [0; 1.0]
// audio needs to be in [-1.0; 1.0]
nowBuffering[i] = Math.random() * 2 - 1
}
}

console.log("createBuffer success", audioBuffer)
}

const onClickAudioBufferPlay = () => {
if (!audioBuffer) {
throw new Error("audioBuffer is not created")
}
// Get an AudioBufferSourceNode.
// This is the AudioNode to use when we want to play an AudioBuffer
const sourceNode = audioContext.createBufferSource()
// set the buffer in the AudioBufferSourceNode
sourceNode.buffer = audioBuffer
// connect the AudioBufferSourceNode to the
// destination so we can hear the sound
sourceNode.connect(audioContext.destination) // audioContext.destination is usually the speakers
// start the source playing
sourceNode.start()
}

return (
<div>
<div className="item">
<button onClick={onClickCreateBuffer}>createBuffer (build from raw data)</button>
</div>
<div className="item">
<button onClick={onClickAudioBufferPlay}>
Play AudioBuffer (via AudioBufferSourceNode)
</button>
</div>
<div className="item">
<button onClick={onClickClose}>close audioContext</button>
</div>
</div>
)
}

export default AudioBuffer
File renamed without changes.
37 changes: 37 additions & 0 deletions src/pages/web-media/audio-pcm/components/planAudioWorkletNode.tsx
@@ -0,0 +1,37 @@
// import processorURL from "../lib/pcm-processor.js?url"
import { IMicrophoneAudioTrack } from "agora-rtc-sdk-ng"

// console.log("processorURL", processorURL)

interface PlanAudioWorkletNodeProps {
audioTrack?: IMicrophoneAudioTrack
}

const PlanAudioWorkletNode = (props: PlanAudioWorkletNodeProps) => {
const { audioTrack } = props

const dealAudioPcmData = async () => {
if (!audioTrack) {
throw new Error("audioTrack is null")
}
const audioContext = new AudioContext()
await audioContext.audioWorklet.addModule("pcm-processor.js")
const audioWorkletNode = new AudioWorkletNode(audioContext, "pcm-processor")

const audioMediaStreamTrack = audioTrack.getMediaStreamTrack()
const mNode = audioContext.createMediaStreamSource(new MediaStream([audioMediaStreamTrack]))
mNode.connect(audioWorkletNode)

audioWorkletNode.port.onmessage = (event) => {
console.log("event onmessage", event.data)
}
}

return (
<div>
<button onClick={dealAudioPcmData}>dealAudioPcmData</button>
</div>
)
}

export default PlanAudioWorkletNode
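The worklet module loaded via addModule("pcm-processor.js") is referenced here but not included in this commit (the old src/pages/web-rtc copy is deleted below). A minimal sketch of what such a processor typically looks like — assuming it simply forwards each 128-sample render quantum of Float32 PCM to the main thread:

// pcm-processor.js — assumed content, runs inside the AudioWorkletGlobalScope
class PcmProcessor extends AudioWorkletProcessor {
  process(inputs) {
    const input = inputs[0] // first input: one Float32Array per channel
    if (input && input.length > 0) {
      // copy channel 0's 128-sample block and post it to the main thread,
      // where audioWorkletNode.port.onmessage receives it
      this.port.postMessage(input[0].slice(0))
    }
    return true // keep the processor alive
  }
}
registerProcessor("pcm-processor", PcmProcessor)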
File renamed without changes.
@@ -43,7 +43,7 @@ const AudioPcm = () => {
<button onClick={audioStop}>audioStop</button>
</section>
<section className="section">
<button onClick={changeScheme}>changeScheme: {schemeText}</button>
<button onClick={changeScheme}>scheme: {schemeText}</button>
</section>
<section className="section">
{plan == 1 ? (
File renamed without changes.
50 changes: 50 additions & 0 deletions src/pages/web-media/gain-node/index.tsx
@@ -0,0 +1,50 @@
import { useState } from "react"

const audioCtx = new AudioContext()
const gainNode = audioCtx.createGain()
let audioSourceNode: MediaStreamAudioSourceNode

const GainNode = () => {
const [muted, setMuted] = useState(false)

const onClickGainNode = async () => {
if (!navigator.mediaDevices.getUserMedia) {
throw new Error("getUserMedia is not supported")
}
try {
const audioStream = await navigator.mediaDevices.getUserMedia({
audio: true,
})
console.log("audioStream", audioStream)
audioSourceNode = audioCtx.createMediaStreamSource(audioStream)
audioSourceNode.connect(gainNode)
gainNode.connect(audioCtx.destination)
console.log("gainNode", gainNode)
} catch (e) {
console.error("error", e)
throw e
}
}

const onClickMute = () => {
const currentTime = audioCtx.currentTime
console.log("currentTime", currentTime)
if (!muted) {
// 0 means mute. If you still hear something, make sure you haven't
// connected your source into the output in addition to using the GainNode.
gainNode.gain.setValueAtTime(0, currentTime)
} else {
gainNode.gain.setValueAtTime(1, currentTime)
}
setMuted(!muted)
}

return (
<div>
<button onClick={onClickGainNode}>dealGainNode (play the microphone audio)</button>
<button onClick={onClickMute}>muted: {muted ? "true" : "false"}</button>
</div>
)
}

export default GainNode
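One caveat with the mute toggle above: setValueAtTime snaps the gain instantly, which can produce an audible click. A short linear ramp is a common alternative — a sketch only, not part of this commit, and the 50 ms fade length is an arbitrary choice:

const FADE_SECONDS = 0.05 // illustrative fade length
const t = audioCtx.currentTime
// clear any pending automation, pin the current value, then ramp
gainNode.gain.cancelScheduledValues(t)
gainNode.gain.setValueAtTime(gainNode.gain.value, t)
gainNode.gain.linearRampToValueAtTime(muted ? 1 : 0, t + FADE_SECONDS)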
48 changes: 48 additions & 0 deletions src/pages/web-media/index.tsx
@@ -0,0 +1,48 @@
import { lazy } from "react"
import { CommonPageRouter } from "../main"

const WebRtcSimple = lazy(() => import("./simple"))
const WebRtcDataChannel = lazy(() => import("./data-channel"))
const AnalyserNode = lazy(() => import("./analyser-node"))
const AudioPcm = lazy(() => import("./audio-pcm"))
const AudioBuffer = lazy(() => import("./audio-buffer"))
const GainNode = lazy(() => import("./gain-node"))

export const children = [
{
path: "webrtcSimple",
element: <WebRtcSimple></WebRtcSimple>,
title: "快速入门 WebRTC",
},
{
path: "webrtcDataChannel",
element: <WebRtcDataChannel></WebRtcDataChannel>,
title: "WebRTC DataChannel",
},
{
path: "analyserNode",
element: <AnalyserNode></AnalyserNode>,
title: "analyserNode 音频可视化分析",
},
{
path: "audioPcm",
element: <AudioPcm></AudioPcm>,
title: "获取麦克风pcm数据",
},
{
path: "audioBuffer",
element: <AudioBuffer></AudioBuffer>,
title: "AudioBuffer相关",
},
{
path: "gainNode",
element: <GainNode></GainNode>,
title: "GainNode相关",
},
]

export const WebMediaPage = () => {
return <CommonPageRouter routes={children}></CommonPageRouter>
}

export default WebMediaPage
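CommonPageRouter comes from ../main and is not part of this diff, so its implementation is unknown. Purely as a hypothetical guess at its shape — assuming react-router-dom v6 and that each route item carries path/element/title as above:

// Hypothetical sketch of CommonPageRouter; not the actual ../main implementation
import { Suspense } from "react"
import { Routes, Route } from "react-router-dom"

interface RouteItem {
  path: string
  element: JSX.Element
  title: string
}

export const CommonPageRouter = (props: { routes: RouteItem[] }) => (
  // Suspense is needed because the route elements are lazy-loaded
  <Suspense fallback={<div>loading...</div>}>
    <Routes>
      {props.routes.map((r) => (
        <Route key={r.path} path={r.path} element={r.element} />
      ))}
    </Routes>
  </Suspense>
)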
File renamed without changes.
103 changes: 0 additions & 103 deletions src/pages/web-rtc/audio-pcm/components/planAudioWorkletNode.tsx

This file was deleted.
