diff --git a/src/pages/web-rtc/audio-api/index.tsx b/src/pages/web-media/analyser-node/index.tsx
similarity index 60%
rename from src/pages/web-rtc/audio-api/index.tsx
rename to src/pages/web-media/analyser-node/index.tsx
index c4311e8..018a715 100644
--- a/src/pages/web-rtc/audio-api/index.tsx
+++ b/src/pages/web-media/analyser-node/index.tsx
@@ -9,21 +9,14 @@ import { useEffect, useRef } from "react"
 let frequencyData: Uint8Array
 let bufferLength = 0
 let analyser: AnalyserNode
+const context = new AudioContext()
+// Holds audio data kept in memory, usually stored in an ArrayBuffer object
+const dataSourceNode = context.createBufferSource()
 
 export const AudioApi = () => {
   const ref = useRef(null)
   const canvasRef = useRef(null)
 
-  function createAnalyser(context: AudioContext, dataSource: AudioBufferSourceNode): AnalyserNode {
-    console.log("context", context)
-    const analyser = context.createAnalyser()
-    // The size of the FFT (Fast Fourier Transform) used for frequency-domain analysis.
-    analyser.fftSize = 512
-    dataSource.connect(analyser)
-    analyser.connect(context.destination)
-    return analyser
-  }
-
   function drawBar() {
     requestAnimationFrame(drawBar)
     analyser.getByteFrequencyData(frequencyData)
@@ -52,22 +45,22 @@ export const AudioApi = () => {
     const file = event.target?.files[0]
     const reader = new FileReader()
     reader.readAsArrayBuffer(file)
-    reader.onload = (evt) => {
-      console.log("evt", evt)
-      // @ts-ignore
-      const encodedBuffer = evt.currentTarget?.result
-      const context = new AudioContext()
-      context.decodeAudioData(encodedBuffer, (decodedBuffer) => {
-        const dataSource = context.createBufferSource() // AudioBufferSourceNode
-        dataSource.buffer = decodedBuffer
-        console.log("dataSource", dataSource)
-        analyser = createAnalyser(context, dataSource)
-        bufferLength = analyser.frequencyBinCount
-        console.log("analyser", analyser)
-        frequencyData = new Uint8Array(bufferLength)
-        dataSource.start()
-        drawBar()
-      })
+    reader.onload = async (evt: any) => {
+      const audioArrayBuffer = evt.target.result // ArrayBuffer
+      console.log("audioData", audioArrayBuffer)
+      // Decode the audio file into an AudioBuffer
+      const decodeAudioBuffer = await context.decodeAudioData(audioArrayBuffer)
+      dataSourceNode.buffer = decodeAudioBuffer
+      analyser = context.createAnalyser()
+      // The size of the FFT (Fast Fourier Transform) used for frequency-domain analysis.
+      // The FFT is an algorithm that converts a time-domain signal (such as audio) into a frequency-domain signal.
+      analyser.fftSize = 512
+      dataSourceNode.connect(analyser)
+      analyser.connect(context.destination)
+      bufferLength = analyser.frequencyBinCount
+      frequencyData = new Uint8Array(bufferLength)
+      dataSourceNode.start()
+      drawBar()
     }
   }
 }
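Note: the hunk above shows only the top of `drawBar`; the canvas-drawing body falls outside the diff context. For orientation, a bar renderer over `AnalyserNode` byte data typically looks like the sketch below (a sketch only — the helper name, sizing, and recursion style are not from the repo; with `fftSize = 512` there are `frequencyBinCount = 256` bins). Also worth noting: an `AudioBufferSourceNode` can only be started once, so the module-level `dataSourceNode` supports a single file selection per page load.

```ts
// Sketch of a bar-graph renderer for AnalyserNode byte data (not the repo's code).
function drawBarSketch(canvas: HTMLCanvasElement, analyser: AnalyserNode, frequencyData: Uint8Array) {
  const ctx = canvas.getContext("2d")!
  requestAnimationFrame(() => drawBarSketch(canvas, analyser, frequencyData))
  // Each element is the magnitude (0-255) of one frequency bin;
  // there are fftSize / 2 = frequencyBinCount bins.
  analyser.getByteFrequencyData(frequencyData)
  ctx.clearRect(0, 0, canvas.width, canvas.height)
  const barWidth = canvas.width / frequencyData.length
  frequencyData.forEach((value, i) => {
    const barHeight = (value / 255) * canvas.height
    ctx.fillRect(i * barWidth, canvas.height - barHeight, barWidth - 1, barHeight)
  })
}
```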
diff --git a/src/pages/web-media/audio-buffer/index.css b/src/pages/web-media/audio-buffer/index.css
new file mode 100644
index 0000000..43d6544
--- /dev/null
+++ b/src/pages/web-media/audio-buffer/index.css
@@ -0,0 +1,3 @@
+.item {
+  padding: 4px;
+}
diff --git a/src/pages/web-media/audio-buffer/index.tsx b/src/pages/web-media/audio-buffer/index.tsx
new file mode 100644
index 0000000..bf3752d
--- /dev/null
+++ b/src/pages/web-media/audio-buffer/index.tsx
@@ -0,0 +1,68 @@
+import { useEffect } from "react"
+
+import "./index.css"
+
+const audioContext = new AudioContext()
+let audioBuffer: AudioBuffer
+
+const AudioBuffer = () => {
+  const onClickClose = () => {
+    audioContext.close()
+  }
+
+  const onClickCreateBuffer = () => {
+    // Stereo
+    const channels = 2
+    // Create a 2-second clip at the same sample rate as the AudioContext.
+    // audioContext.sampleRate defaults to 44100
+    const frameCount = audioContext.sampleRate * 2.0
+    audioBuffer = audioContext.createBuffer(channels, frameCount, audioContext.sampleRate)
+    // Fill in the data
+    for (let channel = 0; channel < channels; channel++) {
+      // Fill with white noise (random values between -1.0 and 1.0)
+      // getChannelData exposes the actual data held in the AudioBuffer
+      const nowBuffering = audioBuffer.getChannelData(channel)
+      for (let i = 0; i < frameCount; i++) {
+        // Math.random() is in [0; 1.0]
+        // audio needs to be in [-1.0; 1.0]
+        nowBuffering[i] = Math.random() * 2 - 1
+      }
+    }
+
+    console.log("createBuffer success", audioBuffer)
+  }
+
+  const onClickAudioBufferPlay = () => {
+    if (!audioBuffer) {
+      throw new Error("audioBuffer is not created")
+    }
+    // Get an AudioBufferSourceNode.
+    // This is the AudioNode to use when we want to play an AudioBuffer
+    const sourceNode = audioContext.createBufferSource()
+    // set the buffer in the AudioBufferSourceNode
+    sourceNode.buffer = audioBuffer
+    // connect the AudioBufferSourceNode to the
+    // destination so we can hear the sound
+    sourceNode.connect(audioContext.destination) // audioContext.destination is usually the speakers
+    // start the source playing
+    sourceNode.start()
+  }
+
+  return (
+    <div>
+      <div className="item">
+        <button onClick={onClickCreateBuffer}>createBuffer</button>
+      </div>
+      <div className="item">
+        <button onClick={onClickAudioBufferPlay}>play</button>
+      </div>
+      <div className="item">
+        <button onClick={onClickClose}>close</button>
+      </div>
+    </div>
+  )
+}
+
+export default AudioBuffer
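A note on the file above: `createBuffer` allocates `frameCount` samples of silence per channel, and `getChannelData` hands back the live `Float32Array` to write into. The demo fills it with white noise; the same loop can synthesize an audible pitch instead. A minimal variant sketch (the mono channel and 440 Hz tone are arbitrary choices, not from the diff):

```ts
// Fill an AudioBuffer with a 440 Hz sine tone instead of white noise.
const toneContext = new AudioContext()
const seconds = 2.0
const toneBuffer = toneContext.createBuffer(1, toneContext.sampleRate * seconds, toneContext.sampleRate)
const samples = toneBuffer.getChannelData(0)
for (let i = 0; i < samples.length; i++) {
  // Sample i corresponds to time i / sampleRate; values stay in [-1, 1].
  samples[i] = Math.sin((2 * Math.PI * 440 * i) / toneContext.sampleRate)
}
```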
diff --git a/src/pages/web-rtc/audio-pcm/README.md b/src/pages/web-media/audio-pcm/README.md
similarity index 100%
rename from src/pages/web-rtc/audio-pcm/README.md
rename to src/pages/web-media/audio-pcm/README.md
diff --git a/src/pages/web-media/audio-pcm/components/planAudioWorkletNode.tsx b/src/pages/web-media/audio-pcm/components/planAudioWorkletNode.tsx
new file mode 100644
index 0000000..12e5525
--- /dev/null
+++ b/src/pages/web-media/audio-pcm/components/planAudioWorkletNode.tsx
@@ -0,0 +1,37 @@
+// import processorURL from "../lib/pcm-processor.js?url"
+import { IMicrophoneAudioTrack } from "agora-rtc-sdk-ng"
+
+// console.log("processorURL", processorURL)
+
+interface PlanAudioWorkletNodeProps {
+  audioTrack?: IMicrophoneAudioTrack
+}
+
+const PlanAudioWorkletNode = (props: PlanAudioWorkletNodeProps) => {
+  const { audioTrack } = props
+
+  const dealAudioPcmData = async () => {
+    if (!audioTrack) {
+      throw new Error("audioTrack is null")
+    }
+    const audioContext = new AudioContext()
+    await audioContext.audioWorklet.addModule("pcm-processor.js")
+    const audioWorkletNode = new AudioWorkletNode(audioContext, "pcm-processor")
+
+    const audioMediaStreamTrack = audioTrack.getMediaStreamTrack()
+    const mNode = audioContext.createMediaStreamSource(new MediaStream([audioMediaStreamTrack]))
+    mNode.connect(audioWorkletNode)
+
+    audioWorkletNode.port.onmessage = (event) => {
+      console.log("event onmessage", event.data)
+    }
+  }
+
+  return (
+    <div>
+      <button onClick={dealAudioPcmData}>start</button>
+    </div>
+  )
+}
+
+export default PlanAudioWorkletNode
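`addModule("pcm-processor.js")` loads a worklet script that is not part of this diff. For context, a minimal processor matching the `"pcm-processor"` name registered above could look like the sketch below; the repo's actual `pcm-processor.js` may differ (worklet files run as plain JS on the audio rendering thread):

```js
// pcm-processor.js — sketch of a worklet that forwards raw PCM to the main thread.
class PcmProcessor extends AudioWorkletProcessor {
  process(inputs, outputs) {
    // inputs[0] is the first connected node; inputs[0][0] is channel 0,
    // a Float32Array of 128 PCM samples per render quantum.
    const channelData = inputs[0][0]
    if (channelData) {
      // Copy before posting, because the underlying buffer is reused between calls.
      this.port.postMessage(channelData.slice(0))
    }
    // Returning true keeps the processor alive.
    return true
  }
}

registerProcessor("pcm-processor", PcmProcessor)
```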
diff --git a/src/pages/web-rtc/audio-pcm/components/planInsertableStream.tsx b/src/pages/web-media/audio-pcm/components/planInsertableStream.tsx
similarity index 100%
rename from src/pages/web-rtc/audio-pcm/components/planInsertableStream.tsx
rename to src/pages/web-media/audio-pcm/components/planInsertableStream.tsx
diff --git a/src/pages/web-rtc/audio-pcm/index.css b/src/pages/web-media/audio-pcm/index.css
similarity index 100%
rename from src/pages/web-rtc/audio-pcm/index.css
rename to src/pages/web-media/audio-pcm/index.css
diff --git a/src/pages/web-rtc/audio-pcm/index.tsx b/src/pages/web-media/audio-pcm/index.tsx
similarity index 95%
rename from src/pages/web-rtc/audio-pcm/index.tsx
rename to src/pages/web-media/audio-pcm/index.tsx
index bb7f486..cf66f98 100644
--- a/src/pages/web-rtc/audio-pcm/index.tsx
+++ b/src/pages/web-media/audio-pcm/index.tsx
@@ -43,7 +43,7 @@ const AudioPcm = () => {
-
+
         {plan == 1 ? (
diff --git a/src/pages/web-rtc/data-channel/index.tsx b/src/pages/web-media/data-channel/index.tsx
similarity index 100%
rename from src/pages/web-rtc/data-channel/index.tsx
rename to src/pages/web-media/data-channel/index.tsx
diff --git a/src/pages/web-media/gain-node/index.tsx b/src/pages/web-media/gain-node/index.tsx
new file mode 100644
index 0000000..98036fe
--- /dev/null
+++ b/src/pages/web-media/gain-node/index.tsx
@@ -0,0 +1,50 @@
+import { useState } from "react"
+
+const audioCtx = new AudioContext()
+const gainNode = audioCtx.createGain()
+let audioSourceNode
+
+const GainNode = () => {
+  const [muted, setMuted] = useState(false)
+
+  const onClickGainNode = async () => {
+    if (!navigator.mediaDevices.getUserMedia) {
+      throw new Error("getUserMedia is not supported")
+    }
+    try {
+      const audioStream = await navigator.mediaDevices.getUserMedia({
+        audio: true,
+      })
+      console.log("audioStream", audioStream)
+      audioSourceNode = audioCtx.createMediaStreamSource(audioStream)
+      audioSourceNode.connect(gainNode)
+      gainNode.connect(audioCtx.destination)
+      console.log("gainNode", gainNode)
+    } catch (e) {
+      console.error("error", e)
+      throw e
+    }
+  }
+
+  const onClickMute = () => {
+    const currentTime = audioCtx.currentTime
+    console.log("currentTime", currentTime)
+    if (!muted) {
+      // 0 means mute. If you still hear something, make sure you haven't
+      // connected your source into the output in addition to using the GainNode.
+      gainNode.gain.setValueAtTime(0, currentTime)
+    } else {
+      gainNode.gain.setValueAtTime(1, currentTime)
+    }
+    setMuted(!muted)
+  }
+
+  return (
+    <div>
+      <button onClick={onClickGainNode}>start</button>
+      <button onClick={onClickMute}>{muted ? "unmute" : "mute"}</button>
+    </div>
+  )
+}
+
+export default GainNode
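One caveat on the mute logic above: `setValueAtTime(0, currentTime)` changes the gain in a single step, which can produce an audible click. If that matters, the same AudioParam scheduling API can ramp instead — a sketch, with an arbitrary 30 ms ramp length:

```ts
// Click-free mute/unmute by ramping gain over ~30 ms (sketch, not repo code).
function setMutedSmooth(ctx: AudioContext, gain: GainNode, mute: boolean) {
  const now = ctx.currentTime
  gain.gain.cancelScheduledValues(now)
  // Anchor the ramp at the current value, then glide to the target.
  gain.gain.setValueAtTime(gain.gain.value, now)
  gain.gain.linearRampToValueAtTime(mute ? 0 : 1, now + 0.03)
}
```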
diff --git a/src/pages/web-media/index.tsx b/src/pages/web-media/index.tsx
new file mode 100644
index 0000000..e349446
--- /dev/null
+++ b/src/pages/web-media/index.tsx
@@ -0,0 +1,48 @@
+import { lazy } from "react"
+import { CommonPageRouter } from "../main"
+
+const WebRtcSimple = lazy(() => import("./simple"))
+const WebRtcDataChannel = lazy(() => import("./data-channel"))
+const AnalyserNode = lazy(() => import("./analyser-node"))
+const AudioPcm = lazy(() => import("./audio-pcm"))
+const AudioBuffer = lazy(() => import("./audio-buffer"))
+const GainNode = lazy(() => import("./gain-node"))
+
+export const children = [
+  {
+    path: "webrtcSimple",
+    element: <WebRtcSimple />,
+    title: "WebRTC quick start",
+  },
+  {
+    path: "webrtcDataChannel",
+    element: <WebRtcDataChannel />,
+    title: "WebRTC DataChannel",
+  },
+  {
+    path: "analyserNode",
+    element: <AnalyserNode />,
+    title: "analyserNode audio visualization",
+  },
+  {
+    path: "audioPcm",
+    element: <AudioPcm />,
+    title: "Get microphone PCM data",
+  },
+  {
+    path: "audioBuffer",
+    element: <AudioBuffer />,
+    title: "AudioBuffer related",
+  },
+  {
+    path: "gainNode",
+    element: <GainNode />,
+    title: "GainNode related",
+  },
+]
+
+export const WebMediaPage = () => {
+  return <CommonPageRouter children={children} />
+}
+
+export default WebMediaPage
diff --git a/src/pages/web-rtc/simple/index.tsx b/src/pages/web-media/simple/index.tsx
similarity index 100%
rename from src/pages/web-rtc/simple/index.tsx
rename to src/pages/web-media/simple/index.tsx
diff --git a/src/pages/web-rtc/audio-pcm/components/planAudioWorkletNode.tsx b/src/pages/web-rtc/audio-pcm/components/planAudioWorkletNode.tsx
deleted file mode 100644
index 387117c..0000000
--- a/src/pages/web-rtc/audio-pcm/components/planAudioWorkletNode.tsx
+++ /dev/null
@@ -1,103 +0,0 @@
-// import processorURL from "../lib/pcm-processor.js?url"
-import { IMicrophoneAudioTrack } from "agora-rtc-sdk-ng"
-
-// console.log("processorURL", processorURL)
-
-interface PlanAudioWorkletNodeProps {
-  audioTrack?: IMicrophoneAudioTrack
-}
-
-const PlanAudioWorkletNode = (props: PlanAudioWorkletNodeProps) => {
-  const { audioTrack } = props
-
-  const dealAudioPcmData = async () => {
-    if (!audioTrack) {
-      throw new Error("audioTrack is null")
-    }
-    const audioContext = new AudioContext()
-
-    await audioContext.audioWorklet.addModule("pcm-processor.js")
-    // const audioMediaStreamTrack = audioTrack.getMediaStreamTrack()
-    // Create a MediaStreamDestination node
-    // const audioDestination = audioContext.createMediaStreamDestination()
-    // Create an AudioWorkletNode
-    const audioWorkletNode = new AudioWorkletNode(audioContext, "pcm-processor")
-    // Connect the AudioWorkletNode to the MediaStreamDestination
-    // audioWorkletNode.connect(audioDestination)
-    // Add the MediaStreamTrack to the MediaStreamDestination's output tracks
-    // audioDestination.stream.addTrack(audioMediaStreamTrack)
-    // Use the MediaStreamDestination as the media input
-    // const mediaStream = audioDestination.stream
-
-    const audioMediaStreamTrack = audioTrack.getMediaStreamTrack()
-    const mNode = audioContext.createMediaStreamSource(new MediaStream([audioMediaStreamTrack]))
-    mNode.connect(audioWorkletNode)
-
-    audioWorkletNode.port.onmessage = (event) => {
-      console.log("event onmessage", event.data)
-    }
-
-    // const options = {}
-    // const source = new MediaElementAudioSourceNode(audioContext, options)
-
-    // const source = audioContext.createMediaStreamSource(audioMediaStreamTrack)
-    // const processor = audioContext.createScriptProcessor(1024, 1, 1)
-
-    // processor.onaudioprocess = (event) => {
-    //   const inputBuffer = event.inputBuffer.getChannelData(0)
-    //   // Example: convert the PCM data into a Float32Array
-    //   const pcmArray = new Float32Array(inputData.length)
-    // }
-
-    // const audioSourceNode = audioContext.create(audioTrack)
-  }
-
-  // const dealAudioPcmData = () => {
-  //   if (!audioTrack) {
-  //     throw new Error("audioTrack is null")
-  //   }
-  //   const audioMediaStreamTrack = audioTrack.getMediaStreamTrack()
-  //   // Create an AudioContext object
-  //   const audioContext = new AudioContext()
-  //   const mediaStream = new MediaStream([audioMediaStreamTrack])
-  //   // Use createMediaStreamSource to convert the MediaStream into an AudioNode
-  //   const sourceNode = audioContext.createMediaStreamSource(mediaStream)
-  //   // Use createScriptProcessor to create a script processor
-  //   // The ScriptProcessorNode is deprecated
-  //   const scriptProcessorNode = audioContext.createScriptProcessor(1024, 1, 1)
-  //   // Add the script processor to the AudioNode processing chain
-  //   sourceNode.connect(scriptProcessorNode)
-  //   scriptProcessorNode.connect(audioContext.destination)

-  //   // This callback runs whenever the script processor fires
-  //   scriptProcessorNode.onaudioprocess = function (event) {
-  //     const inputBuffer = event.inputBuffer
-  //     const outputBuffer = event.outputBuffer

-  //     console.log("inputBuffer", inputBuffer)
-  //     console.log("outputBuffer", outputBuffer)

-  //     for (let channel = 0; channel < outputBuffer.numberOfChannels; channel++) {
-  //       const inputData = inputBuffer.getChannelData(channel)
-  //       const outputData = outputBuffer.getChannelData(channel)

-  //       console.log("inputData", inputData)
-  //       console.log("outputData", outputData)

-  //       // Process the PCM audio data, e.g. encode it to MP3
-  //       // ...

-  //       // Store the processed audio data
-  //       // ...
-  //     }
-  //   }
-  // }
-
-  return (
-    <div>
-      <button onClick={dealAudioPcmData}>start</button>
-    </div>
-  )
-}
-
-export default PlanAudioWorkletNode
diff --git a/src/pages/web-rtc/index.tsx b/src/pages/web-rtc/index.tsx
deleted file mode 100644
index e60a0ca..0000000
--- a/src/pages/web-rtc/index.tsx
+++ /dev/null
@@ -1,36 +0,0 @@
-import { lazy } from "react"
-import { CommonPageRouter } from "../main"
-
-const WebRtcSimple = lazy(() => import("./simple"))
-const WebRtcDataChannel = lazy(() => import("./data-channel"))
-const AudioApi = lazy(() => import("./audio-api"))
-const AudioPcm = lazy(() => import("./audio-pcm"))
-
-export const children = [
-  {
-    path: "webrtc-simple",
-    element: <WebRtcSimple />,
-    title: "WebRTC quick start",
-  },
-  {
-    path: "webrtc-data-channel",
-    element: <WebRtcDataChannel />,
-    title: "WebRTC DataChannel",
-  },
-  {
-    path: "audio-api",
-    element: <AudioApi />,
-    title: "Audio API related",
-  },
-  {
-    path: "audio-pcm",
-    element: <AudioPcm />,
-    title: "Get microphone PCM data",
-  },
-]
-
-export const WebRtcPage = () => {
-  return <CommonPageRouter children={children} />
-}
-
-export default WebRtcPage
diff --git a/src/router/index.tsx b/src/router/index.tsx
index 3e87b42..46cec8d 100644
--- a/src/router/index.tsx
+++ b/src/router/index.tsx
@@ -1,7 +1,7 @@
 // Ideally these children arrays would be written inline here (not imported): importing loads the corresponding index.js and breaks lazy loading
 import { children as vueChildren } from "../pages/vue"
 import { children as otherChildren } from "../pages/others"
-import { children as RtcChildren } from "../pages/web-rtc"
+import { children as RtcChildren } from "../pages/web-media"
 import { children as ReactChildren } from "../pages/react"
 import { children as AnimateChildren } from "../pages/animation"
 import { children as CanvasChildren } from "../pages/canvas"
@@ -17,7 +17,7 @@ const WebpackPage = lazy(() => import("../pages/webpack"))
 const NodePage = lazy(() => import("../pages/node"))
 const VitePage = lazy(() => import("../pages/vite"))
 const MainPage = lazy(() => import("../pages/main"))
-const WebRtcPage = lazy(() => import("../pages/web-rtc"))
+const WebMediaPage = lazy(() => import("../pages/web-media"))
 const OtherPage = lazy(() => import("../pages/others"))
 const AnimationPage = lazy(() => import("../pages/animation"))
 const CanvasPage = lazy(() => import("../pages/canvas"))
@@ -58,9 +58,9 @@ export const routes: PageRoute[] = [
     children: NodeChildren,
   },
   {
-    path: "/web-rtc",
-    element: <WebRtcPage />,
-    title: "web-rtc related",
+    path: "/web-media",
+    element: <WebMediaPage />,
+    title: "Web audio/video related",
     children: RtcChildren,
   },
   {
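The comment on the first line of src/router/index.tsx explains why the `children` imports are a trade-off: statically importing a page's `children` pulls that page's index chunk into the main bundle at startup, while `lazy()` keeps the page module split. An illustrative contrast (not code from the repo):

```ts
import { lazy } from "react"

// Eager: bundles ../pages/web-media/index.js into the startup chunk,
// because the children array must be evaluated at import time.
import { children as RtcChildren } from "../pages/web-media"

// Lazy: the page module is fetched only when the route is first rendered.
const WebMediaPage = lazy(() => import("../pages/web-media"))
```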