1

麦克风音量 (Visualizing Microphone Volume)

When developing live web applications, it is usually necessary to check whether the device is normal before starting the broadcast. This article will introduce how to visualize the microphone volume.

Background: why AudioWorklet exists

To implement this feature, we need the browser's AudioWorklet API.

Audio processing in the Web Audio API runs on a dedicated rendering thread, which keeps playback smooth. Previously, `audioContext.createScriptProcessor` was the recommended way to process audio with custom JS, but its callback runs asynchronously on the main thread, so the processing was inherently "delayed".

So AudioWorklet was born to replace createScriptProcessor.

AudioWorklet can integrate the JS code provided by the user into the audio processing thread, without jumping to the main thread to process the audio, thus ensuring zero delay and synchronous rendering.

How it is structured

Using Audio Worklet consists of two parts: AudioWorkletProcessor and AudioWorkletNode.

  • AudioWorkletProcessor represents the real audio processing JS code, running in AudioWorkletGlobalScope.
  • AudioWorkletNode corresponds to AudioWorkletProcessor and plays the role of connecting the main thread AudioNodes.

Write code

First, let's write AudioWorkletProcessor, that is, the logic code for processing audio, in a separate js file named processor.js, which will run on a separate thread.

 // processor.js
// Smoothing factor for the decaying peak meter: higher values make the
// displayed volume fall off more slowly between updates.
const SMOOTHING_FACTOR = 0.8

/**
 * AudioWorkletProcessor that measures the RMS level of its input and posts
 * it (scaled to 0-100) to the main thread every 200 ms.
 *
 * Runs inside the AudioWorkletGlobalScope (the audio rendering thread),
 * where `currentTime` and `registerProcessor` are globals.
 */
class VolumeMeter extends AudioWorkletProcessor {
  static get parameterDescriptors() {
    // No custom AudioParams are exposed.
    return []
  }

  constructor() {
    super()
    this.volume = 0
    // `currentTime` is the rendering-thread clock, in seconds.
    this.lastUpdate = currentTime
  }

  /**
   * Compute the RMS volume of the first channel of the first input and
   * throttle updates to the main thread via the message port.
   * @param {Float32Array[][]} inputs - per-input, per-channel sample data.
   */
  calculateVolume(inputs) {
    const inputChannelData = inputs[0][0]

    // The input may momentarily have no channel data (e.g. while the
    // microphone stream is still connecting, or when nothing is wired to
    // this node). Bail out instead of crashing / producing NaN.
    if (!inputChannelData || inputChannelData.length === 0) {
      return
    }

    // Squared-sum of the samples in this render quantum.
    let sum = 0
    for (let i = 0; i < inputChannelData.length; ++i) {
      sum += inputChannelData[i] * inputChannelData[i]
    }

    // Calculate the RMS level and update the volume.
    const rms = Math.sqrt(sum / inputChannelData.length)

    // Decay the previous reading so the meter falls smoothly instead of
    // flickering, but jump up immediately on louder input.
    this.volume = Math.max(rms, this.volume * SMOOTHING_FACTOR)

    // Post a message to the node at most every 200ms.
    if (currentTime - this.lastUpdate > 0.2) {
      this.port.postMessage({ eventType: "volume", volume: this.volume * 100 })
      // Store previous time
      this.lastUpdate = currentTime
    }
  }

  process(inputs, outputs, parameters) {
    this.calculateVolume(inputs)

    // Returning true keeps the processor alive even while input is silent.
    return true
  }
}

// Register the processor under the name 'vumeter'.
// NOTE: this name must match the one passed to `new AudioWorkletNode(...)`
// on the main thread.
registerProcessor('vumeter', VolumeMeter);

The processing logic is encapsulated in VolumeMeter (a "volume meter"), a class that inherits from AudioWorkletProcessor.

Main-thread code

 // Ask the user for permission to use the microphone.
function activeSound () {
    try {
        // Prefer the modern promise-based API: the prefixed
        // navigator.getUserMedia variants are deprecated and have been
        // removed from current browsers.
        if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
            navigator.mediaDevices.getUserMedia({ audio: true, video: false })
                .then(onMicrophoneGranted)
                .catch(onMicrophoneDenied);
            return;
        }

        // Legacy callback-based fallback for very old browsers.
        navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia;
        navigator.getUserMedia({ audio: true, video: false }, onMicrophoneGranted, onMicrophoneDenied);
    } catch(e) {
        alert(e)
    }
}

/**
 * Called once the user grants microphone access.
 * Wires: microphone MediaStream -> AudioWorkletNode('vumeter') -> destination,
 * and listens for volume messages posted by the processor.
 * @param {MediaStream} stream - the microphone stream granted by the user.
 */
async function onMicrophoneGranted(stream) {
    // Initialize the AudioContext. Declared `const` (the original assigned
    // an implicit global, which throws in strict mode / ES modules).
    const audioContext = new AudioContext()

    // Create a MediaStreamSource from the MediaStream granted by the user.
    let microphone = audioContext.createMediaStreamSource(stream)

    // The worklet module must finish loading before the node is created.
    await audioContext.audioWorklet.addModule('processor.js')

    // 'vumeter' must match the name registered in processor.js.
    const node = new AudioWorkletNode(audioContext, 'vumeter')

    // Messages posted from the processor's process() arrive here,
    // carrying the current volume level.
    node.port.onmessage = event => {
        // console.log(event.data.volume) // the volume level detected by processor.js
        handleVolumeCellColor(event.data.volume) // update the page UI
    }

    // Connect the microphone through the worklet node to the
    // AudioContext output.
    microphone.connect(node).connect(audioContext.destination)
}

/**
 * Callback invoked when the user rejects the microphone permission prompt
 * (or acquiring the stream fails).
 */
function onMicrophoneDenied() {
    console.log('denied');
}

Handle page display logic

With the above code, we can already get the volume of the system microphone, and now the task is to display it on the page.

Prepare page structure and style code:

 <style>
    .volume-group {
        width: 200px;
        height: 50px;
        background-color: black;
        display: flex;
        align-items: center;
        gap: 5px;
        padding: 0 10px;
    }
    .volume-cell {
        width: 10px;
        height: 30px;
        background-color: #e3e3e5;
    }
</style>

<div class="volume-group">
    <div class="volume-cell"></div>
    <div class="volume-cell"></div>
    <div class="volume-cell"></div>
    <div class="volume-cell"></div>
    <div class="volume-cell"></div>
    <div class="volume-cell"></div>
    <div class="volume-cell"></div>
    <div class="volume-cell"></div>
    <div class="volume-cell"></div>
    <div class="volume-cell"></div>
    <div class="volume-cell"></div>
    <div class="volume-cell"></div>
</div>

Rendering logic:

 /**
 * Update the volume-meter UI: reset every cell to the idle grey, then
 * light up the first `Math.round(volume)` cells in green.
 * @param {number} volume - volume level, roughly 0-100, scaled to cells.
 */
function handleVolumeCellColor(volume) {
    const cells = [...volumeCells]

    // First pass: every cell back to the idle colour.
    cells.forEach((cell) => {
        cell.style.backgroundColor = "#e3e3e5"
    })

    // Second pass: the leading cells proportional to the volume go green.
    cells.slice(0, Math.round(volume)).forEach((cell) => {
        cell.style.backgroundColor = "#79c545"
    })
}

Full code

Paste the complete code of the main thread below and run it in the same directory as processor.js.

 <!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta http-equiv="X-UA-Compatible" content="IE=edge">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>AudioContext</title>
    <style>
        .volume-group {
            width: 200px;
            height: 50px;
            background-color: black;
            display: flex;
            align-items: center;
            gap: 5px;
            padding: 0 10px;
        }
        .volume-cell {
            width: 10px;
            height: 30px;
            background-color: #e3e3e5;
        }
    </style>
</head>
<body>
    <div class="volume-group">
        <div class="volume-cell"></div>
        <div class="volume-cell"></div>
        <div class="volume-cell"></div>
        <div class="volume-cell"></div>
        <div class="volume-cell"></div>
        <div class="volume-cell"></div>
        <div class="volume-cell"></div>
        <div class="volume-cell"></div>
        <div class="volume-cell"></div>
        <div class="volume-cell"></div>
        <div class="volume-cell"></div>
        <div class="volume-cell"></div>
    </div>
<script>
    // Ask the user for permission to use the microphone.
    function activeSound () {
        try {
            // Prefer the modern promise-based API: the prefixed
            // navigator.getUserMedia variants are deprecated and have been
            // removed from current browsers.
            if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
                navigator.mediaDevices.getUserMedia({ audio: true, video: false })
                    .then(onMicrophoneGranted)
                    .catch(onMicrophoneDenied);
                return;
            }

            // Legacy callback-based fallback for very old browsers.
            navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia;
            navigator.getUserMedia({ audio: true, video: false }, onMicrophoneGranted, onMicrophoneDenied);
        } catch(e) {
            alert(e)
        }
    }

    const volumeCells = document.querySelectorAll(".volume-cell")

    /**
     * Called once the user grants microphone access: wires the microphone
     * stream through the 'vumeter' worklet node and listens for volume
     * messages from the audio thread.
     */
    async function onMicrophoneGranted(stream) {
        // Initialize the AudioContext. Declared `const` (previously an
        // implicit global, which throws in strict mode / ES modules).
        const audioContext = new AudioContext()

        // Create a MediaStreamSource from the stream granted by the user.
        let microphone = audioContext.createMediaStreamSource(stream)

        // The worklet module must finish loading before the node is created.
        await audioContext.audioWorklet.addModule('processor.js')

        // 'vumeter' must match the name registered in processor.js.
        const node = new AudioWorkletNode(audioContext, 'vumeter')

        // Messages posted from the processor's process() arrive here,
        // carrying the current volume level.
        node.port.onmessage = event => {
            // console.log(event.data.volume)
            handleVolumeCellColor(event.data.volume)
        }

        // Connect the microphone through the worklet node to the output.
        microphone.connect(node).connect(audioContext.destination)
    }

    // Callback invoked when microphone permission is denied.
    function onMicrophoneDenied() {
        console.log('denied')
    }

    /**
     * Update the volume-meter UI: light the first `Math.round(volume)`
     * cells green and reset the rest to grey.
     */
    function handleVolumeCellColor(volume) {
        const allVolumeCells = [...volumeCells]
        const numberOfCells = Math.round(volume)
        const cellsToColored = allVolumeCells.slice(0, numberOfCells)

        for (const cell of allVolumeCells) {
            cell.style.backgroundColor = "#e3e3e5"
        }

        for (const cell of cellsToColored) {
            cell.style.backgroundColor = "#79c545"
        }
    }

    activeSound()
</script>
</body>
</html>

Reference documentation

Enter Audio Worklet

This concludes the article. If it is useful to you, please like it, thank you.

The article was first published on IICCOM-Personal Blog|Technical Blog - Browser Detection Microphone Volume


来了老弟
508 声望31 粉丝

纸上得来终觉浅,绝知此事要躬行