
WebRTC audio device operations: OpenSL and JNI

This section looks at how audio devices are operated on Android and iOS during a video call, for example adjusting the volume or enabling the speakerphone.

The audio device handling code lives under src/modules/audio_device/:

ls modules/audio_device/
BUILD.gn                        audio_device_buffer.h           audio_device_impl.cc            dummy                           linux
DEPS                            audio_device_config.h           audio_device_impl.h             fine_audio_buffer.cc            mac
OWNERS                          audio_device_data_observer.cc   audio_device_name.cc            fine_audio_buffer.h             mock_audio_device_buffer.h
android                         audio_device_generic.cc         audio_device_name.h             fine_audio_buffer_unittest.cc   win
audio_device_buffer.cc          audio_device_generic.h          audio_device_unittest.cc        include

The first approach is to call the platform APIs directly, that is, the Android Java APIs or the Objective-C APIs on iOS. Android example (WebRtcAudioTrack.java):

  // Get max possible volume index for a phone call audio stream.
  private int getStreamMaxVolume() {
    threadChecker.checkIsOnValidThread();
    Logging.d(TAG, "getStreamMaxVolume");
    assertTrue(audioManager != null);
    return audioManager.getStreamMaxVolume(AudioManager.STREAM_VOICE_CALL);
  }

  // Set current volume level for a phone call audio stream.
  private boolean setStreamVolume(int volume) {
    threadChecker.checkIsOnValidThread();
    Logging.d(TAG, "setStreamVolume(" + volume + ")");
    assertTrue(audioManager != null);
    if (isVolumeFixed()) {
      Logging.e(TAG, "The device implements a fixed volume policy.");
      return false;
    }
    audioManager.setStreamVolume(AudioManager.STREAM_VOICE_CALL, volume, 0);
    return true;
  }

These methods operate the audio device through the AudioManager class provided by Android. Since the WebRTC core is written in C++, it invokes these Java methods via JNI.
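To illustrate what such a JNI bridge looks like, here is a rough sketch assuming a valid JNIEnv* and a reference to the WebRtcAudioTrack Java object; the helper name CallSetStreamVolume is hypothetical, and WebRTC's real glue code is generated, caches jclass/jmethodID handles, and does fuller exception handling:

#include <jni.h>

// Hypothetical helper: call WebRtcAudioTrack.setStreamVolume(int) from C++.
// JNI does not enforce Java access modifiers, so the private method is reachable.
bool CallSetStreamVolume(JNIEnv* env, jobject webrtc_audio_track, int volume) {
  jclass cls = env->GetObjectClass(webrtc_audio_track);
  if (cls == nullptr) return false;
  // Signature "(I)Z": takes an int, returns a boolean.
  jmethodID mid = env->GetMethodID(cls, "setStreamVolume", "(I)Z");
  if (mid == nullptr) return false;
  jboolean ok = env->CallBooleanMethod(webrtc_audio_track, mid, volume);
  if (env->ExceptionCheck()) {  // The Java side may throw; clear and fail.
    env->ExceptionClear();
    return false;
  }
  return ok == JNI_TRUE;
}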

Next, how audio devices are operated on iOS; these entry points live in the cross-platform AudioDeviceModuleImpl, which forwards to the platform-specific implementation (audio_device_impl.cc):

int32_t AudioDeviceModuleImpl::StartPlayout() {
  RTC_LOG(INFO) << __FUNCTION__;
  CHECKinitialized_();
  if (Playing()) {
    return 0;
  }
  audio_device_buffer_.StartPlayout();
  int32_t result = audio_device_->StartPlayout();
  RTC_LOG(INFO) << "output: " << result;
  RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.StartPlayoutSuccess",
                        static_cast<int>(result == 0));
  return result;
}

int32_t AudioDeviceModuleImpl::StopPlayout() {
  RTC_LOG(INFO) << __FUNCTION__;
  CHECKinitialized_();
  int32_t result = audio_device_->StopPlayout();
  audio_device_buffer_.StopPlayout();
  RTC_LOG(INFO) << "output: " << result;
  RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.StopPlayoutSuccess",
                        static_cast<int>(result == 0));
  return result;
}
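
For context, here is a minimal caller-side sketch of driving playout through the public AudioDeviceModule interface. It assumes an already created rtc::scoped_refptr<webrtc::AudioDeviceModule> adm; the factory function's exact signature differs between WebRTC versions, so creation is left out:

#include "api/scoped_refptr.h"
#include "modules/audio_device/include/audio_device.h"

// Sketch only: start and stop playout via the AudioDeviceModule API.
void RunPlayout(rtc::scoped_refptr<webrtc::AudioDeviceModule> adm) {
  adm->Init();          // Initialize the selected platform implementation.
  adm->InitPlayout();   // Prepare the output stream.
  adm->StartPlayout();  // Ends up in AudioDeviceModuleImpl::StartPlayout() above.
  // ... audio is pulled through the registered AudioTransport callback ...
  adm->StopPlayout();   // AudioDeviceModuleImpl::StopPlayout().
  adm->Terminate();
}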

Since WebRTC is implemented in C++ and reaches the Android APIs through JNI, could it skip the JNI-to-Java detour and call the underlying native libraries directly? With that question in mind, let's look at how the audio device objects are created (audio_device_impl.cc):

int32_t AudioDeviceModuleImpl::CreatePlatformSpecificObjects() {
  RTC_LOG(INFO) << __FUNCTION__;
// Dummy ADM implementations if build flags are set.
#if defined(WEBRTC_DUMMY_AUDIO_BUILD)
  audio_device_.reset(new AudioDeviceDummy());
  RTC_LOG(INFO) << "Dummy Audio APIs will be utilized";
#elif defined(WEBRTC_DUMMY_FILE_DEVICES)
  audio_device_.reset(FileAudioDeviceFactory::CreateFileAudioDevice());
  if (audio_device_) {
    RTC_LOG(INFO) << "Will use file-playing dummy device.";
  } else {
    // Create a dummy device instead.
    audio_device_.reset(new AudioDeviceDummy());
    RTC_LOG(INFO) << "Dummy Audio APIs will be utilized";
  }

// Real (non-dummy) ADM implementations.
#else
  AudioLayer audio_layer(PlatformAudioLayer());
// Windows ADM implementation.
#if defined(WEBRTC_WINDOWS_CORE_AUDIO_BUILD)
  if ((audio_layer == kWindowsCoreAudio) ||
      (audio_layer == kPlatformDefaultAudio)) {
    RTC_LOG(INFO) << "Attempting to use the Windows Core Audio APIs...";
    if (AudioDeviceWindowsCore::CoreAudioIsSupported()) {
      audio_device_.reset(new AudioDeviceWindowsCore());
      RTC_LOG(INFO) << "Windows Core Audio APIs will be utilized";
    }
  }
#endif  // defined(WEBRTC_WINDOWS_CORE_AUDIO_BUILD)

#if defined(WEBRTC_ANDROID)
  // Create an Android audio manager.
  audio_manager_android_.reset(new AudioManager());
  // Select best possible combination of audio layers.
  if (audio_layer == kPlatformDefaultAudio) {
    if (audio_manager_android_->IsAAudioSupported()) {
      // Use of AAudio for both playout and recording has highest priority.
      audio_layer = kAndroidAAudioAudio;
    } else if (audio_manager_android_->IsLowLatencyPlayoutSupported() &&
               audio_manager_android_->IsLowLatencyRecordSupported()) {
      // Use OpenSL ES for both playout and recording.
      audio_layer = kAndroidOpenSLESAudio;
    } else if (audio_manager_android_->IsLowLatencyPlayoutSupported() &&
               !audio_manager_android_->IsLowLatencyRecordSupported()) {
      // Use OpenSL ES for output on devices that only supports the
      // low-latency output audio path.
      audio_layer = kAndroidJavaInputAndOpenSLESOutputAudio;
    } else {
      // Use Java-based audio in both directions when low-latency output is
      // not supported.
      audio_layer = kAndroidJavaAudio;
    }
  }
  AudioManager* audio_manager = audio_manager_android_.get();
  if (audio_layer == kAndroidJavaAudio) {
    // Java audio for both input and output audio.
    audio_device_.reset(new AudioDeviceTemplate<AudioRecordJni, AudioTrackJni>(
        audio_layer, audio_manager));
  } else if (audio_layer == kAndroidOpenSLESAudio) {
    // OpenSL ES based audio for both input and output audio.
    audio_device_.reset(
        new AudioDeviceTemplate<OpenSLESRecorder, OpenSLESPlayer>(
            audio_layer, audio_manager));
  } else if (audio_layer == kAndroidJavaInputAndOpenSLESOutputAudio) {
    // Java audio for input and OpenSL ES for output audio (i.e. mixed APIs).
    // This combination provides low-latency output audio and at the same
    // time support for HW AEC using the AudioRecord Java API.
    audio_device_.reset(new AudioDeviceTemplate<AudioRecordJni, OpenSLESPlayer>(
        audio_layer, audio_manager));
  } else if (audio_layer == kAndroidAAudioAudio) {
#if defined(WEBRTC_AUDIO_DEVICE_INCLUDE_ANDROID_AAUDIO)
    // AAudio based audio for both input and output.
    audio_device_.reset(new AudioDeviceTemplate<AAudioRecorder, AAudioPlayer>(
        audio_layer, audio_manager));
#endif
  } else if (audio_layer == kAndroidJavaInputAndAAudioOutputAudio) {
#if defined(WEBRTC_AUDIO_DEVICE_INCLUDE_ANDROID_AAUDIO)
    // Java audio for input and AAudio for output audio (i.e. mixed APIs).
    audio_device_.reset(new AudioDeviceTemplate<AudioRecordJni, AAudioPlayer>(
        audio_layer, audio_manager));
#endif
  } else {
    RTC_LOG(LS_ERROR) << "The requested audio layer is not supported";
    audio_device_.reset(nullptr);
  }
// END #if defined(WEBRTC_ANDROID)

// Linux ADM implementation.
// Note that, LINUX_ALSA is always defined by default when WEBRTC_LINUX is
// defined. LINUX_PULSE depends on the 'rtc_include_pulse_audio' build flag.
// TODO(bugs.webrtc.org/9127): improve support and make it more clear that
// PulseAudio is the default selection.
#elif defined(WEBRTC_LINUX)
#if !defined(LINUX_PULSE)
  // Build flag 'rtc_include_pulse_audio' is set to false. In this mode:
  // - kPlatformDefaultAudio => ALSA, and
  // - kLinuxAlsaAudio => ALSA, and
  // - kLinuxPulseAudio => Invalid selection.
  RTC_LOG(WARNING) << "PulseAudio is disabled using build flag.";
  if ((audio_layer == kLinuxAlsaAudio) ||
      (audio_layer == kPlatformDefaultAudio)) {
    audio_device_.reset(new AudioDeviceLinuxALSA());
    RTC_LOG(INFO) << "Linux ALSA APIs will be utilized.";
  }
#else
  // Build flag 'rtc_include_pulse_audio' is set to true (default). In this
  // mode:
  // - kPlatformDefaultAudio => PulseAudio, and
  // - kLinuxPulseAudio => PulseAudio, and
  // - kLinuxAlsaAudio => ALSA (supported but not default).
  RTC_LOG(INFO) << "PulseAudio support is enabled.";
  if ((audio_layer == kLinuxPulseAudio) ||
      (audio_layer == kPlatformDefaultAudio)) {
    // Linux PulseAudio implementation is default.
    audio_device_.reset(new AudioDeviceLinuxPulse());
    RTC_LOG(INFO) << "Linux PulseAudio APIs will be utilized";
  } else if (audio_layer == kLinuxAlsaAudio) {
    audio_device_.reset(new AudioDeviceLinuxALSA());
    RTC_LOG(WARNING) << "Linux ALSA APIs will be utilized.";
  }
#endif  // #if !defined(LINUX_PULSE)
#endif  // #if defined(WEBRTC_LINUX)

// iOS ADM implementation.
#if defined(WEBRTC_IOS)
  if (audio_layer == kPlatformDefaultAudio) {
    audio_device_.reset(new ios_adm::AudioDeviceIOS());
    RTC_LOG(INFO) << "iPhone Audio APIs will be utilized.";
  }
// END #if defined(WEBRTC_IOS)

// Mac OS X ADM implementation.
#elif defined(WEBRTC_MAC)
  if (audio_layer == kPlatformDefaultAudio) {
    audio_device_.reset(new AudioDeviceMac());
    RTC_LOG(INFO) << "Mac OS X Audio APIs will be utilized.";
  }
#endif  // WEBRTC_MAC

  // Dummy ADM implementation.
  if (audio_layer == kDummyAudio) {
    audio_device_.reset(new AudioDeviceDummy());
    RTC_LOG(INFO) << "Dummy Audio APIs will be utilized.";
  }
#endif  // if defined(WEBRTC_DUMMY_AUDIO_BUILD)

  if (!audio_device_) {
    RTC_LOG(LS_ERROR)
        << "Failed to create the platform specific ADM implementation.";
    return -1;
  }
  return 0;
}
The Android-specific implementations selected above live under modules/audio_device/android/:

ls modules/audio_device/android/
aaudio_player.cc                audio_common.h                  audio_record_jni.cc             ensure_initialized.cc           opensles_player.h
aaudio_player.h                 audio_device_template.h         audio_record_jni.h              ensure_initialized.h            opensles_recorder.cc
aaudio_recorder.cc              audio_device_unittest.cc        audio_track_jni.cc              java                            opensles_recorder.h
aaudio_recorder.h               audio_manager.cc                audio_track_jni.h               opensles_common.cc
aaudio_wrapper.cc               audio_manager.h                 build_info.cc                   opensles_common.h
aaudio_wrapper.h                audio_manager_unittest.cc       build_info.h                    opensles_player.cc

The checks audio_manager_android_->IsLowLatencyPlayoutSupported() and audio_manager_android_->IsLowLatencyRecordSupported() decide how the audio device is driven: when the device supports the low-latency audio path, WebRTC uses OpenSL ES directly instead of going through Java. A short note on OpenSL ES:
OpenSL ES provides a C API that can be called from C++, and its functionality is broadly similar to the Java audio APIs on Android:

android.media.MediaPlayer
android.media.MediaRecorder
For Android NDK development, calling the OpenSL ES APIs directly from native code covers most audio needs without having to go through Java; a minimal setup sketch follows.
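
Here is a minimal sketch of bootstrapping OpenSL ES from native code, assuming an Android NDK build linked against -lOpenSLES. This is not WebRTC's opensles_player.cc, just the standard engine setup that code also performs before creating players and recorders:

#include <SLES/OpenSLES.h>

// Create the OpenSL ES engine object, realize it, and fetch SLEngineItf,
// the interface later used to create audio player and recorder objects.
bool CreateOpenSLEngine(SLObjectItf* engine_object, SLEngineItf* engine_itf) {
  if (slCreateEngine(engine_object, 0, nullptr, 0, nullptr, nullptr) !=
      SL_RESULT_SUCCESS) {
    return false;
  }
  // SL_BOOLEAN_FALSE requests synchronous (blocking) realization.
  if ((**engine_object)->Realize(*engine_object, SL_BOOLEAN_FALSE) !=
      SL_RESULT_SUCCESS) {
    return false;
  }
  return (**engine_object)->GetInterface(*engine_object, SL_IID_ENGINE,
                                         engine_itf) == SL_RESULT_SUCCESS;
}

WebRTC's opensles_player.cc and opensles_recorder.cc build on the same engine interface, creating player and recorder objects backed by Android buffer queues.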

OpenSL ES reference: https://www.cnblogs.com/lingyunhu/p/4163859.html

