With the iOS 7.0 SDK, Apple made significant changes to AudioSession, so many of the related interfaces need to be adjusted.

Basic Concepts

First, let's clarify what AudioSession and AudioQueue each are: the session acts like the master controller of a home stereo system, while the queue does the actual work of playback and recording.

Call

[AVAudioSession sharedInstance]

to obtain the shared AVAudioSession instance.

Setting Up the AudioSession

Here we need to start the AudioSession and handle interruptions (for example, you are on a VoIP call and a regular phone call suddenly comes in...):

NSError *error = nil;
AVAudioSession *session = [AVAudioSession sharedInstance];
// see also AVAudioSessionPortOverrideSpeaker (runtime route override, shown below)
if (![session setCategory:AVAudioSessionCategoryPlayAndRecord
              withOptions:AVAudioSessionCategoryOptionMixWithOthers |
                          AVAudioSessionCategoryOptionDefaultToSpeaker
                    error:nil])
{
    // audio could not be set up
    return;
}
// get notified when the audio route changes (headphones, Bluetooth, ...)
[[NSNotificationCenter defaultCenter] addObserver:self
                                         selector:@selector(routeChange:)
                                             name:AVAudioSessionRouteChangeNotification
                                           object:nil];
// handle interruptions (e.g. an incoming phone call)
[[NSNotificationCenter defaultCenter] addObserverForName:AVAudioSessionInterruptionNotification
                                                  object:session
                                                   queue:nil
                                              usingBlock:^(NSNotification *notification)
{
    NSUInteger status = [[notification.userInfo valueForKey:AVAudioSessionInterruptionTypeKey] unsignedIntegerValue];
    if (status == AVAudioSessionInterruptionTypeBegan)
    {
        // audio has been interrupted: pause the recording queue for now
        AudioQueuePause(_recordQueue);
    }
    else if (status == AVAudioSessionInterruptionTypeEnded)
    {
        // re-activate the session and resume recording
        [[AVAudioSession sharedInstance] setActive:YES error:nil];
        AudioQueueStart(_recordQueue, NULL);
    }
}];
// finally, activate the session
if (![session setActive:YES error:&error])
{
    return;
}
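
The code above registers a routeChange: selector for AVAudioSessionRouteChangeNotification; here is a minimal sketch of such a handler (the reactions shown are only illustrative):

- (void)routeChange:(NSNotification *)notification
{
    NSUInteger reason = [[notification.userInfo
        valueForKey:AVAudioSessionRouteChangeReasonKey] unsignedIntegerValue];
    if (reason == AVAudioSessionRouteChangeReasonOldDeviceUnavailable)
    {
        // e.g. headphones were unplugged; output falls back to the speaker
    }
    else if (reason == AVAudioSessionRouteChangeReasonNewDeviceAvailable)
    {
        // e.g. headphones were plugged in
    }
}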

The category and options used above:

AVAudioSessionCategoryPlayAndRecord: record and play back at the same time
AVAudioSessionCategoryOptionMixWithOthers: mix with audio from other applications
AVAudioSessionCategoryOptionDefaultToSpeaker: force playback to the built-in speaker (has no effect on Bluetooth routes; plugging in headphones still overrides it)
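
If you need to switch the route at runtime rather than fixing it via the category option, there is also overrideOutputAudioPort:error: (the AVAudioSessionPortOverrideSpeaker hinted at in the comment earlier); a minimal sketch:

NSError *routeError = nil;
// force output to the built-in speaker
[session overrideOutputAudioPort:AVAudioSessionPortOverrideSpeaker error:&routeError];
// or restore the default route (receiver / headphones)
[session overrideOutputAudioPort:AVAudioSessionPortOverrideNone error:&routeError];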

Requesting Permission, Creating and Configuring the AudioQueue

Let's start with a few basic definitions:

#define ProcessPeo 0.03   // buffer duration per callback, in seconds
#define PlayBaSam 48000   // playback sample rate (Hz)
#define RecordSam 44100   // recording sample rate (Hz)
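
With these values, one recording buffer holds ceil(0.03 × 44100) = 1323 frames, i.e. 1323 × 2 = 2646 bytes of 16-bit mono PCM; one playback buffer at 48000 Hz holds 1440 frames, or 2880 bytes. This is exactly what the bufferByteSize computations below produce.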

Recording is essential for VoIP, but we first need to check and request microphone permission, and then create the AudioQueue used for recording (playback and recording are kept independent of each other):

// check the current record permission
switch ([AVAudioSession sharedInstance].recordPermission) {
        case AVAudioSessionRecordPermissionUndetermined: {
            UIAlertView *a = [[UIAlertView alloc] initWithTitle:@"Permission needed"
                                                        message:@"You need to grant microphone access"
                                                       delegate:self
                                              cancelButtonTitle:@"OK"
                                              otherButtonTitles:nil];
            [a show];
            break;
        }
        case AVAudioSessionRecordPermissionDenied:
            [[[CustomAlertView alloc] initWithTitle:@"You have denied access to the microphone. To restore it, go to the system Settings."
                                            message:@"TX cannot be used"
                                           delegate:nil
                                  cancelButtonTitle:@"OK"
                                  otherButtonTitles:nil] show];
            break;
        case AVAudioSessionRecordPermissionGranted:
            break;
        default:
            break;
    }
// request permission
[session requestRecordPermission:^(BOOL granted) {
        if (granted) {
            // --- recording setup begins ---
            AudioStreamBasicDescription _recordFormat;
            bzero(&_recordFormat, sizeof(AudioStreamBasicDescription));
            _recordFormat.mSampleRate       = RecordSam;
            _recordFormat.mFormatID         = kAudioFormatLinearPCM;
            _recordFormat.mFormatFlags      = kAudioFormatFlagIsSignedInteger |
                                              kAudioFormatFlagsNativeEndian |
                                              kAudioFormatFlagIsPacked;
            _recordFormat.mFramesPerPacket  = 1;
            _recordFormat.mChannelsPerFrame = 1;   // mono
            _recordFormat.mBitsPerChannel   = 16;  // 16-bit PCM
            _recordFormat.mBytesPerPacket = _recordFormat.mBytesPerFrame =
                (_recordFormat.mBitsPerChannel / 8) * _recordFormat.mChannelsPerFrame;

            AudioQueueNewInput(&_recordFormat, inputBufferHandler,
                               (__bridge void *)(self), NULL, NULL, 0, &_recordQueue);

            int bufferByteSize = ceil(ProcessPeo * _recordFormat.mSampleRate) * _recordFormat.mBytesPerFrame;

            // a single buffer is used here; more buffers reduce the chance of drops
            for (int i = 0; i < 1; i++) {
                AudioQueueAllocateBuffer(_recordQueue, bufferByteSize, &_recBuffers[i]);
                AudioQueueEnqueueBuffer(_recordQueue, _recBuffers[i], 0, NULL);
            }

            AudioQueueStart(_recordQueue, NULL);
            // --- recording setup ends ---
        }
        else {
            // denied: already handled by the switch above
        }
    }];

The recording side is now running; next comes playback:

// --- playback setup begins ---
    AudioStreamBasicDescription audioFormat;
    bzero(&audioFormat, sizeof(AudioStreamBasicDescription));
    audioFormat.mSampleRate       = PlayBaSam;
    audioFormat.mFormatID         = kAudioFormatLinearPCM;
    audioFormat.mFormatFlags      = kAudioFormatFlagIsSignedInteger |
                                    kAudioFormatFlagsNativeEndian |
                                    kAudioFormatFlagIsPacked;
    audioFormat.mFramesPerPacket  = 1;
    audioFormat.mChannelsPerFrame = 1;   // mono
    audioFormat.mBitsPerChannel   = 16;  // 16-bit PCM
    audioFormat.mBytesPerPacket = audioFormat.mBytesPerFrame =
        (audioFormat.mBitsPerChannel / 8) * audioFormat.mChannelsPerFrame;

    AudioQueueNewOutput(&audioFormat, outputBufferHandler,
                        (__bridge void *)(self), NULL, NULL, 0, &_playQueue);

    // the multiplication sizes each buffer in seconds of audio; 0.03 s is used here,
    // and a larger buffer means higher latency
    int bufferByteSize = ceil(ProcessPeo * audioFormat.mSampleRate) * audioFormat.mBytesPerFrame;

    // create and prime the buffers
    for (int i = 0; i < 2; i++)
    {
        AudioQueueAllocateBuffer(_playQueue, bufferByteSize, &_playBuffers[i]);
        _playBuffers[i]->mAudioDataByteSize = bufferByteSize;
        outputBufferHandler(NULL, _playQueue, _playBuffers[i]);
    }
    AudioQueueStart(_playQueue, NULL);

With that, both recording and playback are running. Note that with AudioQueue, processing the recorded data and supplying data for playback are both done inside callbacks (unlike on Android, this is a pull model).

Callbacks: Where Recording and Playback Actually Happen

First, the recording callback:

void inputBufferHandler(void *inUserData, AudioQueueRef inAQ, AudioQueueBufferRef inBuffer,
                        const AudioTimeStamp *inStartTime, UInt32 inNumPackets,
                        const AudioStreamPacketDescription *inPacketDesc)
{
    if (inNumPackets > 0) {
        // the recorded data is in inBuffer->mAudioData;
        // inNumPackets is the packet count, inBuffer->mAudioDataByteSize the byte count
    }
    // re-enqueue the buffer so recording keeps going
    AudioQueueEnqueueBuffer(inAQ, inBuffer, 0, NULL);
}
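
For VoIP, the recorded bytes would typically be encoded and sent to the peer at this point. A minimal sketch of what the if branch above might do, where sendAudioData is a hypothetical stand-in for your encoder/network layer:

// hypothetical: hand the PCM data to the encoder/network layer
// (with this format, 1 frame per packet, 16-bit mono, the byte count
// equals inNumPackets * 2, which matches inBuffer->mAudioDataByteSize)
sendAudioData(inBuffer->mAudioData, inBuffer->mAudioDataByteSize);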

The playback callback:

static void outputBufferHandler(void *inUserData, AudioQueueRef inAQ, AudioQueueBufferRef buffer)
{
    // fill buffer->mAudioData with up to buffer->mAudioDataByteSize bytes of audio here
    AudioQueueEnqueueBuffer(inAQ, buffer, 0, NULL);
}

Important: the playback callback must keep supplying buffer data; otherwise playback stops automatically.
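
One common way to satisfy this is to pad with silence whenever not enough real audio is available. A sketch of a variant of the callback above, where fetchAudioData is a hypothetical stand-in for your decoder/jitter buffer that returns the number of bytes it copied:

static void outputBufferHandler(void *inUserData, AudioQueueRef inAQ, AudioQueueBufferRef buffer)
{
    // hypothetical: pull decoded PCM; returns how many bytes were written
    UInt32 filled = fetchAudioData(buffer->mAudioData, buffer->mAudioDataByteSize);
    if (filled < buffer->mAudioDataByteSize) {
        // zero the remainder (silence) so the queue never starves
        memset((char *)buffer->mAudioData + filled, 0,
               buffer->mAudioDataByteSize - filled);
    }
    AudioQueueEnqueueBuffer(inAQ, buffer, 0, NULL);
}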

And that's all it takes; pretty simple.
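
For completeness, tearing everything down when the call ends looks roughly like this:

AudioQueueStop(_recordQueue, true);     // true = stop immediately
AudioQueueStop(_playQueue, true);
AudioQueueDispose(_recordQueue, true);  // also releases the queue's buffers
AudioQueueDispose(_playQueue, true);
[[AVAudioSession sharedInstance] setActive:NO error:nil];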

PS: I've been learning Objective-C for less than a month, so if anything in this article is wrong, corrections and feedback are welcome!

