Posting the code first; I'll flesh out the explanation when I have time.
H264ToRGB
H.264 uses inter-frame coding, so the AVCodecContext has to be fed several consecutive frames of data before it can decode correctly; a single packet is often not enough to produce a picture.
// FFmpeg headers (FFmpeg is a C library, so wrap the includes in extern "C" when building as C++)
extern "C" {
#include <libavcodec/avcodec.h>
#include <libswscale/swscale.h>
#include <libavutil/imgutils.h>
}

void H264ToRGB(unsigned char* data, unsigned int dataSize, unsigned char* outBuffer)
{
    // 1. Wrap the raw H.264 data in a packet
    AVPacket* avPkt = av_packet_alloc();
    avPkt->size = dataSize;
    avPkt->data = data;

    // The decoder context is kept across calls (static) because H.264 needs
    // previously decoded reference frames to decode the current one.
    static AVCodecContext* codecCtx = nullptr;
    if (codecCtx == nullptr) {
        // 2. Create and open the codec context
        const AVCodec* h264Codec = avcodec_find_decoder(AV_CODEC_ID_H264);
        codecCtx = avcodec_alloc_context3(h264Codec); // already filled with defaults
        avcodec_open2(codecCtx, h264Codec, nullptr);
    }

    // 3. Decode
    // avcodec_decode_video2() is deprecated; use the send/receive API instead.
    auto ret = avcodec_send_packet(codecCtx, avPkt);
    if (ret >= 0) {
        AVFrame* YUVFrame = av_frame_alloc();
        ret = avcodec_receive_frame(codecCtx, YUVFrame);
        if (ret >= 0) {
            // 4. Convert YUV to RGB24, using the decoder's actual output format
            // (usually YUV420P for H.264) as the source format.
            AVFrame* RGB24Frame = av_frame_alloc();
            struct SwsContext* convertCxt = sws_getContext(
                YUVFrame->width, YUVFrame->height, (AVPixelFormat)YUVFrame->format,
                YUVFrame->width, YUVFrame->height, AV_PIX_FMT_RGB24,
                SWS_POINT, NULL, NULL, NULL
            );
            // outBuffer is mapped onto RGB24Frame->data; for AV_PIX_FMT_RGB24
            // only RGB24Frame->data[0] is used.
            av_image_fill_arrays(
                RGB24Frame->data, RGB24Frame->linesize, outBuffer,
                AV_PIX_FMT_RGB24, YUVFrame->width, YUVFrame->height,
                1
            );
            sws_scale(convertCxt, YUVFrame->data, YUVFrame->linesize, 0, YUVFrame->height, RGB24Frame->data, RGB24Frame->linesize);

            // 5. Release the conversion context and the RGB frame wrapper
            sws_freeContext(convertCxt);
            av_frame_free(&RGB24Frame);
        }
        av_frame_free(&YUVFrame);
    }

    // Free the packet; the codec context stays alive for the next call.
    av_packet_free(&avPkt);
    // avcodec_free_context(&codecCtx); // only once the whole stream is finished
}
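A minimal caller sketch, just to illustrate the inter-frame point above: the decoder inside H264ToRGB is reused across calls, so consecutive access units must be fed in order, and the first few calls may leave outBuffer untouched until enough reference frames have arrived. readNextAccessUnit and the 1920x1080 resolution are assumptions for illustration, not part of the original code.

#include <vector>

// Assumed helper: fills 'buf' with one complete Annex-B access unit and
// returns its size, or 0 at end of stream.
unsigned int readNextAccessUnit(unsigned char* buf, unsigned int bufSize);

void decodeLoop()
{
    const int width = 1920, height = 1080;                 // assumed stream resolution
    std::vector<unsigned char> nalBuf(1024 * 1024);        // holds one access unit
    std::vector<unsigned char> rgbBuf(width * height * 3); // RGB24: 3 bytes per pixel

    unsigned int size = 0;
    while ((size = readNextAccessUnit(nalBuf.data(), (unsigned int)nalBuf.size())) > 0) {
        // Early calls may produce no picture yet; later calls overwrite
        // rgbBuf with each newly decoded frame.
        H264ToRGB(nalBuf.data(), size, rgbBuf.data());
    }
}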
MJPEGToRGB
MJPEG uses intra-frame coding, so every MJPEG frame can be decoded into its corresponding RGB frame on its own.
void MJPEGToRGB(unsigned char *data, unsigned int dataSize, unsigned char *outBuffer)
{
    // 1. Wrap the raw MJPEG data in a packet
    AVPacket *avPkt = av_packet_alloc();
    avPkt->size = dataSize;
    avPkt->data = data;

    // 2. Create and open the codec context; every MJPEG frame is independent,
    // so the context does not need to persist between calls.
    const AVCodec *mjpegCodec = avcodec_find_decoder(AV_CODEC_ID_MJPEG);
    AVCodecContext* codecCtx = avcodec_alloc_context3(mjpegCodec); // already filled with defaults
    avcodec_open2(codecCtx, mjpegCodec, nullptr);

    // 3. Decode
    // avcodec_decode_video2() is deprecated; use the send/receive API instead.
    auto ret = avcodec_send_packet(codecCtx, avPkt);
    if (ret >= 0) {
        AVFrame* YUVFrame = av_frame_alloc();
        ret = avcodec_receive_frame(codecCtx, YUVFrame);
        if (ret >= 0) {
            // 4. Convert YUV to RGB24; MJPEG often decodes to YUVJ420P/YUVJ422P,
            // so use the decoder's actual output format instead of assuming YUV420P.
            AVFrame* RGB24Frame = av_frame_alloc();
            struct SwsContext* convertCxt = sws_getContext(
                YUVFrame->width, YUVFrame->height, (AVPixelFormat)YUVFrame->format,
                YUVFrame->width, YUVFrame->height, AV_PIX_FMT_RGB24,
                SWS_POINT, NULL, NULL, NULL
            );
            // outBuffer is mapped onto RGB24Frame->data; for AV_PIX_FMT_RGB24
            // only RGB24Frame->data[0] is used.
            av_image_fill_arrays(
                RGB24Frame->data, RGB24Frame->linesize, outBuffer,
                AV_PIX_FMT_RGB24, YUVFrame->width, YUVFrame->height,
                1
            );
            sws_scale(convertCxt, YUVFrame->data, YUVFrame->linesize, 0, YUVFrame->height, RGB24Frame->data, RGB24Frame->linesize);

            // 5. Release the conversion context and the RGB frame wrapper
            sws_freeContext(convertCxt);
            av_frame_free(&RGB24Frame);
        }
        av_frame_free(&YUVFrame);
    }

    // Free the packet and the codec context
    av_packet_free(&avPkt);
    avcodec_free_context(&codecCtx);
}
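Because each MJPEG frame is self-contained, a single call per JPEG image is enough; the caller only has to provide an outBuffer big enough for one RGB24 picture, which av_image_get_buffer_size can compute. A hypothetical single-shot sketch, assuming jpegData/jpegSize come from elsewhere (e.g. a camera) and a 1280x720 resolution:

#include <vector>

void decodeOneJpeg(unsigned char* jpegData, unsigned int jpegSize)
{
    // Assumed resolution of the JPEG frame; in practice read it from the stream.
    const int width = 1280, height = 720;
    // Size of one tightly packed RGB24 picture (width * height * 3 with align = 1).
    const int rgbSize = av_image_get_buffer_size(AV_PIX_FMT_RGB24, width, height, 1);
    std::vector<unsigned char> rgbBuf(rgbSize);
    // One MJPEG frame in, one RGB24 frame out.
    MJPEGToRGB(jpegData, jpegSize, rgbBuf.data());
}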