Rewrite media decoder to use FFMpeg 3.2+ API (task #4686)

pull/1978/head
Andrei Kortunov 6 years ago
parent 261bbded00
commit f88d5e808c

@ -3,6 +3,7 @@
Feature #2229: Improve pathfinding AI
Feature #3442: Default values for fallbacks from ini file
Task #4686: Upgrade media decoder to a more current FFmpeg API
0.45.0

@ -6,5 +6,5 @@ brew outdated cmake || brew upgrade cmake
brew outdated pkgconfig || brew upgrade pkgconfig
brew install qt
curl -fSL -R -J https://downloads.openmw.org/osx/dependencies/openmw-deps-4eec887.zip -o ~/openmw-deps.zip
curl -fSL -R -J https://downloads.openmw.org/osx/dependencies/openmw-deps-7cf2789.zip -o ~/openmw-deps.zip
unzip -o ~/openmw-deps.zip -d /private/tmp/openmw-deps > /dev/null

@ -153,7 +153,52 @@ if (USE_QT)
endif()
# Sound setup
# Require at least ffmpeg 3.2 for now
SET(FFVER_OK FALSE)
find_package(FFmpeg REQUIRED COMPONENTS AVCODEC AVFORMAT AVUTIL SWSCALE SWRESAMPLE)
if(FFmpeg_FOUND)
SET(FFVER_OK TRUE)
# Cannot detect FFmpeg version on Windows for now
if (NOT WIN32)
if(FFmpeg_AVFORMAT_VERSION VERSION_LESS "57.56.100")
message(STATUS "libavformat is too old! (${FFmpeg_AVFORMAT_VERSION}, wanted 57.56.100)")
set(FFVER_OK FALSE)
endif()
if(FFmpeg_AVCODEC_VERSION VERSION_LESS "57.64.100")
message(STATUS "libavcodec is too old! (${FFmpeg_AVCODEC_VERSION}, wanted 57.64.100)")
set(FFVER_OK FALSE)
endif()
if(FFmpeg_AVUTIL_VERSION VERSION_LESS "55.34.100")
message(STATUS "libavutil is too old! (${FFmpeg_AVUTIL_VERSION}, wanted 55.34.100)")
set(FFVER_OK FALSE)
endif()
if(FFmpeg_SWSCALE_VERSION VERSION_LESS "4.2.100")
message(STATUS "libswscale is too old! (${FFmpeg_SWSCALE_VERSION}, wanted 4.2.100)")
set(FFVER_OK FALSE)
endif()
if(FFmpeg_SWRESAMPLE_VERSION VERSION_LESS "2.3.100")
message(STATUS "libswresample is too old! (${FFmpeg_SWRESAMPLE_VERSION}, wanted 2.3.100)")
set(FFVER_OK FALSE)
endif()
endif()
endif()
if(NOT FFmpeg_FOUND)
message(FATAL_ERROR "FFmpeg was not found" )
endif()
if(NOT FFVER_OK)
message(FATAL_ERROR "FFmpeg version is too old, 3.2 is required" )
endif()
if(WIN32)
message("Can not detect FFmpeg version, at least the 3.2 is required" )
endif()
# Required for building the FFmpeg headers
add_definitions(-D__STDC_CONSTANT_MACROS)

@ -82,7 +82,7 @@ bool FFmpeg_Decoder::getNextPacket()
}
/* Free the packet and look for another */
av_free_packet(&mPacket);
av_packet_unref(&mPacket);
}
return false;
@ -90,9 +90,9 @@ bool FFmpeg_Decoder::getNextPacket()
bool FFmpeg_Decoder::getAVAudioData()
{
int got_frame;
bool got_frame;
if((*mStream)->codec->codec_type != AVMEDIA_TYPE_AUDIO)
if(mCodecCtx->codec_type != AVMEDIA_TYPE_AUDIO)
return false;
do {
@ -100,19 +100,18 @@ bool FFmpeg_Decoder::getAVAudioData()
return false;
/* Decode some data, and check for errors */
int len = 0;
if((len=avcodec_decode_audio4((*mStream)->codec, mFrame, &got_frame, &mPacket)) < 0)
int ret = 0;
ret = avcodec_receive_frame(mCodecCtx, mFrame);
if (ret == 0)
got_frame = true;
if (ret == AVERROR(EAGAIN))
ret = 0;
if (ret == 0)
ret = avcodec_send_packet(mCodecCtx, &mPacket);
if (ret < 0 && ret != AVERROR(EAGAIN))
return false;
/* Move the unread data to the front and clear the end bits */
int remaining = mPacket.size - len;
if(remaining <= 0)
av_free_packet(&mPacket);
else
{
memmove(mPacket.data, &mPacket.data[len], remaining);
av_shrink_packet(&mPacket, remaining);
}
av_packet_unref(&mPacket);
if (!got_frame || mFrame->nb_samples == 0)
continue;
@ -139,8 +138,8 @@ bool FFmpeg_Decoder::getAVAudioData()
else
mFrameData = &mFrame->data[0];
} while(got_frame == 0 || mFrame->nb_samples == 0);
mNextPts += (double)mFrame->nb_samples / (double)(*mStream)->codec->sample_rate;
} while(!got_frame || mFrame->nb_samples == 0);
mNextPts += (double)mFrame->nb_samples / mCodecCtx->sample_rate;
return true;
}
@ -213,7 +212,7 @@ void FFmpeg_Decoder::open(const std::string &fname)
for(size_t j = 0;j < mFormatCtx->nb_streams;j++)
{
if(mFormatCtx->streams[j]->codec->codec_type == AVMEDIA_TYPE_AUDIO)
if(mFormatCtx->streams[j]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO)
{
mStream = &mFormatCtx->streams[j];
break;
@ -222,39 +221,48 @@ void FFmpeg_Decoder::open(const std::string &fname)
if(!mStream)
throw std::runtime_error("No audio streams in "+fname);
(*mStream)->codec->request_sample_fmt = (*mStream)->codec->sample_fmt;
AVCodec *codec = avcodec_find_decoder((*mStream)->codec->codec_id);
AVCodec *codec = avcodec_find_decoder((*mStream)->codecpar->codec_id);
if(!codec)
{
std::string ss = "No codec found for id " +
std::to_string((*mStream)->codec->codec_id);
std::to_string((*mStream)->codecpar->codec_id);
throw std::runtime_error(ss);
}
if(avcodec_open2((*mStream)->codec, codec, nullptr) < 0)
throw std::runtime_error(std::string("Failed to open audio codec ") +
codec->long_name);
AVCodecContext *avctx = avcodec_alloc_context3(codec);
avcodec_parameters_to_context(avctx, (*mStream)->codecpar);
// This is no longer needed in FFmpeg 4.0 and above
#if LIBAVCODEC_VERSION_INT < 3805796
av_codec_set_pkt_timebase(avctx, (*mStream)->time_base);
#endif
mCodecCtx = avctx;
if(avcodec_open2(mCodecCtx, codec, nullptr) < 0)
throw std::runtime_error(std::string("Failed to open audio codec ") + codec->long_name);
mFrame = av_frame_alloc();
if((*mStream)->codec->sample_fmt == AV_SAMPLE_FMT_FLT ||
(*mStream)->codec->sample_fmt == AV_SAMPLE_FMT_FLTP)
if(mCodecCtx->sample_fmt == AV_SAMPLE_FMT_FLT || mCodecCtx->sample_fmt == AV_SAMPLE_FMT_FLTP)
mOutputSampleFormat = AV_SAMPLE_FMT_S16; // FIXME: Check for AL_EXT_FLOAT32 support
else if((*mStream)->codec->sample_fmt == AV_SAMPLE_FMT_U8P)
else if(mCodecCtx->sample_fmt == AV_SAMPLE_FMT_U8P)
mOutputSampleFormat = AV_SAMPLE_FMT_U8;
else if((*mStream)->codec->sample_fmt == AV_SAMPLE_FMT_S16P)
else if(mCodecCtx->sample_fmt == AV_SAMPLE_FMT_S16P)
mOutputSampleFormat = AV_SAMPLE_FMT_S16;
else
mOutputSampleFormat = AV_SAMPLE_FMT_S16;
mOutputChannelLayout = (*mStream)->codec->channel_layout;
mOutputChannelLayout = (*mStream)->codecpar->channel_layout;
if(mOutputChannelLayout == 0)
mOutputChannelLayout = av_get_default_channel_layout((*mStream)->codec->channels);
mOutputChannelLayout = av_get_default_channel_layout(mCodecCtx->channels);
mCodecCtx->channel_layout = mOutputChannelLayout;
}
catch(...)
{
if(mStream)
avcodec_close((*mStream)->codec);
avcodec_free_context(&mCodecCtx);
mStream = nullptr;
if (mFormatCtx != nullptr)
@ -275,10 +283,10 @@ void FFmpeg_Decoder::open(const std::string &fname)
void FFmpeg_Decoder::close()
{
if(mStream)
avcodec_close((*mStream)->codec);
avcodec_free_context(&mCodecCtx);
mStream = nullptr;
av_free_packet(&mPacket);
av_packet_unref(&mPacket);
av_freep(&mFrame);
swr_free(&mSwr);
av_freep(&mDataBuf);
@ -308,7 +316,12 @@ void FFmpeg_Decoder::close()
std::string FFmpeg_Decoder::getName()
{
// In FFmpeg 4.0 the "filename" field was replaced by "url"
#if LIBAVCODEC_VERSION_INT < 3805796
return mFormatCtx->filename;
#else
return mFormatCtx->url;
#endif
}
void FFmpeg_Decoder::getInfo(int *samplerate, ChannelConfig *chans, SampleType *type)
@ -341,11 +354,10 @@ void FFmpeg_Decoder::getInfo(int *samplerate, ChannelConfig *chans, SampleType *
else
{
char str[1024];
av_get_channel_layout_string(str, sizeof(str), (*mStream)->codec->channels,
(*mStream)->codec->channel_layout);
av_get_channel_layout_string(str, sizeof(str), mCodecCtx->channels, mCodecCtx->channel_layout);
Log(Debug::Error) << "Unsupported channel layout: "<< str;
if((*mStream)->codec->channels == 1)
if(mCodecCtx->channels == 1)
{
mOutputChannelLayout = AV_CH_LAYOUT_MONO;
*chans = ChannelConfig_Mono;
@ -357,27 +369,28 @@ void FFmpeg_Decoder::getInfo(int *samplerate, ChannelConfig *chans, SampleType *
}
}
*samplerate = (*mStream)->codec->sample_rate;
int64_t ch_layout = (*mStream)->codec->channel_layout;
*samplerate = mCodecCtx->sample_rate;
int64_t ch_layout = mCodecCtx->channel_layout;
if(ch_layout == 0)
ch_layout = av_get_default_channel_layout((*mStream)->codec->channels);
ch_layout = av_get_default_channel_layout(mCodecCtx->channels);
if(mOutputSampleFormat != (*mStream)->codec->sample_fmt ||
if(mOutputSampleFormat != mCodecCtx->sample_fmt ||
mOutputChannelLayout != ch_layout)
{
mSwr = swr_alloc_set_opts(mSwr, // SwrContext
mOutputChannelLayout, // output ch layout
mOutputSampleFormat, // output sample format
(*mStream)->codec->sample_rate, // output sample rate
mCodecCtx->sample_rate, // output sample rate
ch_layout, // input ch layout
(*mStream)->codec->sample_fmt, // input sample format
(*mStream)->codec->sample_rate, // input sample rate
mCodecCtx->sample_fmt, // input sample format
mCodecCtx->sample_rate, // input sample rate
0, // logging level offset
nullptr); // log context
if(!mSwr)
throw std::runtime_error("Couldn't allocate SwrContext");
if(swr_init(mSwr) < 0)
throw std::runtime_error("Couldn't initialize SwrContext");
int init=swr_init(mSwr);
if(init < 0)
throw std::runtime_error("Couldn't initialize SwrContext: "+std::to_string(init));
}
}
@ -412,7 +425,7 @@ size_t FFmpeg_Decoder::getSampleOffset()
{
int delay = (mFrameSize-mFramePos) / av_get_channel_layout_nb_channels(mOutputChannelLayout) /
av_get_bytes_per_sample(mOutputSampleFormat);
return (int)(mNextPts*(*mStream)->codec->sample_rate) - delay;
return (int)(mNextPts*mCodecCtx->sample_rate) - delay;
}
FFmpeg_Decoder::FFmpeg_Decoder(const VFS::Manager* vfs)
@ -437,7 +450,10 @@ FFmpeg_Decoder::FFmpeg_Decoder(const VFS::Manager* vfs)
static bool done_init = false;
if(!done_init)
{
// This is no longer needed in FFmpeg 4.0 and above
#if LIBAVCODEC_VERSION_INT < 3805796
av_register_all();
#endif
av_log_set_level(AV_LOG_ERROR);
done_init = true;
}

@ -6,18 +6,7 @@ extern "C"
{
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
// From libavutil version 52.2.0 and onward the declaration of
// AV_CH_LAYOUT_* is removed from libavcodec/avcodec.h and moved to
// libavutil/channel_layout.h
#if AV_VERSION_INT(52, 2, 0) <= AV_VERSION_INT(LIBAVUTIL_VERSION_MAJOR, \
LIBAVUTIL_VERSION_MINOR, LIBAVUTIL_VERSION_MICRO)
#include <libavutil/channel_layout.h>
#endif
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(55,28,1)
#define av_frame_alloc avcodec_alloc_frame
#endif
#include <libavutil/channel_layout.h>
// From version 54.56 binkaudio encoding format changed from S16 to FLTP. See:
// https://gitorious.org/ffmpeg/ffmpeg/commit/7bfd1766d1c18f07b0a2dd042418a874d49ea60d
@ -38,6 +27,7 @@ namespace MWSound
class FFmpeg_Decoder final : public Sound_Decoder
{
AVFormatContext *mFormatCtx;
AVCodecContext *mCodecCtx;
AVStream **mStream;
AVPacket mPacket;

@ -48,7 +48,7 @@ namespace MWSound
{
ssize_t clock_delay = (mFrameSize-mFramePos) / av_get_channel_layout_nb_channels(mOutputChannelLayout) /
av_get_bytes_per_sample(mOutputSampleFormat);
return (size_t)(mAudioClock*mAVStream->codec->sample_rate) - clock_delay;
return (size_t)(mAudioClock*mAudioContext->sample_rate) - clock_delay;
}
std::string getStreamName()
@ -61,7 +61,7 @@ namespace MWSound
virtual double getAudioClock()
{
return (double)getSampleOffset()/(double)mAVStream->codec->sample_rate -
return (double)getSampleOffset()/(double)mAudioContext->sample_rate -
MWBase::Environment::get().getSoundManager()->getTrackTimeDelay(mAudioTrack);
}

@ -6,15 +6,9 @@
extern "C"
{
#include <libavcodec/avcodec.h>
#include <libswresample/swresample.h>
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(55,28,1)
#define av_frame_alloc avcodec_alloc_frame
#endif
}
#include "videostate.hpp"
@ -61,6 +55,7 @@ MovieAudioDecoder::MovieAudioDecoder(VideoState* videoState)
, mFrameData(NULL)
, mDataBufLen(0)
, mFrame(av_frame_alloc())
, mGetNextPacket(true)
, mAudioDiffAccum(0.0)
, mAudioDiffAvgCoef(exp(log(0.01 / AUDIO_DIFF_AVG_NB)))
/* Correct audio only if larger error than this */
@ -68,10 +63,34 @@ MovieAudioDecoder::MovieAudioDecoder(VideoState* videoState)
, mAudioDiffAvgCount(0)
{
mAudioResampler.reset(new AudioResampler());
AVCodec *codec = avcodec_find_decoder(mAVStream->codecpar->codec_id);
if(!codec)
{
std::string ss = "No codec found for id " +
std::to_string(mAVStream->codecpar->codec_id);
throw std::runtime_error(ss);
}
AVCodecContext *avctx = avcodec_alloc_context3(codec);
avcodec_parameters_to_context(avctx, mAVStream->codecpar);
// This is no longer needed in FFmpeg 4.0 and above
#if LIBAVCODEC_VERSION_INT < 3805796
av_codec_set_pkt_timebase(avctx, mAVStream->time_base);
#endif
mAudioContext = avctx;
if(avcodec_open2(mAudioContext, codec, nullptr) < 0)
throw std::runtime_error(std::string("Failed to open audio codec ") + codec->long_name);
}
MovieAudioDecoder::~MovieAudioDecoder()
{
if(mAudioContext)
avcodec_free_context(&mAudioContext);
av_freep(&mFrame);
av_freep(&mDataBuf);
}
@ -81,13 +100,13 @@ void MovieAudioDecoder::setupFormat()
if (mAudioResampler->mSwr)
return; // already set up
AVSampleFormat inputSampleFormat = mAVStream->codec->sample_fmt;
AVSampleFormat inputSampleFormat = mAudioContext->sample_fmt;
uint64_t inputChannelLayout = mAVStream->codec->channel_layout;
uint64_t inputChannelLayout = mAudioContext->channel_layout;
if (inputChannelLayout == 0)
inputChannelLayout = av_get_default_channel_layout(mAVStream->codec->channels);
inputChannelLayout = av_get_default_channel_layout(mAudioContext->channels);
int inputSampleRate = mAVStream->codec->sample_rate;
int inputSampleRate = mAudioContext->sample_rate;
mOutputSampleRate = inputSampleRate;
mOutputSampleFormat = inputSampleFormat;
@ -133,7 +152,7 @@ int MovieAudioDecoder::synchronize_audio()
{
int n = av_get_bytes_per_sample(mOutputSampleFormat) *
av_get_channel_layout_nb_channels(mOutputChannelLayout);
sample_skip = ((int)(diff * mAVStream->codec->sample_rate) * n);
sample_skip = ((int)(diff * mAudioContext->sample_rate) * n);
}
}
@ -146,23 +165,31 @@ int MovieAudioDecoder::audio_decode_frame(AVFrame *frame, int &sample_skip)
for(;;)
{
while(pkt->size > 0)
{
int len1, got_frame;
len1 = avcodec_decode_audio4(mAVStream->codec, frame, &got_frame, pkt);
if(len1 < 0) break;
/* send the packet with the compressed data to the decoder */
int ret = 0;
if (mGetNextPacket)
ret = avcodec_send_packet(mAudioContext, pkt);
if(len1 <= pkt->size)
/* read all the output frames (in general there may be any number of them) */
while (ret >= 0)
{
ret = avcodec_receive_frame(mAudioContext, frame);
if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN))
{
/* Move the unread data to the front and clear the end bits */
int remaining = pkt->size - len1;
memmove(pkt->data, &pkt->data[len1], remaining);
av_shrink_packet(pkt, remaining);
// EAGAIN means that we need additional packets to decode this frame.
// AVERROR_EOF means the end of the stream has been reached.
mGetNextPacket = true;
break;
}
else if (ret < 0)
{
// Error encountered. Stop decoding the audio stream.
av_packet_unref(&mPacket);
mGetNextPacket = true;
return -1;
}
/* No data yet? Look for more frames */
if(!got_frame || frame->nb_samples <= 0)
if(frame->nb_samples <= 0)
continue;
if(mAudioResampler->mSwr)
@ -170,7 +197,7 @@ int MovieAudioDecoder::audio_decode_frame(AVFrame *frame, int &sample_skip)
if(!mDataBuf || mDataBufLen < frame->nb_samples)
{
av_freep(&mDataBuf);
if(av_samples_alloc(&mDataBuf, NULL, av_get_channel_layout_nb_channels(mOutputChannelLayout),
if(av_samples_alloc(&mDataBuf, nullptr, av_get_channel_layout_nb_channels(mOutputChannelLayout),
frame->nb_samples, mOutputSampleFormat, 0) < 0)
break;
else
@ -187,14 +214,16 @@ int MovieAudioDecoder::audio_decode_frame(AVFrame *frame, int &sample_skip)
else
mFrameData = &frame->data[0];
mAudioClock += (double)frame->nb_samples /
(double)mAVStream->codec->sample_rate;
int result = frame->nb_samples * av_get_channel_layout_nb_channels(mOutputChannelLayout) *
av_get_bytes_per_sample(mOutputSampleFormat);
/* We have data, return it and come back for more later */
return frame->nb_samples * av_get_channel_layout_nb_channels(mOutputChannelLayout) *
av_get_bytes_per_sample(mOutputSampleFormat);
mGetNextPacket = false;
return result;
}
av_free_packet(pkt);
av_packet_unref(&mPacket);
mGetNextPacket = true;
/* next packet */
if(mVideoState->audioq.get(pkt, mVideoState) < 0)
@ -202,7 +231,7 @@ int MovieAudioDecoder::audio_decode_frame(AVFrame *frame, int &sample_skip)
if(pkt->data == mVideoState->mFlushPktData)
{
avcodec_flush_buffers(mAVStream->codec);
avcodec_flush_buffers(mAudioContext);
mAudioDiffAccum = 0.0;
mAudioDiffAvgCount = 0;
mAudioClock = av_q2d(mAVStream->time_base)*pkt->pts;

@ -11,11 +11,7 @@ extern "C"
#include <libavutil/avutil.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#if AV_VERSION_INT(52, 2, 0) <= AV_VERSION_INT(LIBAVUTIL_VERSION_MAJOR, \
LIBAVUTIL_VERSION_MINOR, LIBAVUTIL_VERSION_MICRO)
#include <libavutil/channel_layout.h>
#endif
#include <libavutil/channel_layout.h>
}
#if defined(_WIN32) && !defined(__MINGW32__)
@ -35,6 +31,7 @@ class MovieAudioDecoder
{
protected:
VideoState *mVideoState;
AVCodecContext* mAudioContext;
AVStream *mAVStream;
enum AVSampleFormat mOutputSampleFormat;
uint64_t mOutputChannelLayout;
@ -51,7 +48,7 @@ private:
throw std::bad_alloc();
}
~AutoAVPacket()
{ av_free_packet(this); }
{ av_packet_unref(this); }
};
@ -63,6 +60,7 @@ private:
AutoAVPacket mPacket;
AVFrame *mFrame;
bool mGetNextPacket;
/* averaging filter for audio sync */
double mAudioDiffAccum;

@ -11,19 +11,7 @@ extern "C"
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
// From libavformat version 55.0.100 and onward the declaration of av_gettime() is
// removed from libavformat/avformat.h and moved to libavutil/time.h
// https://github.com/FFmpeg/FFmpeg/commit/06a83505992d5f49846c18507a6c3eb8a47c650e
#if AV_VERSION_INT(55, 0, 100) <= AV_VERSION_INT(LIBAVFORMAT_VERSION_MAJOR, \
LIBAVFORMAT_VERSION_MINOR, LIBAVFORMAT_VERSION_MICRO)
#include <libavutil/time.h>
#endif
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(55,28,1)
#define av_frame_alloc avcodec_alloc_frame
#endif
#include <libavutil/time.h>
}
static const char* flushString = "FLUSH";
@ -54,6 +42,8 @@ namespace Video
VideoState::VideoState()
: mAudioFactory(NULL)
, format_ctx(NULL)
, video_ctx(NULL)
, audio_ctx(NULL)
, av_sync_type(AV_SYNC_DEFAULT)
, audio_st(NULL)
, video_st(NULL), frame_last_pts(0.0)
@ -67,8 +57,10 @@ VideoState::VideoState()
{
mFlushPktData = flush_pkt.data;
// Register all formats and codecs
// This is no longer needed in FFmpeg 4.0 and above
#if LIBAVCODEC_VERSION_INT < 3805796
av_register_all();
#endif
}
VideoState::~VideoState()
@ -85,11 +77,12 @@ void VideoState::setAudioFactory(MovieAudioFactory *factory)
void PacketQueue::put(AVPacket *pkt)
{
AVPacketList *pkt1;
if(pkt != &flush_pkt && !pkt->buf && av_dup_packet(pkt) < 0)
throw std::runtime_error("Failed to duplicate packet");
pkt1 = (AVPacketList*)av_malloc(sizeof(AVPacketList));
if(!pkt1) throw std::bad_alloc();
if(pkt != &flush_pkt && !pkt->buf && av_packet_ref(&pkt1->pkt, pkt) < 0)
throw std::runtime_error("Failed to duplicate packet");
pkt1->pkt = *pkt;
pkt1->next = NULL;
@ -150,7 +143,7 @@ void PacketQueue::clear()
{
pkt1 = pkt->next;
if (pkt->pkt.data != flush_pkt.data)
av_free_packet(&pkt->pkt);
av_packet_unref(&pkt->pkt);
av_freep(&pkt);
}
this->last_pkt = NULL;
@ -211,7 +204,7 @@ int64_t VideoState::istream_seek(void *user_data, int64_t offset, int whence)
void VideoState::video_display(VideoPicture *vp)
{
if((*this->video_st)->codec->width != 0 && (*this->video_st)->codec->height != 0)
if(this->video_ctx->width != 0 && this->video_ctx->height != 0)
{
if (!mTexture.get())
{
@ -224,7 +217,7 @@ void VideoState::video_display(VideoPicture *vp)
osg::ref_ptr<osg::Image> image = new osg::Image;
image->setImage((*this->video_st)->codec->width, (*this->video_st)->codec->height,
image->setImage(this->video_ctx->width, this->video_ctx->height,
1, GL_RGBA, GL_RGBA, GL_UNSIGNED_BYTE, &vp->data[0], osg::Image::NO_DELETE);
mTexture->setImage(image);
@ -303,9 +296,9 @@ int VideoState::queue_picture(AVFrame *pFrame, double pts)
// matches a commonly used format (ie YUV420P)
if(this->sws_context == NULL)
{
int w = (*this->video_st)->codec->width;
int h = (*this->video_st)->codec->height;
this->sws_context = sws_getContext(w, h, (*this->video_st)->codec->pix_fmt,
int w = this->video_ctx->width;
int h = this->video_ctx->height;
this->sws_context = sws_getContext(w, h, this->video_ctx->pix_fmt,
w, h, AV_PIX_FMT_RGBA, SWS_BICUBIC,
NULL, NULL, NULL);
if(this->sws_context == NULL)
@ -313,11 +306,11 @@ int VideoState::queue_picture(AVFrame *pFrame, double pts)
}
vp->pts = pts;
vp->data.resize((*this->video_st)->codec->width * (*this->video_st)->codec->height * 4);
vp->data.resize(this->video_ctx->width * this->video_ctx->height * 4);
uint8_t *dst[4] = { &vp->data[0], nullptr, nullptr, nullptr };
sws_scale(this->sws_context, pFrame->data, pFrame->linesize,
0, (*this->video_st)->codec->height, dst, this->rgbaFrame->linesize);
0, this->video_ctx->height, dst, this->rgbaFrame->linesize);
// now we inform our display thread that we have a pic ready
this->pictq_windex = (this->pictq_windex+1) % VIDEO_PICTURE_ARRAY_SIZE;
@ -338,7 +331,7 @@ double VideoState::synchronize_video(AVFrame *src_frame, double pts)
pts = this->video_clock;
/* update the video clock */
frame_delay = av_q2d((*this->video_st)->codec->time_base);
frame_delay = av_q2d(this->video_ctx->pkt_timebase);
/* if we are repeating a frame, adjust clock accordingly */
frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
@ -347,30 +340,6 @@ double VideoState::synchronize_video(AVFrame *src_frame, double pts)
return pts;
}
static void our_free_buffer(void *opaque, uint8_t *data);
/* These are called whenever we allocate a frame
* buffer. We use this to store the global_pts in
* a frame at the time it is allocated.
*/
static int64_t global_video_pkt_pts = AV_NOPTS_VALUE;
static int our_get_buffer(struct AVCodecContext *c, AVFrame *pic, int flags)
{
AVBufferRef *ref;
int ret = avcodec_default_get_buffer2(c, pic, flags);
int64_t *pts = (int64_t*)av_malloc(sizeof(int64_t));
*pts = global_video_pkt_pts;
pic->opaque = pts;
ref = av_buffer_create((uint8_t *)pic->opaque, sizeof(int64_t), our_free_buffer, pic->buf[0], flags);
pic->buf[0] = ref;
return ret;
}
static void our_free_buffer(void *opaque, uint8_t *data)
{
AVBufferRef *ref = (AVBufferRef *)opaque;
av_buffer_unref(&ref);
av_free(data);
}
class VideoThread : public OpenThreads::Thread
{
public:
@ -384,19 +353,18 @@ public:
{
VideoState* self = mVideoState;
AVPacket pkt1, *packet = &pkt1;
int frameFinished;
AVFrame *pFrame;
pFrame = av_frame_alloc();
self->rgbaFrame = av_frame_alloc();
avpicture_alloc((AVPicture*)self->rgbaFrame, AV_PIX_FMT_RGBA, (*self->video_st)->codec->width, (*self->video_st)->codec->height);
av_image_alloc(self->rgbaFrame->data, self->rgbaFrame->linesize, self->video_ctx->width, self->video_ctx->height, AV_PIX_FMT_RGBA, 1);
while(self->videoq.get(packet, self) >= 0)
{
if(packet->data == flush_pkt.data)
{
avcodec_flush_buffers((*self->video_st)->codec);
avcodec_flush_buffers(self->video_ctx);
self->pictq_mutex.lock();
self->pictq_size = 0;
@ -405,37 +373,36 @@ public:
self->pictq_mutex.unlock();
self->frame_last_pts = packet->pts * av_q2d((*self->video_st)->time_base);
global_video_pkt_pts = static_cast<int64_t>(self->frame_last_pts);
continue;
}
// Save global pts to be stored in pFrame
global_video_pkt_pts = packet->pts;
// Decode video frame
if(avcodec_decode_video2((*self->video_st)->codec, pFrame, &frameFinished, packet) < 0)
int ret = avcodec_send_packet(self->video_ctx, packet);
// EAGAIN is not expected
if (ret < 0)
throw std::runtime_error("Error decoding video frame");
double pts = 0;
if(packet->dts != AV_NOPTS_VALUE)
pts = static_cast<double>(packet->dts);
else if(pFrame->opaque && *(int64_t*)pFrame->opaque != AV_NOPTS_VALUE)
pts = static_cast<double>(*(int64_t*)pFrame->opaque);
pts *= av_q2d((*self->video_st)->time_base);
while (!ret)
{
ret = avcodec_receive_frame(self->video_ctx, pFrame);
if (!ret)
{
double pts = pFrame->best_effort_timestamp;
pts *= av_q2d((*self->video_st)->time_base);
av_free_packet(packet);
pts = self->synchronize_video(pFrame, pts);
// Did we get a video frame?
if(frameFinished)
{
pts = self->synchronize_video(pFrame, pts);
if(self->queue_picture(pFrame, pts) < 0)
break;
if(self->queue_picture(pFrame, pts) < 0)
break;
}
}
}
av_packet_unref(packet);
av_free(pFrame);
avpicture_free((AVPicture*)self->rgbaFrame);
av_freep(&self->rgbaFrame->data[0]);
av_free(self->rgbaFrame);
}
@ -497,7 +464,14 @@ public:
// AVSEEK_FLAG_BACKWARD appears to be needed, otherwise ffmpeg may seek to a keyframe *after* the given time
// we want to seek to any keyframe *before* the given time, so we can continue decoding as normal from there on
if(av_seek_frame(self->format_ctx, streamIndex, timestamp, AVSEEK_FLAG_BACKWARD) < 0)
{
// In FFmpeg 4.0 the "filename" field was replaced by "url"
#if LIBAVCODEC_VERSION_INT < 3805796
std::cerr << "Error seeking " << self->format_ctx->filename << std::endl;
#else
std::cerr << "Error seeking " << self->format_ctx->url << std::endl;
#endif
}
else
{
// Clear the packet queues and put a special packet with the new clock time
@ -548,7 +522,7 @@ public:
else if(self->audio_st && packet->stream_index == self->audio_st-pFormatCtx->streams)
self->audioq.put(packet);
else
av_free_packet(packet);
av_packet_unref(packet);
}
}
catch(std::exception& e) {
@ -572,30 +546,43 @@ bool VideoState::update()
int VideoState::stream_open(int stream_index, AVFormatContext *pFormatCtx)
{
AVCodecContext *codecCtx;
AVCodec *codec;
if(stream_index < 0 || stream_index >= static_cast<int>(pFormatCtx->nb_streams))
return -1;
// Get a pointer to the codec context for the video stream
codecCtx = pFormatCtx->streams[stream_index]->codec;
codec = avcodec_find_decoder(codecCtx->codec_id);
if(!codec || (avcodec_open2(codecCtx, codec, NULL) < 0))
codec = avcodec_find_decoder(pFormatCtx->streams[stream_index]->codecpar->codec_id);
if(!codec)
{
fprintf(stderr, "Unsupported codec!\n");
return -1;
}
switch(codecCtx->codec_type)
switch(pFormatCtx->streams[stream_index]->codecpar->codec_type)
{
case AVMEDIA_TYPE_AUDIO:
this->audio_st = pFormatCtx->streams + stream_index;
// Allocate and populate a codec context for the audio stream
this->audio_ctx = avcodec_alloc_context3(codec);
avcodec_parameters_to_context(this->audio_ctx, pFormatCtx->streams[stream_index]->codecpar);
// This is no longer needed in FFmpeg 4.0 and above
#if LIBAVCODEC_VERSION_INT < 3805796
av_codec_set_pkt_timebase(this->audio_ctx, pFormatCtx->streams[stream_index]->time_base);
#endif
if (avcodec_open2(this->audio_ctx, codec, NULL) < 0)
{
fprintf(stderr, "Unsupported codec!\n");
return -1;
}
if (!mAudioFactory)
{
std::cerr << "No audio factory registered, can not play audio stream" << std::endl;
avcodec_close((*this->audio_st)->codec);
avcodec_free_context(&this->audio_ctx);
this->audio_st = NULL;
return -1;
}
@ -604,7 +591,7 @@ int VideoState::stream_open(int stream_index, AVFormatContext *pFormatCtx)
if (!mAudioDecoder.get())
{
std::cerr << "Failed to create audio decoder, can not play audio stream" << std::endl;
avcodec_close((*this->audio_st)->codec);
avcodec_free_context(&this->audio_ctx);
this->audio_st = NULL;
return -1;
}
@ -614,7 +601,21 @@ int VideoState::stream_open(int stream_index, AVFormatContext *pFormatCtx)
case AVMEDIA_TYPE_VIDEO:
this->video_st = pFormatCtx->streams + stream_index;
codecCtx->get_buffer2 = our_get_buffer;
// Get a pointer to the codec context for the video stream
this->video_ctx = avcodec_alloc_context3(codec);
avcodec_parameters_to_context(this->video_ctx, pFormatCtx->streams[stream_index]->codecpar);
// This is no longer needed in FFmpeg 4.0 and above
#if LIBAVCODEC_VERSION_INT < 3805796
av_codec_set_pkt_timebase(this->video_ctx, pFormatCtx->streams[stream_index]->time_base);
#endif
if (avcodec_open2(this->video_ctx, codec, NULL) < 0)
{
fprintf(stderr, "Unsupported codec!\n");
return -1;
}
this->video_thread.reset(new VideoThread(this));
break;
@ -680,9 +681,9 @@ void VideoState::init(std::shared_ptr<std::istream> inputstream, const std::stri
for(i = 0;i < this->format_ctx->nb_streams;i++)
{
if(this->format_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO && video_index < 0)
if(this->format_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_index < 0)
video_index = i;
if(this->format_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO && audio_index < 0)
if(this->format_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_index < 0)
audio_index = i;
}
@ -720,12 +721,14 @@ void VideoState::deinit()
this->video_thread.reset();
}
if(this->audio_st)
avcodec_close((*this->audio_st)->codec);
if(this->audio_ctx)
avcodec_free_context(&this->audio_ctx);
this->audio_st = NULL;
if(this->video_st)
avcodec_close((*this->video_st)->codec);
this->audio_ctx = NULL;
if(this->video_ctx)
avcodec_free_context(&this->video_ctx);
this->video_st = NULL;
this->video_ctx = NULL;
if(this->sws_context)
sws_freeContext(this->sws_context);

@ -15,6 +15,19 @@ namespace osg
class Texture2D;
}
extern "C"
{
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/imgutils.h>
#include <libavutil/channel_layout.h>
// From version 54.56 binkaudio encoding format changed from S16 to FLTP. See:
// https://gitorious.org/ffmpeg/ffmpeg/commit/7bfd1766d1c18f07b0a2dd042418a874d49ea60d
// https://ffmpeg.zeranoe.com/forum/viewtopic.php?f=15&t=872
#include <libswresample/swresample.h>
}
#include "videodefs.hpp"
#define VIDEO_PICTURE_QUEUE_SIZE 50
@ -131,6 +144,8 @@ struct VideoState {
std::shared_ptr<std::istream> stream;
AVFormatContext* format_ctx;
AVCodecContext* video_ctx;
AVCodecContext* audio_ctx;
int av_sync_type;

Loading…
Cancel
Save