Video player fixes: play/pause & seeking

- Fix rindex overflow
- Fix audio sample size bugs (was using sample_fmt and channel count of the decoder, instead of the resampled settings). We didn't notice this bug before, because the OpenAL MovieAudioFactory tries to resample to a format of the same byte size.
- Add support for play/pause and seeking controls (not used by cutscenes in OpenMW)
- Closing the video when arriving at the stream end is now handled by the user (we may also want to keep the video open and seek back)

The video player now has a standalone demo, at https://github.com/scrawl/ogre-ffmpeg-videoplayer
loadfix
scrawl 10 years ago
parent e2bad395e5
commit b39d69e98c

@ -30,8 +30,7 @@ int VideoWidget::getVideoHeight()
bool VideoWidget::update() bool VideoWidget::update()
{ {
mPlayer.update(); return mPlayer.update();
return mPlayer.isPlaying();
} }
void VideoWidget::stop() void VideoWidget::stop()

@ -148,7 +148,7 @@ size_t FFmpeg_Decoder::readAVAudioData(void *data, size_t length)
break; break;
mFramePos = 0; mFramePos = 0;
mFrameSize = mFrame->nb_samples * (*mStream)->codec->channels * mFrameSize = mFrame->nb_samples * (*mStream)->codec->channels *
av_get_bytes_per_sample((*mStream)->codec->sample_fmt); av_get_bytes_per_sample(mOutputSampleFormat);
} }
/* Get the amount of bytes remaining to be written, and clamp to /* Get the amount of bytes remaining to be written, and clamp to
@ -384,7 +384,7 @@ void FFmpeg_Decoder::readAll(std::vector<char> &output)
while(getAVAudioData()) while(getAVAudioData())
{ {
size_t got = mFrame->nb_samples * (*mStream)->codec->channels * size_t got = mFrame->nb_samples * (*mStream)->codec->channels *
av_get_bytes_per_sample((*mStream)->codec->sample_fmt); av_get_bytes_per_sample(mOutputSampleFormat);
const char *inbuf = reinterpret_cast<char*>(mFrameData[0]); const char *inbuf = reinterpret_cast<char*>(mFrameData[0]);
output.insert(output.end(), inbuf, inbuf+got); output.insert(output.end(), inbuf, inbuf+got);
} }
@ -403,7 +403,7 @@ void FFmpeg_Decoder::rewind()
size_t FFmpeg_Decoder::getSampleOffset() size_t FFmpeg_Decoder::getSampleOffset()
{ {
int delay = (mFrameSize-mFramePos) / (*mStream)->codec->channels / int delay = (mFrameSize-mFramePos) / (*mStream)->codec->channels /
av_get_bytes_per_sample((*mStream)->codec->sample_fmt); av_get_bytes_per_sample(mOutputSampleFormat);
return (int)(mNextPts*(*mStream)->codec->sample_rate) - delay; return (int)(mNextPts*(*mStream)->codec->sample_rate) - delay;
} }

@ -44,8 +44,8 @@ namespace MWSound
size_t getSampleOffset() size_t getSampleOffset()
{ {
ssize_t clock_delay = (mFrameSize-mFramePos) / mAVStream->codec->channels / ssize_t clock_delay = (mFrameSize-mFramePos) / av_get_channel_layout_nb_channels(mOutputChannelLayout) /
av_get_bytes_per_sample(mAVStream->codec->sample_fmt); av_get_bytes_per_sample(mOutputSampleFormat);
return (size_t)(mAudioClock*mAVStream->codec->sample_rate) - clock_delay; return (size_t)(mAudioClock*mAVStream->codec->sample_rate) - clock_delay;
} }

@ -11,7 +11,6 @@ set(OGRE_FFMPEG_VIDEOPLAYER_SOURCE_FILES
audiofactory.hpp audiofactory.hpp
) )
# Find FFMPEG # Find FFMPEG
set(FFmpeg_FIND_COMPONENTS AVCODEC AVFORMAT AVUTIL SWSCALE SWRESAMPLE AVRESAMPLE) set(FFmpeg_FIND_COMPONENTS AVCODEC AVFORMAT AVUTIL SWSCALE SWRESAMPLE AVRESAMPLE)
unset(FFMPEG_LIBRARIES CACHE) unset(FFMPEG_LIBRARIES CACHE)
@ -30,10 +29,14 @@ else()
message(FATAL_ERROR "Install either libswresample (FFmpeg) or libavresample (Libav).") message(FATAL_ERROR "Install either libswresample (FFmpeg) or libavresample (Libav).")
endif() endif()
endif() endif()
include_directories(${FFMPEG_INCLUDE_DIRS}) include_directories(${FFMPEG_INCLUDE_DIRS})
# Find Boost
set(BOOST_COMPONENTS thread)
find_package(Boost REQUIRED COMPONENTS ${BOOST_COMPONENTS})
include_directories(${Boost_INCLUDE_DIRS})
add_library(${OGRE_FFMPEG_VIDEOPLAYER_LIBRARY} STATIC ${OGRE_FFMPEG_VIDEOPLAYER_SOURCE_FILES}) add_library(${OGRE_FFMPEG_VIDEOPLAYER_LIBRARY} STATIC ${OGRE_FFMPEG_VIDEOPLAYER_SOURCE_FILES})
target_link_libraries(${OGRE_FFMPEG_VIDEOPLAYER_LIBRARY} ${VIDEO_FFMPEG_LIBRARIES}) target_link_libraries(${OGRE_FFMPEG_VIDEOPLAYER_LIBRARY} ${VIDEO_FFMPEG_LIBRARIES} ${Boost_LIBRARIES})
link_directories(${CMAKE_CURRENT_BINARY_DIR}) link_directories(${CMAKE_CURRENT_BINARY_DIR})

@ -152,8 +152,8 @@ int MovieAudioDecoder::synchronize_audio()
double avg_diff = mAudioDiffAccum * (1.0 - mAudioDiffAvgCoef); double avg_diff = mAudioDiffAccum * (1.0 - mAudioDiffAvgCoef);
if(fabs(avg_diff) >= mAudioDiffThreshold) if(fabs(avg_diff) >= mAudioDiffThreshold)
{ {
int n = av_get_bytes_per_sample(mAVStream->codec->sample_fmt) * int n = av_get_bytes_per_sample(mOutputSampleFormat) *
mAVStream->codec->channels; av_get_channel_layout_nb_channels(mOutputChannelLayout);
sample_skip = ((int)(diff * mAVStream->codec->sample_rate) * n); sample_skip = ((int)(diff * mAVStream->codec->sample_rate) * n);
} }
} }
@ -161,7 +161,7 @@ int MovieAudioDecoder::synchronize_audio()
return sample_skip; return sample_skip;
} }
int MovieAudioDecoder::audio_decode_frame(AVFrame *frame) int MovieAudioDecoder::audio_decode_frame(AVFrame *frame, int &sample_skip)
{ {
AVPacket *pkt = &mPacket; AVPacket *pkt = &mPacket;
@ -191,7 +191,7 @@ int MovieAudioDecoder::audio_decode_frame(AVFrame *frame)
if(!mDataBuf || mDataBufLen < frame->nb_samples) if(!mDataBuf || mDataBufLen < frame->nb_samples)
{ {
av_freep(&mDataBuf); av_freep(&mDataBuf);
if(av_samples_alloc(&mDataBuf, NULL, mAVStream->codec->channels, if(av_samples_alloc(&mDataBuf, NULL, av_get_channel_layout_nb_channels(mOutputChannelLayout),
frame->nb_samples, mOutputSampleFormat, 0) < 0) frame->nb_samples, mOutputSampleFormat, 0) < 0)
break; break;
else else
@ -212,8 +212,8 @@ int MovieAudioDecoder::audio_decode_frame(AVFrame *frame)
(double)mAVStream->codec->sample_rate; (double)mAVStream->codec->sample_rate;
/* We have data, return it and come back for more later */ /* We have data, return it and come back for more later */
return frame->nb_samples * mAVStream->codec->channels * return frame->nb_samples * av_get_channel_layout_nb_channels(mOutputChannelLayout) *
av_get_bytes_per_sample(mAVStream->codec->sample_fmt); av_get_bytes_per_sample(mOutputSampleFormat);
} }
av_free_packet(pkt); av_free_packet(pkt);
@ -221,6 +221,18 @@ int MovieAudioDecoder::audio_decode_frame(AVFrame *frame)
if(mVideoState->audioq.get(pkt, mVideoState) < 0) if(mVideoState->audioq.get(pkt, mVideoState) < 0)
return -1; return -1;
if(pkt->data == mVideoState->mFlushPktData)
{
avcodec_flush_buffers(mAVStream->codec);
mAudioDiffAccum = 0.0;
mAudioDiffAvgCount = 0;
mAudioClock = av_q2d(mAVStream->time_base)*pkt->pts;
sample_skip = 0;
if(mVideoState->audioq.get(pkt, mVideoState) < 0)
return -1;
}
/* if update, update the audio clock w/pts */ /* if update, update the audio clock w/pts */
if((uint64_t)pkt->pts != AV_NOPTS_VALUE) if((uint64_t)pkt->pts != AV_NOPTS_VALUE)
mAudioClock = av_q2d(mAVStream->time_base)*pkt->pts; mAudioClock = av_q2d(mAVStream->time_base)*pkt->pts;
@ -229,6 +241,16 @@ int MovieAudioDecoder::audio_decode_frame(AVFrame *frame)
size_t MovieAudioDecoder::read(char *stream, size_t len) size_t MovieAudioDecoder::read(char *stream, size_t len)
{ {
if (mVideoState->mPaused)
{
// fill the buffer with silence
size_t sampleSize = av_get_bytes_per_sample(mOutputSampleFormat);
char* data[1];
data[0] = stream;
av_samples_set_silence((uint8_t**)data, 0, len/sampleSize, 1, mOutputSampleFormat);
return len;
}
int sample_skip = synchronize_audio(); int sample_skip = synchronize_audio();
size_t total = 0; size_t total = 0;
@ -237,7 +259,7 @@ size_t MovieAudioDecoder::read(char *stream, size_t len)
if(mFramePos >= mFrameSize) if(mFramePos >= mFrameSize)
{ {
/* We have already sent all our data; get more */ /* We have already sent all our data; get more */
mFrameSize = audio_decode_frame(mFrame); mFrameSize = audio_decode_frame(mFrame, sample_skip);
if(mFrameSize < 0) if(mFrameSize < 0)
{ {
/* If error, we're done */ /* If error, we're done */
@ -260,8 +282,8 @@ size_t MovieAudioDecoder::read(char *stream, size_t len)
{ {
len1 = std::min<size_t>(len1, -mFramePos); len1 = std::min<size_t>(len1, -mFramePos);
int n = av_get_bytes_per_sample(mAVStream->codec->sample_fmt) * int n = av_get_bytes_per_sample(mOutputSampleFormat)
mAVStream->codec->channels; * av_get_channel_layout_nb_channels(mOutputChannelLayout);
/* add samples by copying the first sample*/ /* add samples by copying the first sample*/
if(n == 1) if(n == 1)

@ -77,7 +77,8 @@ private:
* skip (negative means to duplicate). */ * skip (negative means to duplicate). */
int synchronize_audio(); int synchronize_audio();
int audio_decode_frame(AVFrame *frame); /// @param sample_skip If seeking happened, the sample_skip variable will be reset to 0.
int audio_decode_frame(AVFrame *frame, int &sample_skip);
public: public:
MovieAudioDecoder(VideoState *is); MovieAudioDecoder(VideoState *is);
@ -101,6 +102,8 @@ public:
virtual double getAudioClock(); virtual double getAudioClock();
/// This is the main interface to be used by the user's audio library. /// This is the main interface to be used by the user's audio library.
/// @par Request filling the \a stream with \a len number of bytes.
/// @return The number of bytes read (may not be the requested number if we arrived at the end of the audio stream)
size_t read(char *stream, size_t len); size_t read(char *stream, size_t len);
}; };

@ -38,19 +38,17 @@ void VideoPlayer::playVideo(const std::string &resourceName)
} }
} }
void VideoPlayer::update () bool VideoPlayer::update ()
{ {
if(mState) if(mState)
{ return mState->update();
if(!mState->update()) return false;
close();
}
} }
std::string VideoPlayer::getTextureName() std::string VideoPlayer::getTextureName()
{ {
std::string name; std::string name;
if (mState) if (mState && !mState->mTexture.isNull())
name = mState->mTexture->getName(); name = mState->mTexture->getName();
return name; return name;
} }
@ -58,7 +56,7 @@ std::string VideoPlayer::getTextureName()
int VideoPlayer::getVideoWidth() int VideoPlayer::getVideoWidth()
{ {
int width=0; int width=0;
if (mState) if (mState && !mState->mTexture.isNull())
width = mState->mTexture->getWidth(); width = mState->mTexture->getWidth();
return width; return width;
} }
@ -66,7 +64,7 @@ int VideoPlayer::getVideoWidth()
int VideoPlayer::getVideoHeight() int VideoPlayer::getVideoHeight()
{ {
int height=0; int height=0;
if (mState) if (mState && !mState->mTexture.isNull())
height = mState->mTexture->getHeight(); height = mState->mTexture->getHeight();
return height; return height;
} }
@ -82,14 +80,48 @@ void VideoPlayer::close()
} }
} }
bool VideoPlayer::isPlaying () bool VideoPlayer::hasAudioStream()
{
return mState && mState->audio_st != NULL;
}
void VideoPlayer::play()
{ {
return mState != NULL; if (mState)
mState->setPaused(false);
} }
bool VideoPlayer::hasAudioStream() void VideoPlayer::pause()
{ {
return mState && mState->audio_st != NULL; if (mState)
mState->setPaused(true);
}
bool VideoPlayer::isPaused()
{
if (mState)
return mState->mPaused;
return true;
}
double VideoPlayer::getCurrentTime()
{
if (mState)
return mState->get_master_clock();
return 0.0;
}
void VideoPlayer::seek(double time)
{
if (mState)
mState->seekTo(time);
}
double VideoPlayer::getDuration()
{
if (mState)
return mState->getDuration();
return 0.0;
} }
} }

@ -29,16 +29,29 @@ namespace Video
bool hasAudioStream(); bool hasAudioStream();
/// Play the given video. If a video is already playing, the old video is closed first. /// Play the given video. If a video is already playing, the old video is closed first.
/// @note The video will be unpaused by default. Use the pause() and play() methods to control pausing.
void playVideo (const std::string& resourceName); void playVideo (const std::string& resourceName);
/// Get the current playback time position in the video, in seconds
double getCurrentTime();
/// Get the duration of the video in seconds
double getDuration();
/// Seek to the specified time position in the video
void seek(double time);
void play();
void pause();
bool isPaused();
/// This should be called every frame by the user to update the video texture. /// This should be called every frame by the user to update the video texture.
void update(); /// @return Returns true if the video is still playing, false if we have reached the end of the video stream.
bool update();
/// Stop the currently playing video, if a video is playing. /// Stop the currently playing video, if a video is playing.
void close(); void close();
bool isPlaying();
/// Return the texture name of the currently playing video, or "" if no video is playing. /// Return the texture name of the currently playing video, or "" if no video is playing.
std::string getTextureName(); std::string getTextureName();
/// Return the width of the currently playing video, or 0 if no video is playing. /// Return the width of the currently playing video, or 0 if no video is playing.

@ -31,6 +31,18 @@ extern "C"
} }
static const char* flushString = "FLUSH";
struct FlushPacket : AVPacket
{
FlushPacket()
: AVPacket()
{
data = ( (uint8_t*)flushString);
}
};
static FlushPacket flush_pkt;
#include "videoplayer.hpp" #include "videoplayer.hpp"
#include "audiodecoder.hpp" #include "audiodecoder.hpp"
#include "audiofactory.hpp" #include "audiofactory.hpp"
@ -46,14 +58,18 @@ namespace Video
VideoState::VideoState() VideoState::VideoState()
: format_ctx(NULL), av_sync_type(AV_SYNC_DEFAULT) : format_ctx(NULL), av_sync_type(AV_SYNC_DEFAULT)
, external_clock_base(0.0)
, audio_st(NULL) , audio_st(NULL)
, video_st(NULL), frame_last_pts(0.0) , video_st(NULL), frame_last_pts(0.0)
, video_clock(0.0), sws_context(NULL), rgbaFrame(NULL), pictq_size(0) , video_clock(0.0), sws_context(NULL), rgbaFrame(NULL), pictq_size(0)
, pictq_rindex(0), pictq_windex(0) , pictq_rindex(0), pictq_windex(0)
, quit(false) , mQuit(false), mPaused(false)
, mAudioFactory(NULL) , mAudioFactory(NULL)
, mSeekRequested(false)
, mSeekPos(0)
, mVideoEnded(false)
{ {
mFlushPktData = flush_pkt.data;
// Register all formats and codecs // Register all formats and codecs
av_register_all(); av_register_all();
} }
@ -77,7 +93,7 @@ void PacketQueue::put(AVPacket *pkt)
pkt1->pkt = *pkt; pkt1->pkt = *pkt;
pkt1->next = NULL; pkt1->next = NULL;
if(pkt1->pkt.destruct == NULL) if(pkt->data != flush_pkt.data && pkt1->pkt.destruct == NULL)
{ {
if(av_dup_packet(&pkt1->pkt) < 0) if(av_dup_packet(&pkt1->pkt) < 0)
{ {
@ -104,7 +120,7 @@ void PacketQueue::put(AVPacket *pkt)
int PacketQueue::get(AVPacket *pkt, VideoState *is) int PacketQueue::get(AVPacket *pkt, VideoState *is)
{ {
boost::unique_lock<boost::mutex> lock(this->mutex); boost::unique_lock<boost::mutex> lock(this->mutex);
while(!is->quit) while(!is->mQuit)
{ {
AVPacketList *pkt1 = this->first_pkt; AVPacketList *pkt1 = this->first_pkt;
if(pkt1) if(pkt1)
@ -143,6 +159,7 @@ void PacketQueue::clear()
for(pkt = this->first_pkt; pkt != NULL; pkt = pkt1) for(pkt = this->first_pkt; pkt != NULL; pkt = pkt1)
{ {
pkt1 = pkt->next; pkt1 = pkt->next;
if (pkt->pkt.data != flush_pkt.data)
av_free_packet(&pkt->pkt); av_free_packet(&pkt->pkt);
av_freep(&pkt); av_freep(&pkt);
} }
@ -205,6 +222,7 @@ void VideoState::video_display(VideoPicture *vp)
void VideoState::video_refresh() void VideoState::video_refresh()
{ {
boost::mutex::scoped_lock lock(this->pictq_mutex);
if(this->pictq_size == 0) if(this->pictq_size == 0)
return; return;
@ -212,12 +230,11 @@ void VideoState::video_refresh()
{ {
VideoPicture* vp = &this->pictq[this->pictq_rindex]; VideoPicture* vp = &this->pictq[this->pictq_rindex];
this->video_display(vp); this->video_display(vp);
this->pictq_rindex = (pictq_rindex+1) % VIDEO_PICTURE_QUEUE_SIZE; this->pictq_rindex = (pictq_rindex+1) % VIDEO_PICTURE_QUEUE_SIZE;
this->frame_last_pts = vp->pts; this->frame_last_pts = vp->pts;
this->pictq_mutex.lock();
this->pictq_size--; this->pictq_size--;
this->pictq_cond.notify_one(); this->pictq_cond.notify_one();
this->pictq_mutex.unlock();
} }
else else
{ {
@ -236,19 +253,18 @@ void VideoState::video_refresh()
break; break;
} }
assert (this->pictq_rindex < VIDEO_PICTURE_QUEUE_SIZE);
VideoPicture* vp = &this->pictq[this->pictq_rindex]; VideoPicture* vp = &this->pictq[this->pictq_rindex];
this->video_display(vp); this->video_display(vp);
this->frame_last_pts = vp->pts; this->frame_last_pts = vp->pts;
this->pictq_mutex.lock();
this->pictq_size -= i; this->pictq_size -= i;
// update queue for next picture // update queue for next picture
this->pictq_size--; this->pictq_size--;
this->pictq_rindex++; this->pictq_rindex = (this->pictq_rindex+1) % VIDEO_PICTURE_QUEUE_SIZE;
this->pictq_cond.notify_one(); this->pictq_cond.notify_one();
this->pictq_mutex.unlock();
} }
} }
@ -260,12 +276,14 @@ int VideoState::queue_picture(AVFrame *pFrame, double pts)
/* wait until we have a new pic */ /* wait until we have a new pic */
{ {
boost::unique_lock<boost::mutex> lock(this->pictq_mutex); boost::unique_lock<boost::mutex> lock(this->pictq_mutex);
while(this->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE && !this->quit) while(this->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE && !this->mQuit)
this->pictq_cond.timed_wait(lock, boost::posix_time::milliseconds(1)); this->pictq_cond.timed_wait(lock, boost::posix_time::milliseconds(1));
} }
if(this->quit) if(this->mQuit)
return -1; return -1;
this->pictq_mutex.lock();
// windex is set to 0 initially // windex is set to 0 initially
vp = &this->pictq[this->pictq_windex]; vp = &this->pictq[this->pictq_windex];
@ -292,7 +310,6 @@ int VideoState::queue_picture(AVFrame *pFrame, double pts)
// now we inform our display thread that we have a pic ready // now we inform our display thread that we have a pic ready
this->pictq_windex = (this->pictq_windex+1) % VIDEO_PICTURE_QUEUE_SIZE; this->pictq_windex = (this->pictq_windex+1) % VIDEO_PICTURE_QUEUE_SIZE;
this->pictq_mutex.lock();
this->pictq_size++; this->pictq_size++;
this->pictq_mutex.unlock(); this->pictq_mutex.unlock();
@ -353,6 +370,21 @@ void VideoState::video_thread_loop(VideoState *self)
while(self->videoq.get(packet, self) >= 0) while(self->videoq.get(packet, self) >= 0)
{ {
if(packet->data == flush_pkt.data)
{
avcodec_flush_buffers((*self->video_st)->codec);
self->pictq_mutex.lock();
self->pictq_size = 0;
self->pictq_rindex = 0;
self->pictq_windex = 0;
self->pictq_mutex.unlock();
self->frame_last_pts = packet->pts * av_q2d((*self->video_st)->time_base);
global_video_pkt_pts = self->frame_last_pts;
continue;
}
// Save global pts to be stored in pFrame // Save global pts to be stored in pFrame
global_video_pkt_pts = packet->pts; global_video_pkt_pts = packet->pts;
// Decode video frame // Decode video frame
@ -394,8 +426,67 @@ void VideoState::decode_thread_loop(VideoState *self)
throw std::runtime_error("No streams to decode"); throw std::runtime_error("No streams to decode");
// main decode loop // main decode loop
while(!self->quit) while(!self->mQuit)
{ {
if(self->mSeekRequested)
{
uint64_t seek_target = self->mSeekPos;
int streamIndex = -1;
int videoStreamIndex = -1;;
int audioStreamIndex = -1;
if (self->video_st)
videoStreamIndex = self->video_st - self->format_ctx->streams;
if (self->audio_st)
audioStreamIndex = self->audio_st - self->format_ctx->streams;
if(videoStreamIndex >= 0)
streamIndex = videoStreamIndex;
else if(audioStreamIndex >= 0)
streamIndex = audioStreamIndex;
uint64_t timestamp = seek_target;
// QtCreator's highlighter doesn't like AV_TIME_BASE_Q's {} initializer for some reason
AVRational avTimeBaseQ = AVRational(); // = AV_TIME_BASE_Q;
avTimeBaseQ.num = 1;
avTimeBaseQ.den = AV_TIME_BASE;
if(streamIndex >= 0)
timestamp = av_rescale_q(seek_target, avTimeBaseQ, self->format_ctx->streams[streamIndex]->time_base);
// AVSEEK_FLAG_BACKWARD appears to be needed, otherwise ffmpeg may seek to a keyframe *after* the given time
// we want to seek to any keyframe *before* the given time, so we can continue decoding as normal from there on
if(av_seek_frame(self->format_ctx, streamIndex, timestamp, AVSEEK_FLAG_BACKWARD) < 0)
std::cerr << "Error seeking " << self->format_ctx->filename << std::endl;
else
{
// Clear the packet queues and put a special packet with the new clock time
if(audioStreamIndex >= 0)
{
self->audioq.clear();
flush_pkt.pts = av_rescale_q(seek_target, avTimeBaseQ,
self->format_ctx->streams[audioStreamIndex]->time_base);
self->audioq.put(&flush_pkt);
}
if(videoStreamIndex >= 0)
{
self->videoq.clear();
flush_pkt.pts = av_rescale_q(seek_target, avTimeBaseQ,
self->format_ctx->streams[videoStreamIndex]->time_base);
self->videoq.put(&flush_pkt);
}
self->pictq_mutex.lock();
self->pictq_size = 0;
self->pictq_rindex = 0;
self->pictq_windex = 0;
self->pictq_mutex.unlock();
self->mExternalClock.set(seek_target);
}
self->mSeekRequested = false;
}
if((self->audio_st && self->audioq.size > MAX_AUDIOQ_SIZE) || if((self->audio_st && self->audioq.size > MAX_AUDIOQ_SIZE) ||
(self->video_st && self->videoq.size > MAX_VIDEOQ_SIZE)) (self->video_st && self->videoq.size > MAX_VIDEOQ_SIZE))
{ {
@ -404,7 +495,13 @@ void VideoState::decode_thread_loop(VideoState *self)
} }
if(av_read_frame(pFormatCtx, packet) < 0) if(av_read_frame(pFormatCtx, packet) < 0)
break; {
if (self->audioq.nb_packets == 0 && self->videoq.nb_packets == 0 && self->pictq_size == 0)
self->mVideoEnded = true;
continue;
}
else
self->mVideoEnded = false;
// Is this a packet from the video stream? // Is this a packet from the video stream?
if(self->video_st && packet->stream_index == self->video_st-pFormatCtx->streams) if(self->video_st && packet->stream_index == self->video_st-pFormatCtx->streams)
@ -414,17 +511,6 @@ void VideoState::decode_thread_loop(VideoState *self)
else else
av_free_packet(packet); av_free_packet(packet);
} }
/* all done - wait for it */
self->videoq.flush();
self->audioq.flush();
while(!self->quit)
{
// EOF reached, all packets processed, we can exit now
if(self->audioq.nb_packets == 0 && self->videoq.nb_packets == 0 && self->pictq_size == 0)
break;
boost::this_thread::sleep(boost::posix_time::milliseconds(100));
}
} }
catch(std::runtime_error& e) { catch(std::runtime_error& e) {
std::cerr << "An error occured playing the video: " << e.what () << std::endl; std::cerr << "An error occured playing the video: " << e.what () << std::endl;
@ -433,17 +519,14 @@ void VideoState::decode_thread_loop(VideoState *self)
std::cerr << "An error occured playing the video: " << e.getFullDescription () << std::endl; std::cerr << "An error occured playing the video: " << e.getFullDescription () << std::endl;
} }
self->quit = true; self->mQuit = true;
} }
bool VideoState::update() bool VideoState::update()
{ {
if(this->quit)
return false;
this->video_refresh(); this->video_refresh();
return true; return !this->mVideoEnded;
} }
@ -510,7 +593,7 @@ void VideoState::init(const std::string& resourceName)
unsigned int i; unsigned int i;
this->av_sync_type = AV_SYNC_DEFAULT; this->av_sync_type = AV_SYNC_DEFAULT;
this->quit = false; this->mQuit = false;
this->stream = Ogre::ResourceGroupManager::getSingleton().openResource(resourceName); this->stream = Ogre::ResourceGroupManager::getSingleton().openResource(resourceName);
if(this->stream.isNull()) if(this->stream.isNull())
@ -564,7 +647,7 @@ void VideoState::init(const std::string& resourceName)
audio_index = i; audio_index = i;
} }
this->external_clock_base = av_gettime(); mExternalClock.set(0);
if(audio_index >= 0) if(audio_index >= 0)
this->stream_open(audio_index, this->format_ctx); this->stream_open(audio_index, this->format_ctx);
@ -598,12 +681,12 @@ void VideoState::init(const std::string& resourceName)
void VideoState::deinit() void VideoState::deinit()
{ {
this->quit = true; this->mQuit = true;
mAudioDecoder.reset(); this->audioq.flush();
this->videoq.flush();
this->audioq.cond.notify_one(); mAudioDecoder.reset();
this->videoq.cond.notify_one();
if (this->parse_thread.joinable()) if (this->parse_thread.joinable())
this->parse_thread.join(); this->parse_thread.join();
@ -643,7 +726,7 @@ void VideoState::deinit()
double VideoState::get_external_clock() double VideoState::get_external_clock()
{ {
return ((uint64_t)av_gettime()-this->external_clock_base) / 1000000.0; return mExternalClock.get() / 1000000.0;
} }
double VideoState::get_master_clock() double VideoState::get_master_clock()
@ -667,5 +750,62 @@ double VideoState::get_audio_clock()
return mAudioDecoder->getAudioClock(); return mAudioDecoder->getAudioClock();
} }
void VideoState::setPaused(bool isPaused)
{
this->mPaused = isPaused;
mExternalClock.setPaused(isPaused);
}
void VideoState::seekTo(double time)
{
time = std::max(0.0, time);
time = std::min(getDuration(), time);
mSeekPos = (uint64_t) (time * AV_TIME_BASE);
mSeekRequested = true;
}
double VideoState::getDuration()
{
return this->format_ctx->duration / 1000000.0;
}
ExternalClock::ExternalClock()
: mTimeBase(av_gettime())
, mPausedAt(0)
, mPaused(false)
{
}
void ExternalClock::setPaused(bool paused)
{
boost::mutex::scoped_lock lock(mMutex);
if (mPaused == paused)
return;
if (paused)
{
mPausedAt = av_gettime() - mTimeBase;
}
else
mTimeBase = av_gettime() - mPausedAt;
mPaused = paused;
}
uint64_t ExternalClock::get()
{
boost::mutex::scoped_lock lock(mMutex);
if (mPaused)
return mPausedAt;
else
return av_gettime() - mTimeBase;
}
void ExternalClock::set(uint64_t time)
{
boost::mutex::scoped_lock lock(mMutex);
mTimeBase = av_gettime() - time;
mPausedAt = time;
}
} }

@ -27,6 +27,21 @@ struct VideoState;
class MovieAudioFactory; class MovieAudioFactory;
class MovieAudioDecoder; class MovieAudioDecoder;
struct ExternalClock
{
ExternalClock();
uint64_t mTimeBase;
uint64_t mPausedAt;
bool mPaused;
boost::mutex mMutex;
void setPaused(bool paused);
uint64_t get();
void set(uint64_t time);
};
struct PacketQueue { struct PacketQueue {
PacketQueue() PacketQueue()
: first_pkt(NULL), last_pkt(NULL), flushing(false), nb_packets(0), size(0) : first_pkt(NULL), last_pkt(NULL), flushing(false), nb_packets(0), size(0)
@ -66,6 +81,11 @@ struct VideoState {
void init(const std::string& resourceName); void init(const std::string& resourceName);
void deinit(); void deinit();
void setPaused(bool isPaused);
void seekTo(double time);
double getDuration();
int stream_open(int stream_index, AVFormatContext *pFormatCtx); int stream_open(int stream_index, AVFormatContext *pFormatCtx);
bool update(); bool update();
@ -93,15 +113,18 @@ struct VideoState {
MovieAudioFactory* mAudioFactory; MovieAudioFactory* mAudioFactory;
boost::shared_ptr<MovieAudioDecoder> mAudioDecoder; boost::shared_ptr<MovieAudioDecoder> mAudioDecoder;
ExternalClock mExternalClock;
Ogre::DataStreamPtr stream; Ogre::DataStreamPtr stream;
AVFormatContext* format_ctx; AVFormatContext* format_ctx;
int av_sync_type; int av_sync_type;
uint64_t external_clock_base;
AVStream** audio_st; AVStream** audio_st;
PacketQueue audioq; PacketQueue audioq;
uint8_t* mFlushPktData;
AVStream** video_st; AVStream** video_st;
double frame_last_pts; double frame_last_pts;
double video_clock; ///<pts of last decoded frame / predicted pts of next decoded frame double video_clock; ///<pts of last decoded frame / predicted pts of next decoded frame
@ -113,11 +136,15 @@ struct VideoState {
boost::mutex pictq_mutex; boost::mutex pictq_mutex;
boost::condition_variable pictq_cond; boost::condition_variable pictq_cond;
boost::thread parse_thread; boost::thread parse_thread;
boost::thread video_thread; boost::thread video_thread;
volatile bool quit; volatile bool mSeekRequested;
uint64_t mSeekPos;
volatile bool mVideoEnded;
volatile bool mPaused;
volatile bool mQuit;
}; };
} }

Loading…
Cancel
Save