diff --git a/apps/openmw/mwsound/openal_output.cpp b/apps/openmw/mwsound/openal_output.cpp
index 71459e18c3..f94cf9b43b 100644
--- a/apps/openmw/mwsound/openal_output.cpp
+++ b/apps/openmw/mwsound/openal_output.cpp
@@ -3,12 +3,16 @@
 #include
 #include
 #include
+#include <cstring>
 #include
 #include
 
-#include <boost/thread.hpp>
+#include <OpenThreads/Thread>
+#include <OpenThreads/Condition>
+#include <OpenThreads/Mutex>
+#include <OpenThreads/ScopedLock>
 
 #include "openal_output.hpp"
 #include "sound_decoder.hpp"
@@ -243,31 +247,31 @@ const ALfloat OpenAL_SoundStream::sBufferLength = 0.125f;
 //
 // A background streaming thread (keeps active streams processed)
 //
-struct OpenAL_Output::StreamThread {
+struct OpenAL_Output::StreamThread : public OpenThreads::Thread {
     typedef std::vector<OpenAL_SoundStream*> StreamVec;
     StreamVec mStreams;
 
     volatile bool mQuitNow;
-    boost::mutex mMutex;
-    boost::condition_variable mCondVar;
-    boost::thread mThread;
+    OpenThreads::Mutex mMutex;
+    OpenThreads::Condition mCondVar;
 
     StreamThread()
-      : mQuitNow(false), mThread(boost::ref(*this))
+      : mQuitNow(false)
     {
+        start();
     }
     ~StreamThread()
     {
        mQuitNow = true;
        mMutex.lock(); mMutex.unlock();
-        mCondVar.notify_all();
-        mThread.join();
+        mCondVar.broadcast();
+        join();
     }
 
-    // boost::thread entry point
-    void operator()()
+    // thread entry point
+    virtual void run()
     {
-        boost::unique_lock<boost::mutex> lock(mMutex);
+        OpenThreads::ScopedLock<OpenThreads::Mutex> lock(mMutex);
         while(!mQuitNow)
         {
             StreamVec::iterator iter = mStreams.begin();
@@ -279,31 +283,30 @@ struct OpenAL_Output::StreamThread {
                 ++iter;
             }
-            mCondVar.timed_wait(lock, boost::posix_time::milliseconds(50));
+            mCondVar.wait(&mMutex, 50);
         }
     }
 
     void add(OpenAL_SoundStream *stream)
     {
-        boost::unique_lock<boost::mutex> lock(mMutex);
+        OpenThreads::ScopedLock<OpenThreads::Mutex> lock(mMutex);
         if(std::find(mStreams.begin(), mStreams.end(), stream) == mStreams.end())
         {
             mStreams.push_back(stream);
-            lock.unlock();
-            mCondVar.notify_all();
+            mCondVar.broadcast();
         }
     }
 
     void remove(OpenAL_SoundStream *stream)
     {
-        boost::lock_guard<boost::mutex> lock(mMutex);
+        OpenThreads::ScopedLock<OpenThreads::Mutex> lock(mMutex);
         StreamVec::iterator iter = std::find(mStreams.begin(), mStreams.end(), stream);
         if(iter != mStreams.end())
             mStreams.erase(iter);
     }
 
     void removeAll()
     {
-        boost::lock_guard<boost::mutex> lock(mMutex);
+        OpenThreads::ScopedLock<OpenThreads::Mutex> lock(mMutex);
         mStreams.clear();
     }
 
@@ -468,7 +471,7 @@ ALint OpenAL_SoundStream::refillQueue()
         if(got < data.size())
         {
             mIsFinished = true;
-            memset(&data[got], mSilence, data.size()-got);
+            std::memset(&data[got], mSilence, data.size()-got);
         }
         if(got > 0)
         {
@@ -1023,7 +1026,7 @@ double OpenAL_Output::getStreamOffset(MWBase::SoundStreamPtr sound)
 {
     if(!sound->mHandle) return 0.0;
     OpenAL_SoundStream *stream = reinterpret_cast<OpenAL_SoundStream*>(sound->mHandle);
-    boost::lock_guard<boost::mutex> lock(mStreamThread->mMutex);
+    OpenThreads::ScopedLock<OpenThreads::Mutex> lock(mStreamThread->mMutex);
     return stream->getStreamOffset();
 }
 
@@ -1031,7 +1034,7 @@ float OpenAL_Output::getStreamLoudness(MWBase::SoundStreamPtr sound)
 {
     if(!sound->mHandle) return 0.0;
     OpenAL_SoundStream *stream = reinterpret_cast<OpenAL_SoundStream*>(sound->mHandle);
-    boost::lock_guard<boost::mutex> lock(mStreamThread->mMutex);
+    OpenThreads::ScopedLock<OpenThreads::Mutex> lock(mStreamThread->mMutex);
     return stream->getCurrentLoudness();
 }
 
@@ -1039,7 +1042,7 @@ bool OpenAL_Output::isStreamPlaying(MWBase::SoundStreamPtr sound)
 {
     if(!sound->mHandle) return false;
     OpenAL_SoundStream *stream = reinterpret_cast<OpenAL_SoundStream*>(sound->mHandle);
-    boost::lock_guard<boost::mutex> lock(mStreamThread->mMutex);
+    OpenThreads::ScopedLock<OpenThreads::Mutex> lock(mStreamThread->mMutex);
     return stream->isPlaying();
 }
 
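For readers who know the boost primitives but not OpenThreads: the StreamThread conversion above follows the usual OpenThreads worker pattern (subclass Thread, override run(), pair a Mutex with a Condition, start() in the constructor and join() in the destructor). Below is a minimal standalone sketch of that pattern; it is illustrative only, not part of the patch, and the Worker/add names are invented.

#include <vector>

#include <OpenThreads/Thread>
#include <OpenThreads/Mutex>
#include <OpenThreads/Condition>
#include <OpenThreads/ScopedLock>

// Hypothetical worker thread in the StreamThread style: start() in the
// constructor launches run(); the destructor wakes the loop and joins.
class Worker : public OpenThreads::Thread
{
public:
    Worker() : mQuitNow(false) { start(); }

    virtual ~Worker()
    {
        mQuitNow = true;
        mMutex.lock(); mMutex.unlock(); // sync with run() before waking it
        mCondVar.broadcast();           // OpenThreads' notify_all()
        join();
    }

    void add(int job)
    {
        OpenThreads::ScopedLock<OpenThreads::Mutex> lock(mMutex);
        mJobs.push_back(job);
        mCondVar.broadcast();
    }

    // thread entry point (replaces boost's operator())
    virtual void run()
    {
        OpenThreads::ScopedLock<OpenThreads::Mutex> lock(mMutex);
        while(!mQuitNow)
        {
            // ... process mJobs here ...
            mCondVar.wait(&mMutex, 50); // timed wait, milliseconds
        }
    }

private:
    volatile bool mQuitNow;
    std::vector<int> mJobs;
    OpenThreads::Mutex mMutex;
    OpenThreads::Condition mCondVar;
};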
diff --git a/extern/osg-ffmpeg-videoplayer/audiodecoder.cpp b/extern/osg-ffmpeg-videoplayer/audiodecoder.cpp
index f095d1617a..096a17a648 100644
--- a/extern/osg-ffmpeg-videoplayer/audiodecoder.cpp
+++ b/extern/osg-ffmpeg-videoplayer/audiodecoder.cpp
@@ -1,5 +1,6 @@
 #include "audiodecoder.hpp"
 
+#include
 
 extern "C"
 {
diff --git a/extern/osg-ffmpeg-videoplayer/videostate.cpp b/extern/osg-ffmpeg-videoplayer/videostate.cpp
index 5849e87187..6779f24727 100644
--- a/extern/osg-ffmpeg-videoplayer/videostate.cpp
+++ b/extern/osg-ffmpeg-videoplayer/videostate.cpp
@@ -1,6 +1,7 @@
 #include "videostate.hpp"
 
 #include
+#include
 
 #include
 
@@ -102,14 +103,14 @@ void PacketQueue::put(AVPacket *pkt)
     this->last_pkt = pkt1;
     this->nb_packets++;
     this->size += pkt1->pkt.size;
-    this->cond.notify_one();
+    this->cond.signal();
 
     this->mutex.unlock();
 }
 
 int PacketQueue::get(AVPacket *pkt, VideoState *is)
 {
-    boost::unique_lock<boost::mutex> lock(this->mutex);
+    OpenThreads::ScopedLock<OpenThreads::Mutex> lock(this->mutex);
     while(!is->mQuit)
     {
         AVPacketList *pkt1 = this->first_pkt;
@@ -129,7 +130,7 @@ int PacketQueue::get(AVPacket *pkt, VideoState *is)
         if(this->flushing)
             break;
-        this->cond.wait(lock);
+        this->cond.wait(&this->mutex);
     }
 
     return -1;
 
@@ -138,7 +139,7 @@ int PacketQueue::get(AVPacket *pkt, VideoState *is)
 void PacketQueue::flush()
 {
     this->flushing = true;
-    this->cond.notify_one();
+    this->cond.signal();
 }
 
 void PacketQueue::clear()
@@ -233,7 +234,7 @@ void VideoState::video_display(VideoPicture *vp)
 
 void VideoState::video_refresh()
 {
-    boost::mutex::scoped_lock lock(this->pictq_mutex);
+    OpenThreads::ScopedLock<OpenThreads::Mutex> lock(this->pictq_mutex);
     if(this->pictq_size == 0)
         return;
 
@@ -245,7 +246,7 @@ void VideoState::video_refresh()
         this->pictq_rindex = (pictq_rindex+1) % VIDEO_PICTURE_ARRAY_SIZE;
         this->frame_last_pts = vp->pts;
         this->pictq_size--;
-        this->pictq_cond.notify_one();
+        this->pictq_cond.signal();
     }
     else
     {
@@ -275,7 +276,7 @@ void VideoState::video_refresh()
         // update queue for next picture
         this->pictq_size--;
         this->pictq_rindex = (this->pictq_rindex+1) % VIDEO_PICTURE_ARRAY_SIZE;
-        this->pictq_cond.notify_one();
+        this->pictq_cond.signal();
     }
 }
 
@@ -286,9 +287,9 @@ int VideoState::queue_picture(AVFrame *pFrame, double pts)
 
     /* wait until we have a new pic */
     {
-        boost::unique_lock<boost::mutex> lock(this->pictq_mutex);
+        OpenThreads::ScopedLock<OpenThreads::Mutex> lock(this->pictq_mutex);
         while(this->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE && !this->mQuit)
-            this->pictq_cond.timed_wait(lock, boost::posix_time::milliseconds(1));
+            this->pictq_cond.wait(&this->pictq_mutex, 1);
     }
     if(this->mQuit)
         return -1;
@@ -371,168 +372,196 @@ static void our_free_buffer(void *opaque, uint8_t *data)
     av_free(data);
 }
 
-
-void VideoState::video_thread_loop(VideoState *self)
+class VideoThread : public OpenThreads::Thread
 {
-    AVPacket pkt1, *packet = &pkt1;
-    int frameFinished;
-    AVFrame *pFrame;
+public:
+    VideoThread(VideoState* self)
+        : mVideoState(self)
+    {
+        start();
+    }
+
+    virtual void run()
+    {
+        VideoState* self = mVideoState;
+        AVPacket pkt1, *packet = &pkt1;
+        int frameFinished;
+        AVFrame *pFrame;
 
-    pFrame = av_frame_alloc();
+        pFrame = av_frame_alloc();
 
-    self->rgbaFrame = av_frame_alloc();
-    avpicture_alloc((AVPicture*)self->rgbaFrame, AV_PIX_FMT_RGBA, (*self->video_st)->codec->width, (*self->video_st)->codec->height);
+        self->rgbaFrame = av_frame_alloc();
+        avpicture_alloc((AVPicture*)self->rgbaFrame, AV_PIX_FMT_RGBA, (*self->video_st)->codec->width, (*self->video_st)->codec->height);
 
-    while(self->videoq.get(packet, self) >= 0)
-    {
-        if(packet->data == flush_pkt.data)
+        while(self->videoq.get(packet, self) >= 0)
         {
-            avcodec_flush_buffers((*self->video_st)->codec);
+            if(packet->data == flush_pkt.data)
+            {
+                avcodec_flush_buffers((*self->video_st)->codec);
 
-            self->pictq_mutex.lock();
-            self->pictq_size = 0;
-            self->pictq_rindex = 0;
-            self->pictq_windex = 0;
-            self->pictq_mutex.unlock();
+                self->pictq_mutex.lock();
+                self->pictq_size = 0;
+                self->pictq_rindex = 0;
+                self->pictq_windex = 0;
+                self->pictq_mutex.unlock();
 
-            self->frame_last_pts = packet->pts * av_q2d((*self->video_st)->time_base);
-            global_video_pkt_pts = static_cast<int64_t>(self->frame_last_pts);
-            continue;
-        }
+                self->frame_last_pts = packet->pts * av_q2d((*self->video_st)->time_base);
+                global_video_pkt_pts = static_cast<int64_t>(self->frame_last_pts);
+                continue;
+            }
 
-        // Save global pts to be stored in pFrame
-        global_video_pkt_pts = packet->pts;
-        // Decode video frame
-        if(avcodec_decode_video2((*self->video_st)->codec, pFrame, &frameFinished, packet) < 0)
-            throw std::runtime_error("Error decoding video frame");
+            // Save global pts to be stored in pFrame
+            global_video_pkt_pts = packet->pts;
+            // Decode video frame
+            if(avcodec_decode_video2((*self->video_st)->codec, pFrame, &frameFinished, packet) < 0)
+                throw std::runtime_error("Error decoding video frame");
 
-        double pts = 0;
-        if(packet->dts != AV_NOPTS_VALUE)
-            pts = static_cast<double>(packet->dts);
-        else if(pFrame->opaque && *(int64_t*)pFrame->opaque != AV_NOPTS_VALUE)
-            pts = static_cast<double>(*(int64_t*)pFrame->opaque);
-        pts *= av_q2d((*self->video_st)->time_base);
+            double pts = 0;
+            if(packet->dts != AV_NOPTS_VALUE)
+                pts = static_cast<double>(packet->dts);
+            else if(pFrame->opaque && *(int64_t*)pFrame->opaque != AV_NOPTS_VALUE)
+                pts = static_cast<double>(*(int64_t*)pFrame->opaque);
+            pts *= av_q2d((*self->video_st)->time_base);
 
-        av_free_packet(packet);
+            av_free_packet(packet);
 
-        // Did we get a video frame?
-        if(frameFinished)
-        {
-            pts = self->synchronize_video(pFrame, pts);
-            if(self->queue_picture(pFrame, pts) < 0)
-                break;
+            // Did we get a video frame?
+            if(frameFinished)
+            {
+                pts = self->synchronize_video(pFrame, pts);
+                if(self->queue_picture(pFrame, pts) < 0)
+                    break;
+            }
         }
-    }
 
-    av_free(pFrame);
+        av_free(pFrame);
 
-    avpicture_free((AVPicture*)self->rgbaFrame);
-    av_free(self->rgbaFrame);
-}
+        avpicture_free((AVPicture*)self->rgbaFrame);
+        av_free(self->rgbaFrame);
+    }
+
+private:
+    VideoState* mVideoState;
+};
 
-void VideoState::decode_thread_loop(VideoState *self)
+class ParseThread : public OpenThreads::Thread
 {
-    AVFormatContext *pFormatCtx = self->format_ctx;
-    AVPacket pkt1, *packet = &pkt1;
+public:
+    ParseThread(VideoState* self)
+        : mVideoState(self)
+    {
+        start();
+    }
 
-    try
+    virtual void run()
     {
-        if(!self->video_st && !self->audio_st)
-            throw std::runtime_error("No streams to decode");
+        VideoState* self = mVideoState;
 
-        // main decode loop
-        while(!self->mQuit)
+        AVFormatContext *pFormatCtx = self->format_ctx;
+        AVPacket pkt1, *packet = &pkt1;
+
+        try
         {
-            if(self->mSeekRequested)
+            if(!self->video_st && !self->audio_st)
+                throw std::runtime_error("No streams to decode");
+
+            // main decode loop
+            while(!self->mQuit)
             {
-                uint64_t seek_target = self->mSeekPos;
-                int streamIndex = -1;
-
-                int videoStreamIndex = -1;;
-                int audioStreamIndex = -1;
-                if (self->video_st)
-                    videoStreamIndex = self->video_st - self->format_ctx->streams;
-                if (self->audio_st)
-                    audioStreamIndex = self->audio_st - self->format_ctx->streams;
-
-                if(videoStreamIndex >= 0)
-                    streamIndex = videoStreamIndex;
-                else if(audioStreamIndex >= 0)
-                    streamIndex = audioStreamIndex;
-
-                uint64_t timestamp = seek_target;
-
-                // QtCreator's highlighter doesn't like AV_TIME_BASE_Q's {} initializer for some reason
-                AVRational avTimeBaseQ = AVRational(); // = AV_TIME_BASE_Q;
-                avTimeBaseQ.num = 1;
-                avTimeBaseQ.den = AV_TIME_BASE;
-
-                if(streamIndex >= 0)
-                    timestamp = av_rescale_q(seek_target, avTimeBaseQ, self->format_ctx->streams[streamIndex]->time_base);
-
-                // AVSEEK_FLAG_BACKWARD appears to be needed, otherwise ffmpeg may seek to a keyframe *after* the given time
-                // we want to seek to any keyframe *before* the given time, so we can continue decoding as normal from there on
-                if(av_seek_frame(self->format_ctx, streamIndex, timestamp, AVSEEK_FLAG_BACKWARD) < 0)
-                    std::cerr << "Error seeking " << self->format_ctx->filename << std::endl;
-                else
+                if(self->mSeekRequested)
                 {
-                    // Clear the packet queues and put a special packet with the new clock time
-                    if(audioStreamIndex >= 0)
-                    {
-                        self->audioq.clear();
-                        flush_pkt.pts = av_rescale_q(seek_target, avTimeBaseQ,
-                                                     self->format_ctx->streams[audioStreamIndex]->time_base);
-                        self->audioq.put(&flush_pkt);
-                    }
+                    uint64_t seek_target = self->mSeekPos;
+                    int streamIndex = -1;
+
+                    int videoStreamIndex = -1;;
+                    int audioStreamIndex = -1;
+                    if (self->video_st)
+                        videoStreamIndex = self->video_st - self->format_ctx->streams;
+                    if (self->audio_st)
+                        audioStreamIndex = self->audio_st - self->format_ctx->streams;
+                    if(videoStreamIndex >= 0)
+                        streamIndex = videoStreamIndex;
+                    else if(audioStreamIndex >= 0)
+                        streamIndex = audioStreamIndex;
+
+                    uint64_t timestamp = seek_target;
+
+                    // QtCreator's highlighter doesn't like AV_TIME_BASE_Q's {} initializer for some reason
+                    AVRational avTimeBaseQ = AVRational(); // = AV_TIME_BASE_Q;
+                    avTimeBaseQ.num = 1;
+                    avTimeBaseQ.den = AV_TIME_BASE;
+
+                    if(streamIndex >= 0)
+                        timestamp = av_rescale_q(seek_target, avTimeBaseQ, self->format_ctx->streams[streamIndex]->time_base);
+
+                    // AVSEEK_FLAG_BACKWARD appears to be needed, otherwise ffmpeg may seek to a keyframe *after* the given time
+                    // we want to seek to any keyframe *before* the given time, so we can continue decoding as normal from there on
+                    if(av_seek_frame(self->format_ctx, streamIndex, timestamp, AVSEEK_FLAG_BACKWARD) < 0)
+                        std::cerr << "Error seeking " << self->format_ctx->filename << std::endl;
+                    else
                     {
-                        self->videoq.clear();
-                        flush_pkt.pts = av_rescale_q(seek_target, avTimeBaseQ,
-                                                     self->format_ctx->streams[videoStreamIndex]->time_base);
-                        self->videoq.put(&flush_pkt);
+                        // Clear the packet queues and put a special packet with the new clock time
+                        if(audioStreamIndex >= 0)
+                        {
+                            self->audioq.clear();
+                            flush_pkt.pts = av_rescale_q(seek_target, avTimeBaseQ,
+                                                         self->format_ctx->streams[audioStreamIndex]->time_base);
+                            self->audioq.put(&flush_pkt);
+                        }
+                        if(videoStreamIndex >= 0)
+                        {
+                            self->videoq.clear();
+                            flush_pkt.pts = av_rescale_q(seek_target, avTimeBaseQ,
+                                                         self->format_ctx->streams[videoStreamIndex]->time_base);
+                            self->videoq.put(&flush_pkt);
+                        }
+                        self->pictq_mutex.lock();
+                        self->pictq_size = 0;
+                        self->pictq_rindex = 0;
+                        self->pictq_windex = 0;
+                        self->pictq_mutex.unlock();
+                        self->mExternalClock.set(seek_target);
                     }
-                    self->pictq_mutex.lock();
-                    self->pictq_size = 0;
-                    self->pictq_rindex = 0;
-                    self->pictq_windex = 0;
-                    self->pictq_mutex.unlock();
-                    self->mExternalClock.set(seek_target);
+                    self->mSeekRequested = false;
                 }
-                self->mSeekRequested = false;
-            }
 
-            if((self->audio_st && self->audioq.size > MAX_AUDIOQ_SIZE) ||
-               (self->video_st && self->videoq.size > MAX_VIDEOQ_SIZE))
-            {
-                boost::this_thread::sleep(boost::posix_time::milliseconds(10));
-                continue;
-            }
+                if((self->audio_st && self->audioq.size > MAX_AUDIOQ_SIZE) ||
+                   (self->video_st && self->videoq.size > MAX_VIDEOQ_SIZE))
+                {
+                    OpenThreads::Thread::microSleep(10 * 1000);
+                    continue;
+                }
 
-            if(av_read_frame(pFormatCtx, packet) < 0)
-            {
-                if (self->audioq.nb_packets == 0 && self->videoq.nb_packets == 0 && self->pictq_size == 0)
-                    self->mVideoEnded = true;
-                continue;
-            }
-            else
-                self->mVideoEnded = false;
+                if(av_read_frame(pFormatCtx, packet) < 0)
+                {
+                    if (self->audioq.nb_packets == 0 && self->videoq.nb_packets == 0 && self->pictq_size == 0)
+                        self->mVideoEnded = true;
+                    continue;
+                }
+                else
+                    self->mVideoEnded = false;
 
-            // Is this a packet from the video stream?
-            if(self->video_st && packet->stream_index == self->video_st-pFormatCtx->streams)
-                self->videoq.put(packet);
-            else if(self->audio_st && packet->stream_index == self->audio_st-pFormatCtx->streams)
-                self->audioq.put(packet);
-            else
-                av_free_packet(packet);
+                // Is this a packet from the video stream?
+                if(self->video_st && packet->stream_index == self->video_st-pFormatCtx->streams)
+                    self->videoq.put(packet);
+                else if(self->audio_st && packet->stream_index == self->audio_st-pFormatCtx->streams)
+                    self->audioq.put(packet);
+                else
+                    av_free_packet(packet);
+            }
         }
-    }
-    catch(std::exception& e) {
-        std::cerr << "An error occurred playing the video: " << e.what () << std::endl;
+        catch(std::exception& e) {
+            std::cerr << "An error occurred playing the video: " << e.what () << std::endl;
+        }
+
+        self->mQuit = true;
     }
 
-    self->mQuit = true;
-}
+private:
+    VideoState* mVideoState;
+};
 
 
 bool VideoState::update()
@@ -587,7 +616,7 @@ int VideoState::stream_open(int stream_index, AVFormatContext *pFormatCtx)
         this->video_st = pFormatCtx->streams + stream_index;
 
         codecCtx->get_buffer2 = our_get_buffer;
-        this->video_thread = boost::thread(video_thread_loop, this);
+        this->video_thread.reset(new VideoThread(this));
         break;
 
     default:
@@ -669,7 +698,7 @@ void VideoState::init(boost::shared_ptr<std::istream> inputstream, const std::st
     }
 
 
-    this->parse_thread = boost::thread(decode_thread_loop, this);
+    this->parse_thread.reset(new ParseThread(this));
 }
 
 void VideoState::deinit()
@@ -681,10 +710,16 @@ void VideoState::deinit()
 
     mAudioDecoder.reset();
 
-    if (this->parse_thread.joinable())
-        this->parse_thread.join();
-    if (this->video_thread.joinable())
-        this->video_thread.join();
+    if (this->parse_thread.get())
+    {
+        this->parse_thread->join();
+        this->parse_thread.reset();
+    }
+    if (this->video_thread.get())
+    {
+        this->video_thread->join();
+        this->video_thread.reset();
+    }
 
     if(this->audio_st)
         avcodec_close((*this->audio_st)->codec);
@@ -779,7 +814,7 @@ ExternalClock::ExternalClock()
 
 void ExternalClock::setPaused(bool paused)
 {
-    boost::mutex::scoped_lock lock(mMutex);
+    OpenThreads::ScopedLock<OpenThreads::Mutex> lock(mMutex);
     if (mPaused == paused)
         return;
     if (paused)
@@ -793,7 +828,7 @@ void ExternalClock::setPaused(bool paused)
 
 uint64_t ExternalClock::get()
 {
-    boost::mutex::scoped_lock lock(mMutex);
+    OpenThreads::ScopedLock<OpenThreads::Mutex> lock(mMutex);
     if (mPaused)
         return mPausedAt;
     else
@@ -802,7 +837,7 @@ uint64_t ExternalClock::get()
 
 void ExternalClock::set(uint64_t time)
 {
-    boost::mutex::scoped_lock lock(mMutex);
+    OpenThreads::ScopedLock<OpenThreads::Mutex> lock(mMutex);
     mTimeBase = av_gettime() - time;
     mPausedAt = time;
 }
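The PacketQueue changes above map boost condition variables onto OpenThreads one-to-one: notify_one() becomes signal(), notify_all() becomes broadcast(), cond.wait(lock) becomes cond.wait(&mutex), and timed_wait(lock, milliseconds(n)) becomes wait(&mutex, n). A self-contained sketch of the same producer/consumer shape follows; names are invented and it is not part of the patch.

#include <deque>

#include <OpenThreads/Mutex>
#include <OpenThreads/Condition>
#include <OpenThreads/ScopedLock>

// Blocking queue in the PacketQueue style: put() signals one waiter,
// get() releases the mutex while sleeping, flush() wakes everyone up.
struct IntQueue
{
    IntQueue() : flushing(false) { }

    void put(int value)
    {
        OpenThreads::ScopedLock<OpenThreads::Mutex> lock(mutex);
        items.push_back(value);
        cond.signal();
    }

    bool get(int &out)
    {
        OpenThreads::ScopedLock<OpenThreads::Mutex> lock(mutex);
        while(items.empty() && !flushing)
            cond.wait(&mutex);      // equivalent of boost's cond.wait(lock)
        if(items.empty())
            return false;           // woken by flush(), nothing left
        out = items.front();
        items.pop_front();
        return true;
    }

    void flush()
    {
        OpenThreads::ScopedLock<OpenThreads::Mutex> lock(mutex);
        flushing = true;
        cond.broadcast();
    }

    std::deque<int> items;
    OpenThreads::Mutex mutex;
    OpenThreads::Condition cond;
    volatile bool flushing;
};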
diff --git a/extern/osg-ffmpeg-videoplayer/videostate.hpp b/extern/osg-ffmpeg-videoplayer/videostate.hpp
index 72a2aab186..2f0e8f30a6 100644
--- a/extern/osg-ffmpeg-videoplayer/videostate.hpp
+++ b/extern/osg-ffmpeg-videoplayer/videostate.hpp
@@ -2,8 +2,14 @@
 #define VIDEOPLAYER_VIDEOSTATE_H
 
 #include
+#include <memory>
+#include
 
-#include <boost/thread.hpp>
+#include
+
+#include <OpenThreads/Thread>
+#include <OpenThreads/Mutex>
+#include <OpenThreads/Condition>
 
 #include
 namespace osg
@@ -34,6 +40,8 @@
 struct VideoState;
 class MovieAudioFactory;
 class MovieAudioDecoder;
+class VideoThread;
+class ParseThread;
 
 struct ExternalClock
 {
@@ -43,7 +51,7 @@ struct ExternalClock
     uint64_t mPausedAt;
     bool mPaused;
 
-    boost::mutex mMutex;
+    OpenThreads::Mutex mMutex;
 
     void setPaused(bool paused);
     uint64_t get();
@@ -62,8 +70,8 @@ struct PacketQueue {
     int nb_packets;
     int size;
 
-    boost::mutex mutex;
-    boost::condition_variable cond;
+    OpenThreads::Mutex mutex;
+    OpenThreads::Condition cond;
 
     void put(AVPacket *pkt);
     int get(AVPacket *pkt, VideoState *is);
@@ -141,11 +149,11 @@ struct VideoState {
     VideoPicture pictq[VIDEO_PICTURE_ARRAY_SIZE];
    AVFrame* rgbaFrame; // used as buffer for the frame converted from its native format to RGBA
     int pictq_size, pictq_rindex, pictq_windex;
-    boost::mutex pictq_mutex;
-    boost::condition_variable pictq_cond;
+    OpenThreads::Mutex pictq_mutex;
+    OpenThreads::Condition pictq_cond;
 
-    boost::thread parse_thread;
-    boost::thread video_thread;
+    std::auto_ptr<ParseThread> parse_thread;
+    std::auto_ptr<VideoThread> video_thread;
 
     volatile bool mSeekRequested;
     uint64_t mSeekPos;
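One design note on the videostate.hpp change above: the new thread wrappers are classes that start themselves, so they are heap-allocated behind std::auto_ptr (the codebase predates C++11) and joined explicitly in deinit(). A minimal sketch of that lifecycle, with invented names and independent of the patch:

#include <memory>

#include <OpenThreads/Thread>

// The wrapper starts itself; the owner decides when to join and destroy it.
class DemoThread : public OpenThreads::Thread
{
public:
    DemoThread() { start(); }
    virtual void run() { /* decode loop would live here */ }
};

struct Owner
{
    std::auto_ptr<DemoThread> thread;

    void init()
    {
        thread.reset(new DemoThread());
    }

    void deinit()
    {
        if (thread.get())
        {
            thread->join();  // wait for run() to return
            thread.reset();  // then free the wrapper
        }
    }
};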