diff --git a/CMakeLists.txt b/CMakeLists.txt
index c11fda9f4..084363da7 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -163,6 +163,10 @@ if (USE_MPG123)
     set(SOUND_DEFINE ${SOUND_DEFINE} -DOPENMW_USE_MPG123)
 endif (USE_MPG123)
 
+find_package (SDL REQUIRED)
+set(SOUND_INPUT_INCLUDES ${SOUND_INPUT_INCLUDES} ${SDL_INCLUDE_DIR})
+set(SOUND_INPUT_LIBRARY ${SOUND_INPUT_LIBRARY} ${SDL_LIBRARY})
+
 # Platform specific
 if (WIN32)
     set(Boost_USE_STATIC_LIBS ON)
diff --git a/apps/openmw/mwrender/renderingmanager.cpp b/apps/openmw/mwrender/renderingmanager.cpp
index a1a24b7ba..5663ded09 100644
--- a/apps/openmw/mwrender/renderingmanager.cpp
+++ b/apps/openmw/mwrender/renderingmanager.cpp
@@ -928,7 +928,7 @@ void RenderingManager::setupExternalRendering (MWRender::ExternalRendering& rend
 void RenderingManager::playVideo(const std::string& name)
 {
-    mVideoPlayer->play ("video/" + name);
+    mVideoPlayer->playVideo ("video/" + name);
 }
 
 } // namespace
diff --git a/apps/openmw/mwrender/videoplayer.cpp b/apps/openmw/mwrender/videoplayer.cpp
index b1fee213f..3ddc7961c 100644
--- a/apps/openmw/mwrender/videoplayer.cpp
+++ b/apps/openmw/mwrender/videoplayer.cpp
@@ -1,520 +1,869 @@
 #include "videoplayer.hpp"
 
-//#ifdef OPENMW_USE_FFMPEG
-
-#include <OgreTextureManager.h>
-#include <OgreMaterialManager.h>
-#include <OgreRectangle2D.h>
-#include <OgreHardwarePixelBuffer.h>
-
-extern "C"
-{
-#include <libavcodec/avcodec.h>
-#include <libavformat/avformat.h>
-#include <libswscale/swscale.h>
-}
 
 #include "../mwbase/windowmanager.hpp"
 #include "../mwbase/environment.hpp"
 
-#define MIN_QUEUED_PACKETS 30
-
 namespace MWRender
 {
     int OgreResource_Read(void *opaque, uint8_t *buf, int buf_size)
     {
-        Ogre::DataStreamPtr stream = *((Ogre::DataStreamPtr*)opaque);
+        Ogre::DataStreamPtr stream = *((Ogre::DataStreamPtr*)opaque);
 
-        int num_read = stream->size() - stream->tell();
+        int num_read = stream->size() - stream->tell();
 
-        if (num_read > buf_size)
-            num_read = buf_size;
+        if (num_read > buf_size)
+            num_read = buf_size;
 
-        stream->read(buf, num_read);
-        return num_read;
+        stream->read(buf, num_read);
+        return num_read;
     }
 
     int OgreResource_Write(void *opaque, uint8_t *buf, int buf_size)
     {
-        Ogre::DataStreamPtr stream = *((Ogre::DataStreamPtr*)opaque);
+        Ogre::DataStreamPtr stream = *((Ogre::DataStreamPtr*)opaque);
 
-        int num_write = stream->size() - stream->tell();
+        int num_write = stream->size() - stream->tell();
 
-        if (num_write > buf_size)
-            num_write = buf_size;
+        if (num_write > buf_size)
+            num_write = buf_size;
 
-        stream->write (buf, num_write);
-        return num_write;
+        stream->write (buf, num_write);
+        return num_write;
     }
 
     int64_t OgreResource_Seek(void *opaque, int64_t offset, int whence)
     {
-        Ogre::DataStreamPtr stream = *((Ogre::DataStreamPtr*)opaque);
+        Ogre::DataStreamPtr stream = *((Ogre::DataStreamPtr*)opaque);
 
-        switch (whence)
-        {
-            case SEEK_SET:
-                stream->seek(offset);
-            case SEEK_CUR:
-                stream->seek(stream->tell() + offset);
-            case SEEK_END:
-                stream->seek(stream->size() + offset);
-            case AVSEEK_SIZE:
-                return stream->size();
-            default:
-                return -1;
-        }
+        switch (whence)
+        {
+            case SEEK_SET:
+                stream->seek(offset);
+                break;
+            case SEEK_CUR:
+                stream->seek(stream->tell() + offset);
+                break;
+            case SEEK_END:
+                stream->seek(stream->size() + offset);
+                break;
+            case AVSEEK_SIZE:
+                return stream->size();
+            default:
+                return -1;
+        }
 
-        return stream->tell();
+        return stream->tell();
     }
 
-    //-------------------------------------------------------------------------------------------
-
-    AVPacketQueue::AVPacketQueue():
-        mFirstPacket(NULL), mLastPacket(NULL), mNumPackets(0), mSize(0)
-    {
-    }
-
-    int AVPacketQueue::put(AVPacket* pkt)
-    {
-        if(av_dup_packet(pkt) < 0)
-        {
+    /* Since we only have one decoding thread, the Big Struct
+       can be global in case we need it. */
+    VideoState *global_video_state;
+
+    void packet_queue_init(PacketQueue *q) {
+        // PacketQueue contains boost::mutex/condition_variable members, so it
+        // must not be zeroed with memset; reset only the POD fields instead.
+        q->first_pkt = NULL;
+        q->last_pkt = NULL;
+        q->nb_packets = 0;
+        q->size = 0;
+    }
+
+    int packet_queue_put(PacketQueue *q, AVPacket *pkt) {
+        AVPacketList *pkt1;
+        if(av_dup_packet(pkt) < 0) {
             return -1;
         }
-
-        AVPacketList* pkt1;
         pkt1 = (AVPacketList*)av_malloc(sizeof(AVPacketList));
-        if (pkt1 == NULL) return -1;
+        if (!pkt1)
+            return -1;
         pkt1->pkt = *pkt;
         pkt1->next = NULL;
 
-        if (mLastPacket == NULL) mFirstPacket = pkt1;
-        else mLastPacket->next = pkt1;
-
-        mLastPacket = pkt1;
-        mNumPackets++;
-        mSize += pkt1->pkt.size;
+        q->mutex.lock ();
+        if (!q->last_pkt)
+            q->first_pkt = pkt1;
+        else
+            q->last_pkt->next = pkt1;
+        q->last_pkt = pkt1;
+        q->nb_packets++;
+        q->size += pkt1->pkt.size;
+        q->cond.notify_one();
+        q->mutex.unlock ();
 
         return 0;
     }
 
+    static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block) {
+        AVPacketList *pkt1;
+        int ret;
 
-    int AVPacketQueue::get(AVPacket* pkt, int block)
-    {
-        AVPacketList* pkt1;
+        boost::unique_lock<boost::mutex> lock(q->mutex);
 
-        while (true)
-        {
-            pkt1 = mFirstPacket;
-            if (pkt1 != NULL)
-            {
-                mFirstPacket = pkt1->next;
+        for(;;) {
 
-                if (mFirstPacket == NULL) mLastPacket = NULL;
+            if(global_video_state->quit) {
+                ret = -1;
+                break;
+            }
 
-                mNumPackets--;
-                mSize -= pkt1->pkt.size;
+            pkt1 = q->first_pkt;
+            if (pkt1) {
+                q->first_pkt = pkt1->next;
+                if (!q->first_pkt)
+                    q->last_pkt = NULL;
+                q->nb_packets--;
+                q->size -= pkt1->pkt.size;
                 *pkt = pkt1->pkt;
                 av_free(pkt1);
-                return 1;
+                ret = 1;
+                break;
+            } else if (!block) {
+                ret = 0;
+                break;
+            } else {
+                q->cond.wait(lock);
             }
-            else if (block == 0)
-            {
-                return 0;
+        }
+        return ret;
+    }
+
+    static void packet_queue_flush(PacketQueue *q) {
+        AVPacketList *pkt, *pkt1;
+
+        q->mutex.lock();
+        for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
+            pkt1 = pkt->next;
+            av_free_packet(&pkt->pkt);
+            av_freep(&pkt);
+        }
+        q->last_pkt = NULL;
+        q->first_pkt = NULL;
+        q->nb_packets = 0;
+        q->size = 0;
+        q->mutex.unlock ();
+    }
+
+    double get_audio_clock(VideoState *is) {
+        double pts;
+        int hw_buf_size, bytes_per_sec, n;
+
+        pts = is->audio_clock; /* maintained in the audio thread */
+        hw_buf_size = is->audio_buf_size - is->audio_buf_index;
+        bytes_per_sec = 0;
+        if(is->audio_st) {
+            n = is->audio_st->codec->channels * 2;
+            bytes_per_sec = is->audio_st->codec->sample_rate * n;
+        }
+        if(bytes_per_sec) {
+            pts -= (double)hw_buf_size / bytes_per_sec;
+        }
+        return pts;
+    }
+
+    double get_video_clock(VideoState *is) {
+        double delta;
+
+        delta = (av_gettime() - is->video_current_pts_time) / 1000000.0;
+        return is->video_current_pts + delta;
+    }
+
+    double get_external_clock(VideoState *is) {
+        return av_gettime() / 1000000.0;
+    }
+
+    double get_master_clock(VideoState *is) {
+        if(is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
+            return get_video_clock(is);
+        } else if(is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
+            return get_audio_clock(is);
+        } else {
+            return get_external_clock(is);
+        }
+    }
+
+    /* Add or subtract samples to get a better sync, return new
+       audio buffer size */
+    int synchronize_audio(VideoState *is, short *samples,
+                          int samples_size, double pts) {
+        int n;
+        double ref_clock;
+
+        n = 2 * is->audio_st->codec->channels;
+
+        if(is->av_sync_type != AV_SYNC_AUDIO_MASTER) {
+            double diff, avg_diff;
+            int wanted_size, min_size, max_size;
+            // int nb_samples;
+
+            ref_clock = get_master_clock(is);
+            diff = get_audio_clock(is) - ref_clock;
+            if(fabs(diff) < AV_NOSYNC_THRESHOLD) {
+                // accumulate the diffs
+                is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
+                if(is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
+                    is->audio_diff_avg_count++;
+                } else {
+                    avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
+                    if(fabs(avg_diff) >= is->audio_diff_threshold) {
+                        wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
+                        min_size = (samples_size * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100;
+                        max_size = (samples_size * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100;
+                        if(wanted_size < min_size) {
+                            wanted_size = min_size;
+                        } else if (wanted_size > max_size) {
+                            wanted_size = max_size;
                         }
-            else
-            {
-                return -1;
+                        if(wanted_size < samples_size) {
+                            /* remove samples */
+                            samples_size = wanted_size;
+                        } else if(wanted_size > samples_size) {
+                            uint8_t *samples_end, *q;
+                            int nb;
+                            /* add samples by copying the final sample */
+                            nb = (wanted_size - samples_size);
+                            samples_end = (uint8_t *)samples + samples_size - n;
+                            q = samples_end + n;
+                            while(nb > 0) {
+                                memcpy(q, samples_end, n);
+                                q += n;
+                                nb -= n;
+                            }
+                            samples_size = wanted_size;
+                        }
                     }
                 }
+            } else {
+                /* difference is TOO big; reset diff stuff */
+                is->audio_diff_avg_count = 0;
+                is->audio_diff_cum = 0;
+            }
+        }
+        return samples_size;
     }
 
+    int audio_decode_frame(VideoState *is, uint8_t *audio_buf, int buf_size, double *pts_ptr) {
+        int len1, data_size, n;
+        AVPacket *pkt = &is->audio_pkt;
+        double pts;
+
+        for(;;) {
+            while(is->audio_pkt_size > 0) {
+                data_size = buf_size;
+                len1 = avcodec_decode_audio3(is->audio_st->codec,
+                                             (int16_t *)audio_buf, &data_size, pkt);
+                if(len1 < 0) {
+                    /* if error, skip frame */
+                    is->audio_pkt_size = 0;
+                    break;
+                }
+                is->audio_pkt_data += len1;
+                is->audio_pkt_size -= len1;
+                if(data_size <= 0) {
+                    /* No data yet, get more frames */
+                    continue;
+                }
+                pts = is->audio_clock;
+                *pts_ptr = pts;
+                n = 2 * is->audio_st->codec->channels;
+                is->audio_clock += (double)data_size /
+                                   (double)(n * is->audio_st->codec->sample_rate);
+
+                /* We have data, return it and come back for more later */
+                return data_size;
+            }
+            if(pkt->data)
+                av_free_packet(pkt);
 
-    //-------------------------------------------------------------------------------------------
+            if(is->quit) {
+                return -1;
+            }
+            /* next packet */
+            if(packet_queue_get(&is->audioq, pkt, 1) < 0) {
+                return -1;
+            }
+            is->audio_pkt_data = pkt->data;
+            is->audio_pkt_size = pkt->size;
+            /* if update, update the audio clock w/pts */
+            if(pkt->pts != AV_NOPTS_VALUE) {
+                is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
+            }
+        }
+    }
 
-    VideoPlayer::VideoPlayer(Ogre::SceneManager *sceneMgr)
-        : mAvContext(NULL)
-        , mVideoStreamId(-1)
-        , mAudioStreamId(-1)
-        , mVideoClock(0)
-        , mAudioClock(0)
-        , mClock(0)
-    {
-        Ogre::MaterialPtr videoMaterial = Ogre::MaterialManager::getSingleton ().create("VideoMaterial", "General");
-        videoMaterial->getTechnique(0)->getPass(0)->setDepthWriteEnabled(false);
-        videoMaterial->getTechnique(0)->getPass(0)->setDepthCheckEnabled(false);
-        videoMaterial->getTechnique(0)->getPass(0)->setLightingEnabled(false);
-        mTextureUnit = videoMaterial->getTechnique(0)->getPass(0)->createTextureUnitState();
+    void audio_callback(void *userdata, Uint8 *stream, int len) {
+        VideoState *is = (VideoState *)userdata;
+        int len1, audio_size;
+        double pts;
+
+        while(len > 0) {
+            if(is->audio_buf_index >= is->audio_buf_size) {
+                /* We have already sent all our data; get more */
+                audio_size = audio_decode_frame(is, is->audio_buf, sizeof(is->audio_buf), &pts);
+                if(audio_size < 0) {
+                    /* If error, output silence */
+                    is->audio_buf_size = 1024;
+                    memset(is->audio_buf, 0, is->audio_buf_size);
+                } else {
+                    audio_size = synchronize_audio(is, (int16_t *)is->audio_buf,
+                                                   audio_size, pts);
+                    is->audio_buf_size = audio_size;
+                }
+                is->audio_buf_index = 0;
+            }
+            len1 = is->audio_buf_size - is->audio_buf_index;
+            if(len1 > len)
+                len1 = len;
+            memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
+            len -= len1;
+            stream += len1;
+            is->audio_buf_index += len1;
+        }
+    }
 
-        mRectangle = new Ogre::Rectangle2D(true);
-        mRectangle->setCorners(-1.0, 1.0, 1.0, -1.0);
-        mRectangle->setMaterial("VideoMaterial");
-        mRectangle->setRenderQueueGroup(Ogre::RENDER_QUEUE_OVERLAY+1);
-        // Use infinite AAB to always stay visible
-        Ogre::AxisAlignedBox aabInf;
-        aabInf.setInfinite();
-        mRectangle->setBoundingBox(aabInf);
-        // Attach background to the scene
-        Ogre::SceneNode* node = sceneMgr->getRootSceneNode()->createChildSceneNode();
-        node->attachObject(mRectangle);
-        mRectangle->setVisible(false);
-        mRectangle->setVisibilityFlags (0x1);
-    }
+    /*
+    static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque) {
+        SDL_Event event;
+        event.type = FF_REFRESH_EVENT;
+        event.user.data1 = opaque;
+        SDL_PushEvent(&event);
+        return 0; // 0 means stop timer
+    }
+    */
 
-    VideoPlayer::~VideoPlayer()
+    void timer_callback (int delay, VideoState* is)
     {
-        if (mAvContext)
-            deleteContext();
-
-        delete mRectangle;
+        boost::this_thread::sleep (boost::posix_time::milliseconds(delay));
+        is->refresh++;
     }
 
-    void VideoPlayer::play (const std::string& resourceName)
+    /* schedule a video refresh in 'delay' ms */
+    static void schedule_refresh(VideoState *is, int delay)
     {
-        mStream = Ogre::ResourceGroupManager::getSingleton ().openResource (resourceName);
-
-        mVideoStreamId = -1;
-        mAudioStreamId = -1;
-        mAudioStream = NULL;
-        mVideoStream = NULL;
-        mVideoClock = 0;
-        mAudioClock = 0;
-        mClock = 0;
+        //SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
+        //is->refresh_queue.push_back (delay);
 
-        // if something is already playing, close it
-        if (mAvContext)
-            close();
+        boost::thread (boost::bind(&timer_callback, delay, is));
+    }
 
-        mRectangle->setVisible(true);
+    void video_display(VideoState *is)
+    {
+        VideoPicture *vp;
 
-        MWBase::Environment::get().getWindowManager ()->pushGuiMode (MWGui::GM_Video);
+        vp = &is->pictq[is->pictq_rindex];
+        if (is->video_st->codec->width != 0 && is->video_st->codec->height != 0)
+        {
+            Ogre::TexturePtr texture = Ogre::TextureManager::getSingleton ().getByName("VideoTexture");
+            if (texture.isNull () || texture->getWidth() != is->video_st->codec->width
+                                  || texture->getHeight() != is->video_st->codec->height)
+            {
+                Ogre::TextureManager::getSingleton ().remove ("VideoTexture");
+                texture = Ogre::TextureManager::getSingleton().createManual(
+                    "VideoTexture",
+                    Ogre::ResourceGroupManager::DEFAULT_RESOURCE_GROUP_NAME,
+                    Ogre::TEX_TYPE_2D,
+                    is->video_st->codec->width, is->video_st->codec->height,
+                    0,
+                    Ogre::PF_BYTE_RGBA,
+                    Ogre::TU_DYNAMIC_WRITE_ONLY_DISCARDABLE);
+            }
+            Ogre::PixelBox pb(is->video_st->codec->width, is->video_st->codec->height, 1, Ogre::PF_BYTE_RGBA, vp->data);
+            Ogre::HardwarePixelBufferSharedPtr buffer = texture->getBuffer();
+            buffer->blitFromMemory(pb);
+        }
 
-        // BASIC INITIALIZATION
+        free(vp->data);
+    }
 
-        // Load all the decoders
-        av_register_all();
+    void video_refresh_timer(void *userdata) {
 
-        AVIOContext *ioContext = 0;
+        VideoState *is = (VideoState *)userdata;
+        VideoPicture *vp;
+        double actual_delay, delay, sync_threshold, ref_clock, diff;
 
-        int err = 0;
-
-        mAvContext = avformat_alloc_context();
-        if (!mAvContext)
-            throwError(0);
+        if(is->video_st) {
+            if(is->pictq_size == 0) {
+                schedule_refresh(is, 1);
+            } else {
+                vp = &is->pictq[is->pictq_rindex];
 
-        ioContext = avio_alloc_context(NULL, 0, 0, &mStream, OgreResource_Read, OgreResource_Write, OgreResource_Seek);
-        if (!ioContext)
-            throwError(0);
+                is->video_current_pts = vp->pts;
+                is->video_current_pts_time = av_gettime();
 
-        mAvContext->pb = ioContext;
+                delay = vp->pts - is->frame_last_pts; /* the pts from last time */
+                if(delay <= 0 || delay >= 1.0) {
+                    /* if incorrect delay, use previous one */
+                    delay = is->frame_last_delay;
+                }
+                /* save for next time */
+                is->frame_last_delay = delay;
+                is->frame_last_pts = vp->pts;
+
+                /* update delay to sync to audio if not master source */
+                if(is->av_sync_type != AV_SYNC_VIDEO_MASTER) {
+                    ref_clock = get_master_clock(is);
+                    diff = vp->pts - ref_clock;
+
+                    /* Skip or repeat the frame. Take delay into account;
+                       FFPlay still doesn't "know if this is the best guess." */
+                    sync_threshold = (delay > AV_SYNC_THRESHOLD) ? delay : AV_SYNC_THRESHOLD;
+                    if(fabs(diff) < AV_NOSYNC_THRESHOLD) {
+                        if(diff <= -sync_threshold) {
+                            delay = 0;
+                        } else if(diff >= sync_threshold) {
+                            delay = 2 * delay;
+                        }
+                    }
+                }
 
-        err = avformat_open_input(&mAvContext, resourceName.c_str(), NULL, NULL);
-        if (err != 0)
-            throwError(err);
+                is->frame_timer += delay;
+                /* compute the REAL delay */
+                actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
+                if(actual_delay < 0.010) {
+                    /* Really it should skip the picture instead */
+                    actual_delay = 0.010;
+                }
+                schedule_refresh(is, (int)(actual_delay * 1000 + 0.5));
 
-        err = avformat_find_stream_info(mAvContext, 0);
-        if (err < 0)
-            throwError(err);
+                /* show the picture! */
+                video_display(is);
 
+                /* update queue for next picture! */
+                if(++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE) {
+                    is->pictq_rindex = 0;
+                }
+                is->pictq_mutex.lock();
+                is->pictq_size--;
+                is->pictq_cond.notify_one ();
+                is->pictq_mutex.unlock ();
+            }
+        } else {
+            schedule_refresh(is, 100);
+        }
+    }
 
+    int queue_picture(VideoState *is, AVFrame *pFrame, double pts) {
+        VideoPicture *vp;
 
-        // Find the video stream among the different streams
-        for (unsigned int i = 0; i < mAvContext->nb_streams; i++)
+        /* wait until we have a new pic */
         {
-            if (mAvContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
-            {
-                mVideoStreamId = i;
-                mVideoStream = mAvContext->streams[i];
-                break;
+            boost::unique_lock<boost::mutex> lock(is->pictq_mutex);
+            while(is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
+                  !is->quit) {
+                is->pictq_cond.timed_wait(lock, boost::posix_time::milliseconds(1));
             }
         }
-        if (mVideoStreamId < 0)
-            throw std::runtime_error("No video stream found in the video");
 
-        // Get the video decoder
-        mVideoCodec = avcodec_find_decoder(mVideoStream->codec->codec_id);
-        if (NULL == mVideoCodec)
-            throw std::runtime_error("No video decoder found");
+        if(is->quit)
+            return -1;
 
-        // Load the video codec
-        err = avcodec_open2(mVideoStream->codec, mVideoCodec, 0);
-        if (err < 0)
-            throwError (err);
+        // windex is set to 0 initially
+        vp = &is->pictq[is->pictq_windex];
+
+        // Convert the image into the RGBA format we upload to the Ogre texture
+        if(is->sws_context == NULL) {
+            int w = is->video_st->codec->width;
+            int h = is->video_st->codec->height;
+            is->sws_context = sws_getContext(w, h,
+                is->video_st->codec->pix_fmt, w, h,
+                PIX_FMT_RGBA, SWS_BICUBIC, NULL, NULL, NULL);
+            if(is->sws_context == NULL)
+                throw std::runtime_error("Cannot initialize the conversion context!\n");
+        }
+
+        vp->data = (uint8_t*) malloc(is->video_st->codec->width * is->video_st->codec->height * 4);
+        sws_scale(is->sws_context, pFrame->data, pFrame->linesize,
+                  0, is->video_st->codec->height, &vp->data, is->rgbaFrame->linesize);
 
-        // Find the audio stream among the different streams
-        for (unsigned int i = 0; i < mAvContext->nb_streams; i++)
-        {
-            if (mAvContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO)
-            {
-                mAudioStreamId = i;
-                mAudioStream = mAvContext->streams[i];
-                break;
-            }
-        }
-        if (mAudioStreamId >= 0)
-        {
-            // Get the audio decoder
-            mAudioCodec = avcodec_find_decoder(mAudioStream->codec->codec_id);
-            if (mAudioCodec == NULL)
-            {
-                throw std::runtime_error("Stream doesn't have an audio codec");
-            }
+        vp->pts = pts;
 
-            // Load the audio codec
-            err = avcodec_open2(mAudioStream->codec, mAudioCodec, 0);
-            if (err < 0)
-                throwError (err);
+        // now we inform our display thread that we have a pic ready
+        if(++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE) {
+            is->pictq_windex = 0;
         }
+        is->pictq_mutex.lock();
+        is->pictq_size++;
+        is->pictq_mutex.unlock();
 
+        return 0;
+    }
 
+    double synchronize_video(VideoState *is, AVFrame *src_frame, double pts) {
 
-        // Create the frame buffers
-        mRawFrame = avcodec_alloc_frame();
-        mRGBAFrame = avcodec_alloc_frame();
-        if (!mRawFrame || !mRGBAFrame)
-        {
-            throw std::runtime_error("Can't allocate video frames");
+        double frame_delay;
+
+        if(pts != 0) {
+            /* if we have pts, set video clock to it */
+            is->video_clock = pts;
+        } else {
+            /* if we aren't given a pts, set it to the clock */
+            pts = is->video_clock;
         }
+        /* update the video clock */
+        frame_delay = av_q2d(is->video_st->codec->time_base);
+        /* if we are repeating a frame, adjust clock accordingly */
+        frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
+        is->video_clock += frame_delay;
+        return pts;
+    }
 
+    uint64_t global_video_pkt_pts = AV_NOPTS_VALUE;
+
+    /* These are called whenever we allocate a frame
+     * buffer. We use this to store the global_pts in
+     * a frame at the time it is allocated.
+     */
+    int our_get_buffer(struct AVCodecContext *c, AVFrame *pic) {
+        int ret = avcodec_default_get_buffer(c, pic);
+        uint64_t *pts = (uint64_t*)av_malloc(sizeof(uint64_t));
+        *pts = global_video_pkt_pts;
+        pic->opaque = pts;
+        return ret;
+    }
+
+    void our_release_buffer(struct AVCodecContext *c, AVFrame *pic) {
+        if(pic) av_freep(&pic->opaque);
+        avcodec_default_release_buffer(c, pic);
+    }
 
-        avpicture_alloc ((AVPicture *)mRGBAFrame, PIX_FMT_RGBA, mVideoStream->codec->width, mVideoStream->codec->height);
+    int video_thread(void *arg) {
+        VideoState *is = (VideoState *)arg;
+        AVPacket pkt1, *packet = &pkt1;
+        int len1, frameFinished;
+        AVFrame *pFrame;
+        double pts;
 
+        pFrame = avcodec_alloc_frame();
 
-        // Setup the image scaler
-        // All this does is convert from YUV to RGB - note it would be faster to do this in a shader,
-        // but i'm not worried about performance just yet
-        mSwsContext = sws_getContext(mVideoStream->codec->width, mVideoStream->codec->height,
-                                     mVideoStream->codec->pix_fmt,
-                                     mVideoStream->codec->width, mVideoStream->codec->height,
-                                     PIX_FMT_RGBA,
-                                     SWS_BICUBIC, NULL, NULL, NULL);
-        if (!mSwsContext)
-            throw std::runtime_error("Can't create SWS Context");
+        is->rgbaFrame = avcodec_alloc_frame();
+        avpicture_alloc ((AVPicture *)is->rgbaFrame, PIX_FMT_RGBA, is->video_st->codec->width, is->video_st->codec->height);
 
-        mTextureUnit->setTextureName ("");
-        if (!Ogre::TextureManager::getSingleton ().getByName("VideoTexture").isNull())
-            Ogre::TextureManager::getSingleton().remove("VideoTexture");
+        for(;;) {
+            if(packet_queue_get(&is->videoq, packet, 1) < 0) {
+                // means we quit getting packets
+                break;
+            }
+            pts = 0;
+
+            // Save global pts to be stored in pFrame
+            global_video_pkt_pts = packet->pts;
+            // Decode video frame
+            len1 = avcodec_decode_video2(is->video_st->codec, pFrame, &frameFinished,
+                                         packet);
+            if(packet->dts == AV_NOPTS_VALUE
+               && pFrame->opaque && *(uint64_t*)pFrame->opaque != AV_NOPTS_VALUE) {
+                pts = *(uint64_t *)pFrame->opaque;
+            } else if(packet->dts != AV_NOPTS_VALUE) {
+                pts = packet->dts;
+            } else {
+                pts = 0;
+            }
+            pts *= av_q2d(is->video_st->time_base);
 
-        mVideoTexture = Ogre::TextureManager::getSingleton().createManual(
-            "VideoTexture",
-            Ogre::ResourceGroupManager::DEFAULT_RESOURCE_GROUP_NAME,
-            Ogre::TEX_TYPE_2D,
-            mVideoStream->codec->width, mVideoStream->codec->height,
-            0,
-            Ogre::PF_BYTE_RGBA,
-            Ogre::TU_DYNAMIC_WRITE_ONLY_DISCARDABLE);
-
-        // initialize to (0, 0, 0, 1)
-        std::vector<Ogre::uint32> buffer;
-        buffer.resize(mVideoStream->codec->width * mVideoStream->codec->height);
-        for (int p=0; p<mVideoStream->codec->width * mVideoStream->codec->height; ++p)
-        {
-            buffer[p] = (255 << 24);
+            // Did we get a video frame?
+            if(frameFinished) {
+                pts = synchronize_video(is, pFrame, pts);
+                if(queue_picture(is, pFrame, pts) < 0) {
+                    break;
+                }
+            }
+            av_free_packet(packet);
         }
-        memcpy(mVideoTexture->getBuffer()->lock(Ogre::HardwareBuffer::HBL_DISCARD), &buffer[0], mVideoStream->codec->width*mVideoStream->codec->height*4);
-        mVideoTexture->getBuffer()->unlock();
-        mTextureUnit->setTextureName ("VideoTexture");
 
+        SDL_CloseAudio();
+        av_free(pFrame);
 
-        // Queue up some packets
-        while (mVideoPacketQueue.getNumPackets() < MIN_QUEUED_PACKETS
-               && addToBuffer());
+        avpicture_free((AVPicture *)is->rgbaFrame);
+        av_free(is->rgbaFrame);
 
-        mTimer.reset();
+        return 0;
     }
 
-    void VideoPlayer::throwError(int error)
+    int stream_component_open(VideoState *is, int stream_index, AVFormatContext *pFormatCtx)
     {
-        char buffer[4096] = {0};
+        AVCodecContext *codecCtx;
+        AVCodec *codec;
+        SDL_AudioSpec wanted_spec, spec;
 
-        if (0 == av_strerror(error, buffer, sizeof(buffer)))
-        {
-            std::stringstream msg;
-            msg << "FFMPEG error: ";
-            msg << buffer << std::endl;
-            throw std::runtime_error(msg.str());
+        if(stream_index < 0 || stream_index >= pFormatCtx->nb_streams) {
+            return -1;
         }
-        else
-            throw std::runtime_error("Unknown FFMPEG error");
+
+        // Get a pointer to the codec context for the video stream
+        codecCtx = pFormatCtx->streams[stream_index]->codec;
+
+        if(codecCtx->codec_type == AVMEDIA_TYPE_AUDIO) {
+            // Set audio settings from codec info
+            wanted_spec.freq = codecCtx->sample_rate;
+            wanted_spec.format = AUDIO_S16SYS;
+            wanted_spec.channels = codecCtx->channels;
+            wanted_spec.silence = 0;
+            wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
+            wanted_spec.callback = audio_callback;
+            wanted_spec.userdata = is;
+
+            if(SDL_OpenAudio(&wanted_spec, &spec) < 0) {
+                fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
+                return -1;
+            }
+            is->audio_hw_buf_size = spec.size;
+        }
+        codec = avcodec_find_decoder(codecCtx->codec_id);
+        if(!codec || (avcodec_open2(codecCtx, codec, NULL) < 0)) {
+            fprintf(stderr, "Unsupported codec!\n");
+            return -1;
+        }
+
+        switch(codecCtx->codec_type) {
+            case AVMEDIA_TYPE_AUDIO:
+                is->audioStream = stream_index;
+                is->audio_st = pFormatCtx->streams[stream_index];
+                is->audio_buf_size = 0;
+                is->audio_buf_index = 0;
+
+                /* averaging filter for audio sync */
+                is->audio_diff_avg_coef = exp(log(0.01 / AUDIO_DIFF_AVG_NB));
+                is->audio_diff_avg_count = 0;
+                /* Correct audio only if larger error than this */
+                is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / codecCtx->sample_rate;
+
+                memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
+                packet_queue_init(&is->audioq);
+                SDL_PauseAudio(0);
+                break;
+            case AVMEDIA_TYPE_VIDEO:
+                is->videoStream = stream_index;
+                is->video_st = pFormatCtx->streams[stream_index];
+
+                is->frame_timer = (double)av_gettime() / 1000000.0;
+                is->frame_last_delay = 40e-3;
+                is->video_current_pts_time = av_gettime();
+
+                packet_queue_init(&is->videoq);
+                is->video_thread = boost::thread(video_thread, is);
+                codecCtx->get_buffer = our_get_buffer;
+                codecCtx->release_buffer = our_release_buffer;
+                break;
+            default:
+                break;
+        }
+
+        return 0;
     }
 
-    void VideoPlayer::update()
-    {
-        if (!mAvContext)
-            return;
+    int decode_interrupt_cb(void) {
+        return (global_video_state && global_video_state->quit);
+    }
+
+    int decode_thread(void *arg) {
+
+        VideoState *is = (VideoState *)arg;
+        AVFormatContext *pFormatCtx = avformat_alloc_context ();
+        AVPacket pkt1, *packet = &pkt1;
+
+        int video_index = -1;
+        int audio_index = -1;
+        int i;
+
+        is->videoStream = -1;
+        is->audioStream = -1;
+        is->quit = 0;
+
+        Ogre::DataStreamPtr stream = Ogre::ResourceGroupManager::getSingleton ().openResource (is->resourceName);
+        if(stream.isNull ())
+            throw std::runtime_error("Failed to open video resource");
+
+        AVIOContext *ioContext = 0;
+
+        ioContext = avio_alloc_context(NULL, 0, 0, &stream, OgreResource_Read, OgreResource_Write, OgreResource_Seek);
+        if (!ioContext)
+            throw std::runtime_error("Failed to allocate ioContext ");
+
+        pFormatCtx->pb = ioContext;
+
+        global_video_state = is;
+        // will interrupt blocking functions if we quit!
+        //url_set_interrupt_cb(decode_interrupt_cb);
+
+        // Open video file
+        /// \todo leak here, ffmpeg or valgrind bug ?
+        if (avformat_open_input(&pFormatCtx, is->resourceName.c_str(), NULL, NULL))
+            throw std::runtime_error("Failed to open video input");
 
-        double dt = mTimer.getMilliseconds () / 1000.f;
-        mTimer.reset ();
+        // Retrieve stream information
+        if(avformat_find_stream_info(pFormatCtx, NULL) < 0)
+            throw std::runtime_error("Failed to retrieve stream information");
 
-        //UpdateAudio(fTime);
-        //std::cout << "num packets: " << mVideoPacketQueue.getNumPackets() << " clocks: " << mVideoClock << " , " << mClock << std::endl;
-        while (!mVideoPacketQueue.isEmpty() && mVideoClock < mClock)
+        // Dump information about file onto standard error
+        av_dump_format(pFormatCtx, 0, is->resourceName.c_str(), 0);
+
+        for(i=0; i<pFormatCtx->nb_streams; i++) {
+            if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO &&
+               video_index < 0) {
+                video_index=i;
+            }
+            if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO &&
+               audio_index < 0) {
+                audio_index=i;
+            }
+        }
+
+        if(audio_index >= 0) {
+            stream_component_open(is, audio_index, pFormatCtx);
+        }
+        if(video_index >= 0) {
+            stream_component_open(is, video_index, pFormatCtx);
+        }
+
+        if(is->videoStream >= 0 /*|| is->audioStream < 0*/)
+        {
-            while (mVideoPacketQueue.getNumPackets() < MIN_QUEUED_PACKETS
-                   && addToBuffer());
+            for(;;) {
+                if(is->quit) {
+                    break;
+                }
+                if( (is->audioStream >= 0 && is->audioq.size > MAX_AUDIOQ_SIZE) ||
+                    is->videoq.size > MAX_VIDEOQ_SIZE) {
+                    boost::this_thread::sleep(boost::posix_time::milliseconds(10));
+                    continue;
+                }
+                if(av_read_frame(pFormatCtx, packet) < 0) {
+                    break;
+                }
+                // Is this a packet from the video stream?
+                if(packet->stream_index == is->videoStream) {
+                    packet_queue_put(&is->videoq, packet);
+                } else if(packet->stream_index == is->audioStream) {
+                    packet_queue_put(&is->audioq, packet);
+                } else {
+                    av_free_packet(packet);
+                }
+            }
+
+            /* all done - wait for it */
+            while(!is->quit) {
+                // EOF reached, all packets processed, we can exit now
+                if (is->audioq.nb_packets == 0 && is->videoq.nb_packets == 0)
+                    break;
+                boost::this_thread::sleep(boost::posix_time::milliseconds(100));
+            }
-
-            if (mVideoPacketQueue.getNumPackets ())
-                decodeNextVideoFrame();
         }
 
-        mClock += dt;
-
-        //curTime += fTime;
+        is->quit = 1;
 
-        if(mVideoPacketQueue.getNumPackets()==0 /* && mAudioPacketQueue.getNumPackets()==0 */)
-            close();
-    }
+        is->audioq.cond.notify_one ();
+        is->videoq.cond.notify_one ();
 
-    void VideoPlayer::decodeNextVideoFrame ()
-    {
-        // Make sure there is something to decode
-        assert (mVideoPacketQueue.getNumPackets ());
+        is->video_thread.join();
 
-        // Get the front frame and decode it
-        AVPacket packet;
-        mVideoPacketQueue.get(&packet, 1);
+        if (is->audioStream >= 0)
+            avcodec_close(is->audio_st->codec);
+        if (is->videoStream >= 0)
+            avcodec_close(is->video_st->codec);
 
-        int res;
-        int didDecodeFrame = 0;
-        res = avcodec_decode_video2(mVideoStream->codec, mRawFrame, &didDecodeFrame, &packet);
+        sws_freeContext (is->sws_context);
 
-        if (res < 0 || !didDecodeFrame)
-            throw std::runtime_error ("an error occured while decoding the video frame");
+        av_close_input_file(pFormatCtx);
+        pFormatCtx = NULL;
 
-        // Set video clock to the PTS of this packet (presentation timestamp)
-        double pts = 0;
-        if (packet.pts != -1.0) pts = packet.pts;
-        pts *= av_q2d(mVideoStream->time_base);
-        mVideoClock = pts;
+        av_free(ioContext);
 
-        // Convert the frame to RGB
-        sws_scale(mSwsContext,
-                  mRawFrame->data, mRawFrame->linesize,
-                  0, mVideoStream->codec->height,
-                  mRGBAFrame->data, mRGBAFrame->linesize);
+        return 0;
+    }
 
-        Ogre::HardwarePixelBufferSharedPtr pixelBuffer = mVideoTexture->getBuffer();
-        Ogre::PixelBox pb(mVideoStream->codec->width, mVideoStream->codec->height, 1, Ogre::PF_BYTE_RGBA, mRGBAFrame->data[0]);
-        pixelBuffer->blitFromMemory(pb);
+    VideoPlayer::VideoPlayer(Ogre::SceneManager* sceneMgr)
+        : mState(NULL)
+        , mSceneMgr(sceneMgr)
+    {
+        mVideoMaterial = Ogre::MaterialManager::getSingleton ().create("VideoMaterial", "General");
+        mVideoMaterial->getTechnique(0)->getPass(0)->setDepthWriteEnabled(false);
+        mVideoMaterial->getTechnique(0)->getPass(0)->setDepthCheckEnabled(false);
+        mVideoMaterial->getTechnique(0)->getPass(0)->setLightingEnabled(false);
+        mVideoMaterial->getTechnique(0)->getPass(0)->createTextureUnitState();
 
-        if (packet.data != NULL) av_free_packet(&packet);
+        mRectangle = new Ogre::Rectangle2D(true);
+        mRectangle->setCorners(-1.0, 1.0, 1.0, -1.0);
+        mRectangle->setMaterial("VideoMaterial");
+        mRectangle->setRenderQueueGroup(Ogre::RENDER_QUEUE_OVERLAY+1);
+        // Use infinite AAB to always stay visible
+        Ogre::AxisAlignedBox aabInf;
+        aabInf.setInfinite();
+        mRectangle->setBoundingBox(aabInf);
+        // Attach background to the scene
+        Ogre::SceneNode* node = sceneMgr->getRootSceneNode()->createChildSceneNode();
+        node->attachObject(mRectangle);
+        mRectangle->setVisible(false);
+        mRectangle->setVisibilityFlags (0x1);
     }
 
-    void VideoPlayer::close ()
+    VideoPlayer::~VideoPlayer ()
     {
-        mRectangle->setVisible (false);
-        MWBase::Environment::get().getWindowManager ()->removeGuiMode (MWGui::GM_Video);
-
-        deleteContext();
+        if (mState)
+            close();
     }
 
-    void VideoPlayer::deleteContext()
+    void VideoPlayer::playVideo (const std::string &resourceName)
     {
-        while (mVideoPacketQueue.getNumPackets ())
-        {
-            AVPacket packet;
-            mVideoPacketQueue.get(&packet, 1);
-            if (packet.data != NULL) av_free_packet(&packet);
-        }
-        while (mAudioPacketQueue.getNumPackets ())
-        {
-            AVPacket packet;
-            mAudioPacketQueue.get(&packet, 1);
-            if (packet.data != NULL) av_free_packet(&packet);
-        }
+        if (mState)
+            close();
 
-        if (mVideoStream && mVideoStream->codec != NULL) avcodec_close(mVideoStream->codec);
-        if (mAudioStream && mAudioStream->codec != NULL) avcodec_close(mAudioStream->codec);
+        mRectangle->setVisible(true);
 
-        avpicture_free((AVPicture *)mRGBAFrame);
+        MWBase::Environment::get().getWindowManager ()->pushGuiMode (MWGui::GM_Video);
 
-        if (mRawFrame)
-            av_free(mRawFrame);
-        if (mRGBAFrame)
-            av_free(mRGBAFrame);
+        mState = new VideoState;
 
-        sws_freeContext(mSwsContext);
+        // Register all formats and codecs
+        av_register_all();
 
-        avformat_close_input(&mAvContext);
+        if(SDL_Init(SDL_INIT_AUDIO)) {
+            throw std::runtime_error("Failed to initialize SDL");
+        }
 
-        mAvContext = NULL;
-    }
+        mState->refresh = 0;
+        mState->resourceName = resourceName;
 
+        schedule_refresh(mState, 40);
+        mState->av_sync_type = DEFAULT_AV_SYNC_TYPE;
+        mState->parse_thread = boost::thread(decode_thread, mState);
+    }
 
-    bool VideoPlayer::addToBuffer()
+    void VideoPlayer::update ()
     {
-        if(mAvContext)
+        if (mState && mState->refresh)
         {
-            AVPacket packet;
-            if (av_read_frame(mAvContext, &packet) >= 0)
-            {
-                if (packet.stream_index == mVideoStreamId)
-                {
-                    // I don't believe this is necessary.
-                    /*
-                    if(mClock==0)
-                    {
-                        mClock = packet.dts;
-                        mClock *= av_q2d(mVideoStream->time_base);
-                        std::cout << "Initializing clock to: " << mClock << std::endl;
-                    }
-                    */
-
-                    mVideoPacketQueue.put(&packet);
-
-                    return true;
-                }
-                else if (packet.stream_index == mAudioStreamId && mAudioStream)
-                {
-                    mAudioPacketQueue.put(&packet);
-                    return true;
-                }
-                else
-                {
-                    av_free_packet(&packet);
-                    return false;
-                }
-            }
+            video_refresh_timer (mState);
+            mState->refresh--;
+        }
+        if (mState && mState->quit)
+        {
+            close();
         }
 
-        return false;
+        if (!Ogre::TextureManager::getSingleton ().getByName ("VideoTexture").isNull ())
+            mVideoMaterial->getTechnique(0)->getPass(0)->getTextureUnitState (0)->setTextureName ("VideoTexture");
+    }
+
+    void VideoPlayer::close()
+    {
+        mState->quit = 1;
+
+        mState->parse_thread.join ();
+
+        delete mState;
+        mState = NULL;
+
+        mRectangle->setVisible (false);
+        MWBase::Environment::get().getWindowManager ()->removeGuiMode (MWGui::GM_Video);
     }
-}
 
-//#endif
+    bool VideoPlayer::isPlaying ()
+    {
+        return mState != NULL;
+    }
+
+}
diff --git a/apps/openmw/mwrender/videoplayer.hpp b/apps/openmw/mwrender/videoplayer.hpp
index 466bb7902..9766ba8ee 100644
--- a/apps/openmw/mwrender/videoplayer.hpp
+++ b/apps/openmw/mwrender/videoplayer.hpp
@@ -1,137 +1,162 @@
-#ifndef MWRENDER_VIDEOPLAYER_H
-#define MWRENDER_VIDEOPLAYER_H
+#ifndef VIDEOPLAYER_H
+#define VIDEOPLAYER_H
 
-//#ifdef OPENMW_USE_FFMPEG
+#include <string>
+#include <boost/thread.hpp>
+#include <boost/bind.hpp>
 
-#include <string>
-
-#include <OgreTexture.h>
-#include <OgreTimer.h>
-#include <OgreDataStream.h>
-
-namespace Ogre
+#define __STDC_CONSTANT_MACROS
+#include <stdint.h>
+extern "C"
 {
-    class Rectangle2D;
-    class SceneManager;
-    class TextureUnitState;
+#include <libavcodec/avcodec.h>
+#include <libavformat/avformat.h>
+#include <libswscale/swscale.h>
 }
 
-struct AVFormatContext;
-struct AVCodecContext;
-struct AVCodec;
-struct AVStream;
-struct AVFrame;
-struct SwsContext;
-struct AVPacket;
-struct AVPacketList;
+#include <SDL.h>
+#include <SDL_thread.h>
+
+#include <OgreMaterial.h>
+#include <OgreRectangle2D.h>
+
+#define SDL_AUDIO_BUFFER_SIZE 1024
+#define MAX_AUDIOQ_SIZE (5 * 16 * 1024)
+#define MAX_VIDEOQ_SIZE (5 * 256 * 1024)
+#define AV_SYNC_THRESHOLD 0.01
+#define AV_NOSYNC_THRESHOLD 10.0
+#define SAMPLE_CORRECTION_PERCENT_MAX 10
+#define AUDIO_DIFF_AVG_NB 20
+#define VIDEO_PICTURE_QUEUE_SIZE 1
+#define DEFAULT_AV_SYNC_TYPE AV_SYNC_VIDEO_MASTER
 
 namespace MWRender
 {
-    /// A simple queue used to queue raw audio and video data.
-    class AVPacketQueue
-    {
-    public:
-        AVPacketQueue();
-        int put(AVPacket* pkt);
-        int get(AVPacket* pkt, int block);
-
-        bool isEmpty() const { return mNumPackets == 0; }
-        int getNumPackets() const { return mNumPackets; }
-        int getSize() const { return mSize; }
+    enum
+    {
+        AV_SYNC_AUDIO_MASTER,
+        AV_SYNC_VIDEO_MASTER,
+        AV_SYNC_EXTERNAL_MASTER
+    };
 
-    private:
-        AVPacketList* mFirstPacket;
-        AVPacketList* mLastPacket;
-        int mNumPackets;
-        int mSize;
+    struct PacketQueue {
+        PacketQueue () :
+            first_pkt(NULL), last_pkt(NULL), nb_packets(0), size(0)
+        {}
+        AVPacketList *first_pkt, *last_pkt;
+        int nb_packets;
+        int size;
+
+        boost::mutex mutex;
+        boost::condition_variable cond;
     };
 
+    struct VideoPicture {
+        VideoPicture () :
+            data(NULL), pts(0)
+        {}
+        uint8_t* data;
+
+        double pts;
+    };
+
+    static void packet_queue_flush(PacketQueue *q);
+
+    struct VideoState {
+        VideoState () :
+            videoStream(-1), audioStream(-1), av_sync_type(0), external_clock(0),
+            external_clock_time(0), audio_clock(0), audio_st(NULL), audio_buf_size(0),
+            audio_pkt_data(NULL), audio_pkt_size(0), audio_hw_buf_size(0), audio_diff_cum(0), audio_diff_avg_coef(0),
+            audio_diff_threshold(0), audio_diff_avg_count(0), frame_timer(0), frame_last_pts(0), frame_last_delay(0),
+            video_clock(0), video_current_pts(0), video_current_pts_time(0), video_st(NULL), rgbaFrame(NULL), pictq_size(0),
+            pictq_rindex(0), pictq_windex(0), quit(false), refresh(0), sws_context(NULL)
+        {}
+
+        ~VideoState()
+        {
+            packet_queue_flush (&audioq);
+            packet_queue_flush (&videoq);
+
+            if (pictq_size >= 1)
+                free (pictq[0].data);
+        }
+
+        int videoStream, audioStream;
+
+        int av_sync_type;
+        double external_clock; /* external clock base */
+        int64_t external_clock_time;
+
+        double audio_clock;
+        AVStream *audio_st;
+        PacketQueue audioq;
+        DECLARE_ALIGNED(16, uint8_t, audio_buf[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]);
+        unsigned int audio_buf_size;
+        unsigned int audio_buf_index;
+        AVPacket audio_pkt;
+        uint8_t *audio_pkt_data;
+        int audio_pkt_size;
+        int audio_hw_buf_size;
+        double audio_diff_cum; /* used for AV difference average computation */
+        double audio_diff_avg_coef;
+        double audio_diff_threshold;
+        int audio_diff_avg_count;
+
+        double frame_timer;
+        double frame_last_pts;
+        double frame_last_delay;
+        double video_clock; ///<pts of last decoded frame / predicted pts of next decoded frame
+        double video_current_pts; ///<current displayed pts
+        int64_t video_current_pts_time; ///<time (av_gettime) at which we updated video_current_pts
+        AVStream *video_st;
+        PacketQueue videoq;
+        VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
+        int pictq_size, pictq_rindex, pictq_windex;
+        boost::mutex pictq_mutex;
+        boost::condition_variable pictq_cond;
+
+        boost::thread parse_thread;
+        boost::thread video_thread;
+
+        SwsContext* sws_context;
+        AVFrame* rgbaFrame; // used as a buffer for the frame converted from its native format to RGBA
+
+        std::string resourceName;
+
+        volatile int quit;
+        volatile int refresh;
+    };
 
-    /// Plays a video on the screen.
-    class VideoPlayer
-    {
-    public:
-        VideoPlayer(Ogre::SceneManager* sceneMgr);
-        ~VideoPlayer();
-
-        void play (const std::string& resourceName);
-        void update();
-        void close();
-
-    private:
-        void decodeNextVideoFrame();
-        void throwError(int error);
-        void deleteContext();
-        bool addToBuffer();
-
-        Ogre::Rectangle2D* mRectangle;
-        Ogre::TextureUnitState* mTextureUnit;
-        Ogre::TexturePtr mVideoTexture;
-        Ogre::DataStreamPtr mStream;
-        Ogre::Timer mTimer;
-
-        AVFormatContext* mAvContext;
-        AVCodec* mVideoCodec;
-        AVCodec* mAudioCodec;
-        AVStream* mVideoStream;
-        AVStream* mAudioStream;
-        AVFrame* mRawFrame;
-        AVFrame* mRGBAFrame;
-        SwsContext* mSwsContext;
-
-        int mVideoStreamId;
-        int mAudioStreamId;
-        double mVideoClock;
-        double mAudioClock;
-        double mClock;
-
-        AVPacketQueue mVideoPacketQueue;
-        AVPacketQueue mAudioPacketQueue;
-    };
+    class VideoPlayer
+    {
+    public:
+        VideoPlayer(Ogre::SceneManager* sceneMgr);
+        ~VideoPlayer();
+
+        void playVideo (const std::string& resourceName);
+
+        void update ();
+
+        void close ();
+
+        bool isPlaying ();
+
+    private:
+        VideoState* mState;
+
+        Ogre::SceneManager* mSceneMgr;
+        Ogre::Rectangle2D* mRectangle;
+        Ogre::MaterialPtr mVideoMaterial;
+    };
 }
 
-//#endif
-
 #endif
diff --git a/cmake/FindSDL.cmake b/cmake/FindSDL.cmake
new file mode 100644
--- /dev/null
+++ b/cmake/FindSDL.cmake
+# Locate SDL library
+# This module defines
+#  SDL_LIBRARY, the name of the library to link against
+#  SDL_FOUND, if false, do not try to link to SDL
+#  SDL_INCLUDE_DIR, where to find SDL.h
+#
+# $SDLDIR is an environment variable that would
+# correspond to the ./configure --prefix=$SDLDIR
+# used in building SDL.
+#
+# Note that the header path has changed from SDL/SDL.h to just SDL.h
+# This needed to change because "proper" SDL convention
+# is #include "SDL.h", not <SDL/SDL.h>. This is done for portability
+# reasons because not all systems place things in SDL/ (see FreeBSD).
+
+#=============================================================================
+# Copyright 2003-2009 Kitware, Inc.
+#
+# Distributed under the OSI-approved BSD License (the "License");
+# see accompanying file Copyright.txt for details.
+#
+# This software is distributed WITHOUT ANY WARRANTY; without even the
+# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+# See the License for more information.
+#=============================================================================
+# (To distribute this file outside of CMake, substitute the full
+#  License text for the above reference.)
+
+FIND_PATH(SDL_INCLUDE_DIR SDL.h
+    HINTS
+    $ENV{SDLDIR}
+    PATH_SUFFIXES include/SDL include
+    PATHS
+    ~/Library/Frameworks
+    /Library/Frameworks
+    /usr/local/include/SDL12
+    /usr/local/include/SDL11 # FreeBSD ports
+    /usr/include/SDL12
+    /usr/include/SDL11
+    /sw # Fink
+    /opt/local # DarwinPorts
+    /opt/csw # Blastwave
+    /opt
+)
+#MESSAGE("SDL_INCLUDE_DIR is ${SDL_INCLUDE_DIR}")
+
+# SDL-1.1 is the name used by FreeBSD ports...
+# don't confuse it for the version number.
+FIND_LIBRARY(SDL_LIBRARY_TEMP
+    NAMES SDL SDL-1.1
+    HINTS
+    $ENV{SDLDIR}
+    PATH_SUFFIXES lib64 lib
+    PATHS
+    /sw
+    /opt/local
+    /opt/csw
+    /opt
+)
+#MESSAGE("SDL_LIBRARY_TEMP is ${SDL_LIBRARY_TEMP}")
+
+IF(NOT SDL_BUILDING_LIBRARY)
+    IF(NOT ${SDL_INCLUDE_DIR} MATCHES ".framework")
+        # Non-OS X framework versions expect you to also dynamically link to
+        # SDLmain. This is mainly for Windows and OS X. Other (Unix) platforms
+        # seem to provide SDLmain for compatibility even though they don't
+        # necessarily need it.
+        FIND_LIBRARY(SDLMAIN_LIBRARY
+            NAMES SDLmain SDLmain-1.1
+            HINTS
+            $ENV{SDLDIR}
+            PATH_SUFFIXES lib64 lib
+            PATHS
+            /sw
+            /opt/local
+            /opt/csw
+            /opt
+        )
+    ENDIF(NOT ${SDL_INCLUDE_DIR} MATCHES ".framework")
+ENDIF(NOT SDL_BUILDING_LIBRARY)
+
+# SDL may require threads on your system.
+# The Apple build may not need an explicit flag because one of the
+# frameworks may already provide it.
+# But for non-OSX systems, I will use the CMake Threads package.
+IF(NOT APPLE)
+    FIND_PACKAGE(Threads)
+ENDIF(NOT APPLE)
+
+# MinGW needs an additional library, mwindows
+# Its total link flags should look like -lmingw32 -lSDLmain -lSDL -lmwindows
+# (Actually on second look, I think it only needs one of the m* libraries.)
+IF(MINGW)
+    SET(MINGW32_LIBRARY mingw32 CACHE STRING "mwindows for MinGW")
+ENDIF(MINGW)
+
+SET(SDL_FOUND "NO")
+IF(SDL_LIBRARY_TEMP)
+    # For SDLmain
+    IF(NOT SDL_BUILDING_LIBRARY)
+        IF(SDLMAIN_LIBRARY)
+            SET(SDL_LIBRARY_TEMP ${SDLMAIN_LIBRARY} ${SDL_LIBRARY_TEMP})
+        ENDIF(SDLMAIN_LIBRARY)
+    ENDIF(NOT SDL_BUILDING_LIBRARY)
+
+    # For OS X, SDL uses Cocoa as a backend so it must link to Cocoa.
+    # CMake doesn't display the -framework Cocoa string in the UI even
+    # though it actually is there if I modify a pre-used variable.
+    # I think it has something to do with the CACHE STRING.
+    # So I use a temporary variable until the end so I can set the
+    # "real" variable in one-shot.
+    IF(APPLE)
+        SET(SDL_LIBRARY_TEMP ${SDL_LIBRARY_TEMP} "-framework Cocoa")
+    ENDIF(APPLE)
+
+    # For threads, as mentioned Apple doesn't need this.
+    # In fact, there seems to be a problem if I used the Threads package
+    # and try using this line, so I'm just skipping it entirely for OS X.
+    IF(NOT APPLE)
+        SET(SDL_LIBRARY_TEMP ${SDL_LIBRARY_TEMP} ${CMAKE_THREAD_LIBS_INIT})
+    ENDIF(NOT APPLE)
+
+    # For MinGW library
+    IF(MINGW)
+        SET(SDL_LIBRARY_TEMP ${MINGW32_LIBRARY} ${SDL_LIBRARY_TEMP})
+    ENDIF(MINGW)
+
+    # Set the final string here so the GUI reflects the final state.
+    SET(SDL_LIBRARY ${SDL_LIBRARY_TEMP} CACHE STRING "Where the SDL Library can be found")
+    # Set the temp variable to INTERNAL so it is not seen in the CMake GUI
+    SET(SDL_LIBRARY_TEMP "${SDL_LIBRARY_TEMP}" CACHE INTERNAL "")
+
+    SET(SDL_FOUND "YES")
+ENDIF(SDL_LIBRARY_TEMP)
+
+#MESSAGE("SDL_LIBRARY is ${SDL_LIBRARY}")
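Note on the threading model in this patch: everything hangs off the blocking PacketQueue. decode_thread() is the producer, put()ting demuxed packets; the SDL audio callback and video_thread() are consumers that sleep inside packet_queue_get() until a packet or the quit flag arrives. The stand-alone sketch below (illustrative names only, not code from the patch; boost-era C++ to match it) isolates that producer/consumer pattern, including the detail decode_thread() relies on: after flipping the quit flag you must notify the condition variable, or a blocked consumer never wakes.

    #include <boost/thread.hpp>
    #include <queue>
    #include <iostream>

    struct Queue
    {
        std::queue<int> items;          // stands in for the AVPacketList chain
        boost::mutex mutex;
        boost::condition_variable cond;
        bool quit;
        Queue() : quit(false) {}

        void put(int item)
        {
            boost::unique_lock<boost::mutex> lock(mutex);
            items.push(item);
            cond.notify_one();          // wake one blocked consumer
        }

        // Blocks like packet_queue_get(..., block=1); returns false on quit.
        bool get(int &out)
        {
            boost::unique_lock<boost::mutex> lock(mutex);
            while (items.empty() && !quit)
                cond.wait(lock);        // releases the mutex while sleeping
            if (quit)
                return false;
            out = items.front();
            items.pop();
            return true;
        }
    };

    void consumer(Queue *q)
    {
        int item;
        while (q->get(item))
            std::cout << "consumed packet " << item << std::endl;
    }

    int main()
    {
        Queue q;
        boost::thread t(consumer, &q);  // same style as boost::thread(video_thread, is)
        for (int i = 0; i < 5; ++i)
            q.put(i);

        {   // flag under the lock so a consumer mid-check cannot miss the wakeup
            boost::unique_lock<boost::mutex> lock(q.mutex);
            q.quit = true;
        }
        q.cond.notify_one();
        t.join();
        return 0;
    }

The same reasoning explains the is->audioq.cond.notify_one()/is->videoq.cond.notify_one() pair at the end of decode_thread(): without them, a consumer parked in packet_queue_get() would never observe quit.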
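The refresh scheduling in video_refresh_timer() is easier to see with numbers. The frame's nominal delay (the pts difference to the previous frame) is stretched or collapsed depending on how far the frame's pts drifts from the master clock: more than one sync threshold late collapses the delay to zero, more than one threshold early doubles it. A minimal sketch of just that decision, using the thresholds from videoplayer.hpp (compute_delay is a hypothetical helper, not a function in the patch):

    #include <cmath>
    #include <cstdio>

    const double AV_SYNC_THRESHOLD = 0.01;   // values from videoplayer.hpp
    const double AV_NOSYNC_THRESHOLD = 10.0;

    // diff = frame pts - master clock; negative means the video is late
    double compute_delay(double frame_delay, double diff)
    {
        double sync_threshold = (frame_delay > AV_SYNC_THRESHOLD) ? frame_delay
                                                                  : AV_SYNC_THRESHOLD;
        if (std::fabs(diff) < AV_NOSYNC_THRESHOLD)
        {
            if (diff <= -sync_threshold)
                return 0.0;                  // late: show the frame immediately
            else if (diff >= sync_threshold)
                return 2 * frame_delay;      // early: hold the frame longer
        }
        return frame_delay;                  // in sync, or hopelessly off: keep pace
    }

    int main()
    {
        // a 25 fps stream (0.04 s/frame) drifting 60 ms ahead of the audio clock
        std::printf("%.3f\n", compute_delay(0.04, 0.06));  // prints 0.080
        return 0;
    }

With a 25 fps stream running 60 ms ahead of the audio clock, the sketch prints 0.080: the frame is held for two frame times so the audio can catch up, which is exactly the delay-doubling branch above.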
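SDL's audio side is pull-based: SDL_OpenAudio() registers a callback that must fill exactly len bytes whenever the hardware wants more data, which is why audio_callback() in the patch pads with silence (a zeroed 1024-byte buffer) when audio_decode_frame() fails rather than returning early. A minimal sketch of that contract against the same SDL 1.2 API the patch targets (it just plays half a second of silence):

    #include <SDL.h>
    #include <cstring>

    static void fill_audio(void *userdata, Uint8 *stream, int len)
    {
        // Nothing to play: write silence rather than leaving the buffer stale.
        std::memset(stream, 0, len);
    }

    int main(int argc, char *argv[])
    {
        if (SDL_Init(SDL_INIT_AUDIO) != 0)
            return 1;

        SDL_AudioSpec wanted;
        std::memset(&wanted, 0, sizeof(wanted));
        wanted.freq = 44100;
        wanted.format = AUDIO_S16SYS;
        wanted.channels = 2;
        wanted.samples = 1024;           // SDL_AUDIO_BUFFER_SIZE in the patch
        wanted.callback = fill_audio;

        if (SDL_OpenAudio(&wanted, NULL) != 0)
            return 1;

        SDL_PauseAudio(0);               // start pulling from the callback
        SDL_Delay(500);
        SDL_CloseAudio();
        SDL_Quit();
        return 0;
    }

This is also why stream_component_open() records spec.size as audio_hw_buf_size: get_audio_clock() subtracts the bytes still sitting in the hardware buffer to estimate what the listener is actually hearing.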