mirror of
				https://github.com/OpenMW/openmw.git
				synced 2025-11-03 23:56:43 +00:00 
			
		
		
		
	Merge branch 'fix-video-arm' into 'master'
osg-ffmpeg-videoplayer: Fix crash on ARM Closes #5807 See merge request OpenMW/openmw!564
This commit is contained in:
		
						commit
						194875dec1
					
				
					 5 changed files with 83 additions and 40 deletions
				
			
		| 
						 | 
				
			
			@ -97,6 +97,7 @@
 | 
			
		|||
    Bug #5739: Saving and loading the save a second or two before hitting the ground doesn't count fall damage
 | 
			
		||||
    Bug #5758: Paralyzed actors behavior is inconsistent with vanilla
 | 
			
		||||
    Bug #5762: Movement solver is insufficiently robust
 | 
			
		||||
    Bug #5807: Video decoding crash on ARM
 | 
			
		||||
    Bug #5821: NPCs from mods getting removed if mod order was changed
 | 
			
		||||
    Bug #5835: OpenMW doesn't accept negative values for NPC's hello, alarm, fight, and flee
 | 
			
		||||
    Bug #5836: OpenMW dialogue/greeting/voice filter doesn't accept negative Ai values for NPC's hello, alarm, fight, and flee
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -287,9 +287,9 @@ void FFmpeg_Decoder::close()
 | 
			
		|||
    mStream = nullptr;
 | 
			
		||||
 | 
			
		||||
    av_packet_unref(&mPacket);
 | 
			
		||||
    av_freep(&mFrame);
 | 
			
		||||
    swr_free(&mSwr);
 | 
			
		||||
    av_freep(&mDataBuf);
 | 
			
		||||
    av_frame_free(&mFrame);
 | 
			
		||||
    swr_free(&mSwr);
 | 
			
		||||
 | 
			
		||||
    if(mFormatCtx)
 | 
			
		||||
    {
 | 
			
		||||
| 
						 | 
				
			
			@ -302,11 +302,9 @@ void FFmpeg_Decoder::close()
 | 
			
		|||
            //
 | 
			
		||||
            if (mFormatCtx->pb->buffer != nullptr)
 | 
			
		||||
            {
 | 
			
		||||
                av_free(mFormatCtx->pb->buffer);
 | 
			
		||||
                mFormatCtx->pb->buffer = nullptr;
 | 
			
		||||
                av_freep(&mFormatCtx->pb->buffer);
 | 
			
		||||
            }
 | 
			
		||||
            av_free(mFormatCtx->pb);
 | 
			
		||||
            mFormatCtx->pb = nullptr;
 | 
			
		||||
            avio_context_free(&mFormatCtx->pb);
 | 
			
		||||
        }
 | 
			
		||||
        avformat_close_input(&mFormatCtx);
 | 
			
		||||
    }
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -91,7 +91,7 @@ MovieAudioDecoder::~MovieAudioDecoder()
 | 
			
		|||
    if(mAudioContext)
 | 
			
		||||
        avcodec_free_context(&mAudioContext);
 | 
			
		||||
 | 
			
		||||
    av_freep(&mFrame);
 | 
			
		||||
    av_frame_free(&mFrame);
 | 
			
		||||
    av_freep(&mDataBuf);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			@ -222,7 +222,7 @@ int MovieAudioDecoder::audio_decode_frame(AVFrame *frame, int &sample_skip)
 | 
			
		|||
            return result;
 | 
			
		||||
        }
 | 
			
		||||
 | 
			
		||||
        av_packet_unref(&mPacket);
 | 
			
		||||
        av_packet_unref(pkt);
 | 
			
		||||
        mGetNextPacket = true;
 | 
			
		||||
 | 
			
		||||
        /* next packet */
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
							
								
								
									
										95
									
								
								extern/osg-ffmpeg-videoplayer/videostate.cpp
									
									
									
									
										vendored
									
									
								
							
							
						
						
									
										95
									
								
								extern/osg-ffmpeg-videoplayer/videostate.cpp
									
									
									
									
										vendored
									
									
								
							| 
						 | 
				
			
			@ -2,6 +2,7 @@
 | 
			
		|||
 | 
			
		||||
#include <algorithm>
 | 
			
		||||
#include <cassert>
 | 
			
		||||
#include <cstddef>
 | 
			
		||||
#include <iostream>
 | 
			
		||||
#include <thread>
 | 
			
		||||
#include <chrono>
 | 
			
		||||
| 
						 | 
				
			
			@ -49,7 +50,7 @@ VideoState::VideoState()
 | 
			
		|||
    , av_sync_type(AV_SYNC_DEFAULT)
 | 
			
		||||
    , audio_st(nullptr)
 | 
			
		||||
    , video_st(nullptr), frame_last_pts(0.0)
 | 
			
		||||
    , video_clock(0.0), sws_context(nullptr), rgbaFrame(nullptr), pictq_size(0)
 | 
			
		||||
    , video_clock(0.0), sws_context(nullptr), pictq_size(0)
 | 
			
		||||
    , pictq_rindex(0), pictq_windex(0)
 | 
			
		||||
    , mSeekRequested(false)
 | 
			
		||||
    , mSeekPos(0)
 | 
			
		||||
| 
						 | 
				
			
			@ -82,10 +83,11 @@ void PacketQueue::put(AVPacket *pkt)
 | 
			
		|||
    pkt1 = (AVPacketList*)av_malloc(sizeof(AVPacketList));
 | 
			
		||||
    if(!pkt1) throw std::bad_alloc();
 | 
			
		||||
 | 
			
		||||
    if(pkt != &flush_pkt && !pkt->buf && av_packet_ref(&pkt1->pkt, pkt) < 0)
 | 
			
		||||
        throw std::runtime_error("Failed to duplicate packet");
 | 
			
		||||
 | 
			
		||||
    if(pkt == &flush_pkt)
 | 
			
		||||
        pkt1->pkt = *pkt;
 | 
			
		||||
    else
 | 
			
		||||
        av_packet_move_ref(&pkt1->pkt, pkt);
 | 
			
		||||
 | 
			
		||||
    pkt1->next = nullptr;
 | 
			
		||||
 | 
			
		||||
    this->mutex.lock ();
 | 
			
		||||
| 
						 | 
				
			
			@ -116,7 +118,8 @@ int PacketQueue::get(AVPacket *pkt, VideoState *is)
 | 
			
		|||
            this->nb_packets--;
 | 
			
		||||
            this->size -= pkt1->pkt.size;
 | 
			
		||||
 | 
			
		||||
            *pkt = pkt1->pkt;
 | 
			
		||||
            av_packet_unref(pkt);
 | 
			
		||||
            av_packet_move_ref(pkt, &pkt1->pkt);
 | 
			
		||||
            av_free(pkt1);
 | 
			
		||||
 | 
			
		||||
            return 1;
 | 
			
		||||
| 
						 | 
				
			
			@ -155,6 +158,39 @@ void PacketQueue::clear()
 | 
			
		|||
    this->mutex.unlock ();
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
int VideoPicture::set_dimensions(int w, int h) {
 | 
			
		||||
  if (this->rgbaFrame != nullptr && this->rgbaFrame->width == w &&
 | 
			
		||||
      this->rgbaFrame->height == h) {
 | 
			
		||||
    return 0;
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  std::unique_ptr<AVFrame, VideoPicture::AVFrameDeleter> frame{
 | 
			
		||||
      av_frame_alloc()};
 | 
			
		||||
  if (frame == nullptr) {
 | 
			
		||||
    std::cerr << "av_frame_alloc failed" << std::endl;
 | 
			
		||||
    return -1;
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  constexpr AVPixelFormat kPixFmt = AV_PIX_FMT_RGBA;
 | 
			
		||||
  frame->format = kPixFmt;
 | 
			
		||||
  frame->width = w;
 | 
			
		||||
  frame->height = h;
 | 
			
		||||
  if (av_image_alloc(frame->data, frame->linesize, frame->width, frame->height,
 | 
			
		||||
                     kPixFmt, 1) < 0) {
 | 
			
		||||
    std::cerr << "av_image_alloc failed" << std::endl;
 | 
			
		||||
    return -1;
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  this->rgbaFrame = std::move(frame);
 | 
			
		||||
  return 0;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
void VideoPicture::AVFrameDeleter::operator()(AVFrame* frame) const
 | 
			
		||||
{
 | 
			
		||||
    av_freep(frame->data);
 | 
			
		||||
    av_frame_free(&frame);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
int VideoState::istream_read(void *user_data, uint8_t *buf, int buf_size)
 | 
			
		||||
{
 | 
			
		||||
    try
 | 
			
		||||
| 
						 | 
				
			
			@ -220,7 +256,7 @@ void VideoState::video_display(VideoPicture *vp)
 | 
			
		|||
        osg::ref_ptr<osg::Image> image = new osg::Image;
 | 
			
		||||
 | 
			
		||||
        image->setImage(this->video_ctx->width, this->video_ctx->height,
 | 
			
		||||
                        1, GL_RGBA, GL_RGBA, GL_UNSIGNED_BYTE, &vp->data[0], osg::Image::NO_DELETE);
 | 
			
		||||
                        1, GL_RGBA, GL_RGBA, GL_UNSIGNED_BYTE, vp->rgbaFrame->data[0], osg::Image::NO_DELETE);
 | 
			
		||||
 | 
			
		||||
        mTexture->setImage(image);
 | 
			
		||||
    }
 | 
			
		||||
| 
						 | 
				
			
			@ -296,23 +332,27 @@ int VideoState::queue_picture(AVFrame *pFrame, double pts)
 | 
			
		|||
    // Convert the image into RGBA format
 | 
			
		||||
    // TODO: we could do this in a pixel shader instead, if the source format
 | 
			
		||||
    // matches a commonly used format (ie YUV420P)
 | 
			
		||||
    if(this->sws_context == nullptr)
 | 
			
		||||
    const int w = pFrame->width;
 | 
			
		||||
    const int h = pFrame->height;
 | 
			
		||||
    if(this->sws_context == nullptr || this->sws_context_w != w || this->sws_context_h != h)
 | 
			
		||||
    {
 | 
			
		||||
        int w = this->video_ctx->width;
 | 
			
		||||
        int h = this->video_ctx->height;
 | 
			
		||||
        if (this->sws_context != nullptr)
 | 
			
		||||
            sws_freeContext(this->sws_context);
 | 
			
		||||
        this->sws_context = sws_getContext(w, h, this->video_ctx->pix_fmt,
 | 
			
		||||
                                           w, h, AV_PIX_FMT_RGBA, SWS_BICUBIC,
 | 
			
		||||
                                           nullptr, nullptr, nullptr);
 | 
			
		||||
        if(this->sws_context == nullptr)
 | 
			
		||||
            throw std::runtime_error("Cannot initialize the conversion context!\n");
 | 
			
		||||
        this->sws_context_w = w;
 | 
			
		||||
        this->sws_context_h = h;
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    vp->pts = pts;
 | 
			
		||||
    vp->data.resize(this->video_ctx->width * this->video_ctx->height * 4);
 | 
			
		||||
    if (vp->set_dimensions(w, h) < 0)
 | 
			
		||||
        return -1;
 | 
			
		||||
 | 
			
		||||
    uint8_t *dst[4] = { &vp->data[0], nullptr, nullptr, nullptr };
 | 
			
		||||
    sws_scale(this->sws_context, pFrame->data, pFrame->linesize,
 | 
			
		||||
              0, this->video_ctx->height, dst, this->rgbaFrame->linesize);
 | 
			
		||||
              0, this->video_ctx->height, vp->rgbaFrame->data, vp->rgbaFrame->linesize);
 | 
			
		||||
 | 
			
		||||
    // now we inform our display thread that we have a pic ready
 | 
			
		||||
    this->pictq_windex = (this->pictq_windex+1) % VIDEO_PICTURE_ARRAY_SIZE;
 | 
			
		||||
| 
						 | 
				
			
			@ -360,13 +400,11 @@ public:
 | 
			
		|||
    {
 | 
			
		||||
        VideoState* self = mVideoState;
 | 
			
		||||
        AVPacket pkt1, *packet = &pkt1;
 | 
			
		||||
        av_init_packet(packet);
 | 
			
		||||
        AVFrame *pFrame;
 | 
			
		||||
 | 
			
		||||
        pFrame = av_frame_alloc();
 | 
			
		||||
 | 
			
		||||
        self->rgbaFrame = av_frame_alloc();
 | 
			
		||||
        av_image_alloc(self->rgbaFrame->data, self->rgbaFrame->linesize, self->video_ctx->width, self->video_ctx->height, AV_PIX_FMT_RGBA, 1);
 | 
			
		||||
 | 
			
		||||
        while(self->videoq.get(packet, self) >= 0)
 | 
			
		||||
        {
 | 
			
		||||
            if(packet->data == flush_pkt.data)
 | 
			
		||||
| 
						 | 
				
			
			@ -407,10 +445,7 @@ public:
 | 
			
		|||
 | 
			
		||||
        av_packet_unref(packet);
 | 
			
		||||
 | 
			
		||||
        av_free(pFrame);
 | 
			
		||||
 | 
			
		||||
        av_freep(&self->rgbaFrame->data[0]);
 | 
			
		||||
        av_free(self->rgbaFrame);
 | 
			
		||||
        av_frame_free(&pFrame);
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
private:
 | 
			
		||||
| 
						 | 
				
			
			@ -438,6 +473,7 @@ public:
 | 
			
		|||
 | 
			
		||||
        AVFormatContext *pFormatCtx = self->format_ctx;
 | 
			
		||||
        AVPacket pkt1, *packet = &pkt1;
 | 
			
		||||
        av_init_packet(packet);
 | 
			
		||||
 | 
			
		||||
        try
 | 
			
		||||
        {
 | 
			
		||||
| 
						 | 
				
			
			@ -673,16 +709,13 @@ void VideoState::init(std::shared_ptr<std::istream> inputstream, const std::stri
 | 
			
		|||
        {
 | 
			
		||||
          if (this->format_ctx->pb != nullptr)
 | 
			
		||||
          {
 | 
			
		||||
              av_free(this->format_ctx->pb->buffer);
 | 
			
		||||
              this->format_ctx->pb->buffer = nullptr;
 | 
			
		||||
 | 
			
		||||
              av_free(this->format_ctx->pb);
 | 
			
		||||
              this->format_ctx->pb = nullptr;
 | 
			
		||||
              av_freep(&this->format_ctx->pb->buffer);
 | 
			
		||||
              avio_context_free(&this->format_ctx->pb);
 | 
			
		||||
          }
 | 
			
		||||
        }
 | 
			
		||||
        // "Note that a user-supplied AVFormatContext will be freed on failure."
 | 
			
		||||
        this->format_ctx = nullptr;
 | 
			
		||||
        av_free(ioCtx);
 | 
			
		||||
        avio_context_free(&ioCtx);
 | 
			
		||||
        throw std::runtime_error("Failed to open video input");
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			@ -756,11 +789,8 @@ void VideoState::deinit()
 | 
			
		|||
        ///
 | 
			
		||||
        if (this->format_ctx->pb != nullptr)
 | 
			
		||||
        {
 | 
			
		||||
            av_free(this->format_ctx->pb->buffer);
 | 
			
		||||
            this->format_ctx->pb->buffer = nullptr;
 | 
			
		||||
 | 
			
		||||
            av_free(this->format_ctx->pb);
 | 
			
		||||
            this->format_ctx->pb = nullptr;
 | 
			
		||||
            av_freep(&this->format_ctx->pb->buffer);
 | 
			
		||||
            avio_context_free(&this->format_ctx->pb);
 | 
			
		||||
        }
 | 
			
		||||
        avformat_close_input(&this->format_ctx);
 | 
			
		||||
    }
 | 
			
		||||
| 
						 | 
				
			
			@ -771,6 +801,11 @@ void VideoState::deinit()
 | 
			
		|||
        mTexture->setImage(nullptr);
 | 
			
		||||
        mTexture = nullptr;
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    // Deallocate the RGBA frame queue.
 | 
			
		||||
    for (std::size_t i = 0; i < VIDEO_PICTURE_ARRAY_SIZE; ++i)
 | 
			
		||||
        this->pictq[i].rgbaFrame = nullptr;
 | 
			
		||||
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
double VideoState::get_external_clock()
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
							
								
								
									
										13
									
								
								extern/osg-ffmpeg-videoplayer/videostate.hpp
									
									
									
									
										vendored
									
									
								
							
							
						
						
									
										13
									
								
								extern/osg-ffmpeg-videoplayer/videostate.hpp
									
									
									
									
										vendored
									
									
								
							| 
						 | 
				
			
			@ -95,7 +95,16 @@ struct VideoPicture {
 | 
			
		|||
    VideoPicture() : pts(0.0)
 | 
			
		||||
    { }
 | 
			
		||||
 | 
			
		||||
    std::vector<uint8_t> data;
 | 
			
		||||
    struct AVFrameDeleter {
 | 
			
		||||
        void operator()(AVFrame* frame) const;
 | 
			
		||||
    };
 | 
			
		||||
 | 
			
		||||
    // Sets frame dimensions.
 | 
			
		||||
    // Must be called before writing to `rgbaFrame`.
 | 
			
		||||
    // Return -1 on error.
 | 
			
		||||
    int set_dimensions(int w, int h);
 | 
			
		||||
 | 
			
		||||
    std::unique_ptr<AVFrame, AVFrameDeleter> rgbaFrame;
 | 
			
		||||
    double pts;
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			@ -159,8 +168,8 @@ struct VideoState {
 | 
			
		|||
    double      video_clock; ///<pts of last decoded frame / predicted pts of next decoded frame
 | 
			
		||||
    PacketQueue videoq;
 | 
			
		||||
    SwsContext*  sws_context;
 | 
			
		||||
    int sws_context_w, sws_context_h;
 | 
			
		||||
    VideoPicture pictq[VIDEO_PICTURE_ARRAY_SIZE];
 | 
			
		||||
    AVFrame*     rgbaFrame; // used as buffer for the frame converted from its native format to RGBA
 | 
			
		||||
    int          pictq_size, pictq_rindex, pictq_windex;
 | 
			
		||||
    std::mutex pictq_mutex;
 | 
			
		||||
    std::condition_variable pictq_cond;
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
		Loading…
	
		Reference in a new issue