1
0
Fork 0
mirror of https://github.com/OpenMW/openmw.git synced 2025-02-06 00:45:34 +00:00

Merge branch 'fix-video-arm' into 'master'

osg-ffmpeg-videoplayer: Fix crash on ARM

Closes #5807

See merge request OpenMW/openmw!564
This commit is contained in:
psi29a 2021-03-08 08:05:25 +00:00
commit 194875dec1
5 changed files with 83 additions and 40 deletions

View file

@ -97,6 +97,7 @@
Bug #5739: Saving and loading the save a second or two before hitting the ground doesn't count fall damage Bug #5739: Saving and loading the save a second or two before hitting the ground doesn't count fall damage
Bug #5758: Paralyzed actors behavior is inconsistent with vanilla Bug #5758: Paralyzed actors behavior is inconsistent with vanilla
Bug #5762: Movement solver is insufficiently robust Bug #5762: Movement solver is insufficiently robust
Bug #5807: Video decoding crash on ARM
Bug #5821: NPCs from mods getting removed if mod order was changed Bug #5821: NPCs from mods getting removed if mod order was changed
Bug #5835: OpenMW doesn't accept negative values for NPC's hello, alarm, fight, and flee Bug #5835: OpenMW doesn't accept negative values for NPC's hello, alarm, fight, and flee
Bug #5836: OpenMW dialogue/greeting/voice filter doesn't accept negative Ai values for NPC's hello, alarm, fight, and flee Bug #5836: OpenMW dialogue/greeting/voice filter doesn't accept negative Ai values for NPC's hello, alarm, fight, and flee

View file

@ -287,9 +287,9 @@ void FFmpeg_Decoder::close()
mStream = nullptr; mStream = nullptr;
av_packet_unref(&mPacket); av_packet_unref(&mPacket);
av_freep(&mFrame);
swr_free(&mSwr);
av_freep(&mDataBuf); av_freep(&mDataBuf);
av_frame_free(&mFrame);
swr_free(&mSwr);
if(mFormatCtx) if(mFormatCtx)
{ {
@ -302,11 +302,9 @@ void FFmpeg_Decoder::close()
// //
if (mFormatCtx->pb->buffer != nullptr) if (mFormatCtx->pb->buffer != nullptr)
{ {
av_free(mFormatCtx->pb->buffer); av_freep(&mFormatCtx->pb->buffer);
mFormatCtx->pb->buffer = nullptr;
} }
av_free(mFormatCtx->pb); avio_context_free(&mFormatCtx->pb);
mFormatCtx->pb = nullptr;
} }
avformat_close_input(&mFormatCtx); avformat_close_input(&mFormatCtx);
} }

View file

@ -91,7 +91,7 @@ MovieAudioDecoder::~MovieAudioDecoder()
if(mAudioContext) if(mAudioContext)
avcodec_free_context(&mAudioContext); avcodec_free_context(&mAudioContext);
av_freep(&mFrame); av_frame_free(&mFrame);
av_freep(&mDataBuf); av_freep(&mDataBuf);
} }
@ -222,7 +222,7 @@ int MovieAudioDecoder::audio_decode_frame(AVFrame *frame, int &sample_skip)
return result; return result;
} }
av_packet_unref(&mPacket); av_packet_unref(pkt);
mGetNextPacket = true; mGetNextPacket = true;
/* next packet */ /* next packet */

View file

@ -2,6 +2,7 @@
#include <algorithm> #include <algorithm>
#include <cassert> #include <cassert>
#include <cstddef>
#include <iostream> #include <iostream>
#include <thread> #include <thread>
#include <chrono> #include <chrono>
@ -49,7 +50,7 @@ VideoState::VideoState()
, av_sync_type(AV_SYNC_DEFAULT) , av_sync_type(AV_SYNC_DEFAULT)
, audio_st(nullptr) , audio_st(nullptr)
, video_st(nullptr), frame_last_pts(0.0) , video_st(nullptr), frame_last_pts(0.0)
, video_clock(0.0), sws_context(nullptr), rgbaFrame(nullptr), pictq_size(0) , video_clock(0.0), sws_context(nullptr), pictq_size(0)
, pictq_rindex(0), pictq_windex(0) , pictq_rindex(0), pictq_windex(0)
, mSeekRequested(false) , mSeekRequested(false)
, mSeekPos(0) , mSeekPos(0)
@ -82,10 +83,11 @@ void PacketQueue::put(AVPacket *pkt)
pkt1 = (AVPacketList*)av_malloc(sizeof(AVPacketList)); pkt1 = (AVPacketList*)av_malloc(sizeof(AVPacketList));
if(!pkt1) throw std::bad_alloc(); if(!pkt1) throw std::bad_alloc();
if(pkt != &flush_pkt && !pkt->buf && av_packet_ref(&pkt1->pkt, pkt) < 0) if(pkt == &flush_pkt)
throw std::runtime_error("Failed to duplicate packet");
pkt1->pkt = *pkt; pkt1->pkt = *pkt;
else
av_packet_move_ref(&pkt1->pkt, pkt);
pkt1->next = nullptr; pkt1->next = nullptr;
this->mutex.lock (); this->mutex.lock ();
@ -116,7 +118,8 @@ int PacketQueue::get(AVPacket *pkt, VideoState *is)
this->nb_packets--; this->nb_packets--;
this->size -= pkt1->pkt.size; this->size -= pkt1->pkt.size;
*pkt = pkt1->pkt; av_packet_unref(pkt);
av_packet_move_ref(pkt, &pkt1->pkt);
av_free(pkt1); av_free(pkt1);
return 1; return 1;
@ -155,6 +158,39 @@ void PacketQueue::clear()
this->mutex.unlock (); this->mutex.unlock ();
} }
// Ensures `rgbaFrame` is an allocated RGBA AVFrame of the requested
// width/height, reallocating only when the dimensions actually change.
// Returns 0 on success, -1 on allocation failure (in which case the
// previously held frame, if any, is left untouched).
int VideoPicture::set_dimensions(int w, int h) {
// Fast path: a frame of the right size is already allocated.
if (this->rgbaFrame != nullptr && this->rgbaFrame->width == w &&
this->rgbaFrame->height == h) {
return 0;
}
// Build the replacement in a local unique_ptr so that any failure
// below leaves `rgbaFrame` in its prior state; the custom
// AVFrameDeleter releases both the pixel buffer and the frame struct.
std::unique_ptr<AVFrame, VideoPicture::AVFrameDeleter> frame{
av_frame_alloc()};
if (frame == nullptr) {
std::cerr << "av_frame_alloc failed" << std::endl;
return -1;
}
constexpr AVPixelFormat kPixFmt = AV_PIX_FMT_RGBA;
frame->format = kPixFmt;
frame->width = w;
frame->height = h;
// av_image_alloc fills frame->data/linesize with a freshly allocated
// pixel buffer; the final argument (align = 1) requests packed rows.
if (av_image_alloc(frame->data, frame->linesize, frame->width, frame->height,
kPixFmt, 1) < 0) {
std::cerr << "av_image_alloc failed" << std::endl;
return -1;
}
// Success: swap the new frame in (the old one, if any, is freed here).
this->rgbaFrame = std::move(frame);
return 0;
}
// Deleter for `rgbaFrame`: first releases the pixel planes that
// set_dimensions() allocated via av_image_alloc (av_freep frees
// frame->data[0] and nulls it), then frees the AVFrame struct itself.
// unique_ptr only invokes this for non-null pointers, so no null check
// is needed here.
void VideoPicture::AVFrameDeleter::operator()(AVFrame* frame) const
{
av_freep(frame->data);
av_frame_free(&frame);
}
int VideoState::istream_read(void *user_data, uint8_t *buf, int buf_size) int VideoState::istream_read(void *user_data, uint8_t *buf, int buf_size)
{ {
try try
@ -220,7 +256,7 @@ void VideoState::video_display(VideoPicture *vp)
osg::ref_ptr<osg::Image> image = new osg::Image; osg::ref_ptr<osg::Image> image = new osg::Image;
image->setImage(this->video_ctx->width, this->video_ctx->height, image->setImage(this->video_ctx->width, this->video_ctx->height,
1, GL_RGBA, GL_RGBA, GL_UNSIGNED_BYTE, &vp->data[0], osg::Image::NO_DELETE); 1, GL_RGBA, GL_RGBA, GL_UNSIGNED_BYTE, vp->rgbaFrame->data[0], osg::Image::NO_DELETE);
mTexture->setImage(image); mTexture->setImage(image);
} }
@ -296,23 +332,27 @@ int VideoState::queue_picture(AVFrame *pFrame, double pts)
// Convert the image into RGBA format // Convert the image into RGBA format
// TODO: we could do this in a pixel shader instead, if the source format // TODO: we could do this in a pixel shader instead, if the source format
// matches a commonly used format (ie YUV420P) // matches a commonly used format (ie YUV420P)
if(this->sws_context == nullptr) const int w = pFrame->width;
const int h = pFrame->height;
if(this->sws_context == nullptr || this->sws_context_w != w || this->sws_context_h != h)
{ {
int w = this->video_ctx->width; if (this->sws_context != nullptr)
int h = this->video_ctx->height; sws_freeContext(this->sws_context);
this->sws_context = sws_getContext(w, h, this->video_ctx->pix_fmt, this->sws_context = sws_getContext(w, h, this->video_ctx->pix_fmt,
w, h, AV_PIX_FMT_RGBA, SWS_BICUBIC, w, h, AV_PIX_FMT_RGBA, SWS_BICUBIC,
nullptr, nullptr, nullptr); nullptr, nullptr, nullptr);
if(this->sws_context == nullptr) if(this->sws_context == nullptr)
throw std::runtime_error("Cannot initialize the conversion context!\n"); throw std::runtime_error("Cannot initialize the conversion context!\n");
this->sws_context_w = w;
this->sws_context_h = h;
} }
vp->pts = pts; vp->pts = pts;
vp->data.resize(this->video_ctx->width * this->video_ctx->height * 4); if (vp->set_dimensions(w, h) < 0)
return -1;
uint8_t *dst[4] = { &vp->data[0], nullptr, nullptr, nullptr };
sws_scale(this->sws_context, pFrame->data, pFrame->linesize, sws_scale(this->sws_context, pFrame->data, pFrame->linesize,
0, this->video_ctx->height, dst, this->rgbaFrame->linesize); 0, this->video_ctx->height, vp->rgbaFrame->data, vp->rgbaFrame->linesize);
// now we inform our display thread that we have a pic ready // now we inform our display thread that we have a pic ready
this->pictq_windex = (this->pictq_windex+1) % VIDEO_PICTURE_ARRAY_SIZE; this->pictq_windex = (this->pictq_windex+1) % VIDEO_PICTURE_ARRAY_SIZE;
@ -360,13 +400,11 @@ public:
{ {
VideoState* self = mVideoState; VideoState* self = mVideoState;
AVPacket pkt1, *packet = &pkt1; AVPacket pkt1, *packet = &pkt1;
av_init_packet(packet);
AVFrame *pFrame; AVFrame *pFrame;
pFrame = av_frame_alloc(); pFrame = av_frame_alloc();
self->rgbaFrame = av_frame_alloc();
av_image_alloc(self->rgbaFrame->data, self->rgbaFrame->linesize, self->video_ctx->width, self->video_ctx->height, AV_PIX_FMT_RGBA, 1);
while(self->videoq.get(packet, self) >= 0) while(self->videoq.get(packet, self) >= 0)
{ {
if(packet->data == flush_pkt.data) if(packet->data == flush_pkt.data)
@ -407,10 +445,7 @@ public:
av_packet_unref(packet); av_packet_unref(packet);
av_free(pFrame); av_frame_free(&pFrame);
av_freep(&self->rgbaFrame->data[0]);
av_free(self->rgbaFrame);
} }
private: private:
@ -438,6 +473,7 @@ public:
AVFormatContext *pFormatCtx = self->format_ctx; AVFormatContext *pFormatCtx = self->format_ctx;
AVPacket pkt1, *packet = &pkt1; AVPacket pkt1, *packet = &pkt1;
av_init_packet(packet);
try try
{ {
@ -673,16 +709,13 @@ void VideoState::init(std::shared_ptr<std::istream> inputstream, const std::stri
{ {
if (this->format_ctx->pb != nullptr) if (this->format_ctx->pb != nullptr)
{ {
av_free(this->format_ctx->pb->buffer); av_freep(&this->format_ctx->pb->buffer);
this->format_ctx->pb->buffer = nullptr; avio_context_free(&this->format_ctx->pb);
av_free(this->format_ctx->pb);
this->format_ctx->pb = nullptr;
} }
} }
// "Note that a user-supplied AVFormatContext will be freed on failure." // "Note that a user-supplied AVFormatContext will be freed on failure."
this->format_ctx = nullptr; this->format_ctx = nullptr;
av_free(ioCtx); avio_context_free(&ioCtx);
throw std::runtime_error("Failed to open video input"); throw std::runtime_error("Failed to open video input");
} }
@ -756,11 +789,8 @@ void VideoState::deinit()
/// ///
if (this->format_ctx->pb != nullptr) if (this->format_ctx->pb != nullptr)
{ {
av_free(this->format_ctx->pb->buffer); av_freep(&this->format_ctx->pb->buffer);
this->format_ctx->pb->buffer = nullptr; avio_context_free(&this->format_ctx->pb);
av_free(this->format_ctx->pb);
this->format_ctx->pb = nullptr;
} }
avformat_close_input(&this->format_ctx); avformat_close_input(&this->format_ctx);
} }
@ -771,6 +801,11 @@ void VideoState::deinit()
mTexture->setImage(nullptr); mTexture->setImage(nullptr);
mTexture = nullptr; mTexture = nullptr;
} }
// Deallocate RGBA frame queue.
for (std::size_t i = 0; i < VIDEO_PICTURE_ARRAY_SIZE; ++i)
this->pictq[i].rgbaFrame = nullptr;
} }
double VideoState::get_external_clock() double VideoState::get_external_clock()

View file

@ -95,7 +95,16 @@ struct VideoPicture {
VideoPicture() : pts(0.0) VideoPicture() : pts(0.0)
{ } { }
std::vector<uint8_t> data; struct AVFrameDeleter {
void operator()(AVFrame* frame) const;
};
// Sets frame dimensions.
// Must be called before writing to `rgbaFrame`.
// Return -1 on error.
int set_dimensions(int w, int h);
std::unique_ptr<AVFrame, AVFrameDeleter> rgbaFrame;
double pts; double pts;
}; };
@ -159,8 +168,8 @@ struct VideoState {
double video_clock; ///<pts of last decoded frame / predicted pts of next decoded frame double video_clock; ///<pts of last decoded frame / predicted pts of next decoded frame
PacketQueue videoq; PacketQueue videoq;
SwsContext* sws_context; SwsContext* sws_context;
int sws_context_w, sws_context_h;
VideoPicture pictq[VIDEO_PICTURE_ARRAY_SIZE]; VideoPicture pictq[VIDEO_PICTURE_ARRAY_SIZE];
AVFrame* rgbaFrame; // used as buffer for the frame converted from its native format to RGBA
int pictq_size, pictq_rindex, pictq_windex; int pictq_size, pictq_rindex, pictq_windex;
std::mutex pictq_mutex; std::mutex pictq_mutex;
std::condition_variable pictq_cond; std::condition_variable pictq_cond;