osg: fix ffmpeg3.x build
Thanks to the Arch Linux team for the patch.
parent b38efd67b5
commit e7c91f0ee4
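For context, the patch below ports the osg ffmpeg plugin from calls that FFmpeg dropped in the 3.x series to their current replacements. A minimal standalone sketch of the three migrations involved (illustrative only, not taken from the commit; the function name and ctx are assumptions standing in for an already-opened AVCodecContext):

// Sketch of the FFmpeg 3.x API migration the patch performs (names here are illustrative).
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavutil/frame.h>
#include <libavutil/pixfmt.h>
}

static void ffmpeg3_migration_sketch(AVCodecContext *ctx)
{
    // 1. avcodec_alloc_frame() is gone; frames are allocated with av_frame_alloc().
    AVFrame *frame = av_frame_alloc();

    // 2. Pixel format constants gained the AV_ prefix (PIX_FMT_* -> AV_PIX_FMT_*),
    //    and the enum type is AVPixelFormat rather than PixelFormat.
    const bool has_alpha = (ctx->pix_fmt == AV_PIX_FMT_YUVA420P);

    // 3. The get_buffer()/release_buffer() pair is replaced by the single,
    //    reference-counted get_buffer2() callback.
    ctx->get_buffer2 = avcodec_default_get_buffer2;

    (void)has_alpha;
    av_frame_free(&frame);
}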
@@ -0,0 +1,157 @@
Description: Replace deprecated FFmpeg API
Author: Andreas Cadhalpun <Andreas.Cadhalpun@googlemail.com>
Last-Update: <2015-11-02>

--- src/osgPlugins/ffmpeg/FFmpegDecoderVideo.cpp
+++ src/osgPlugins/ffmpeg/FFmpegDecoderVideo.cpp
@@ -71,7 +71,7 @@ void FFmpegDecoderVideo::open(AVStream *
findAspectRatio();

// Find out whether we support Alpha channel
- m_alpha_channel = (m_context->pix_fmt == PIX_FMT_YUVA420P);
+ m_alpha_channel = (m_context->pix_fmt == AV_PIX_FMT_YUVA420P);

// Find out the framerate
m_frame_rate = av_q2d(stream->avg_frame_rate);
@@ -91,20 +91,19 @@ void FFmpegDecoderVideo::open(AVStream *
throw std::runtime_error("avcodec_open() failed");

// Allocate video frame
- m_frame.reset(avcodec_alloc_frame());
+ m_frame.reset(av_frame_alloc());

// Allocate converted RGB frame
- m_frame_rgba.reset(avcodec_alloc_frame());
- m_buffer_rgba[0].resize(avpicture_get_size(PIX_FMT_RGB24, width(), height()));
+ m_frame_rgba.reset(av_frame_alloc());
+ m_buffer_rgba[0].resize(avpicture_get_size(AV_PIX_FMT_RGB24, width(), height()));
m_buffer_rgba[1].resize(m_buffer_rgba[0].size());

// Assign appropriate parts of the buffer to image planes in m_frame_rgba
- avpicture_fill((AVPicture *) (m_frame_rgba).get(), &(m_buffer_rgba[0])[0], PIX_FMT_RGB24, width(), height());
+ avpicture_fill((AVPicture *) (m_frame_rgba).get(), &(m_buffer_rgba[0])[0], AV_PIX_FMT_RGB24, width(), height());

// Override get_buffer()/release_buffer() from codec context in order to retrieve the PTS of each frame.
m_context->opaque = this;
- m_context->get_buffer = getBuffer;
- m_context->release_buffer = releaseBuffer;
+ m_context->get_buffer2 = getBuffer;
}


@@ -263,8 +262,8 @@ int FFmpegDecoderVideo::convert(AVPictur
#ifdef USE_SWSCALE
if (m_swscale_ctx==0)
{
- m_swscale_ctx = sws_getContext(src_width, src_height, (PixelFormat) src_pix_fmt,
- src_width, src_height, (PixelFormat) dst_pix_fmt,
+ m_swscale_ctx = sws_getContext(src_width, src_height, (AVPixelFormat) src_pix_fmt,
+ src_width, src_height, (AVPixelFormat) dst_pix_fmt,
/*SWS_BILINEAR*/ SWS_BICUBIC, NULL, NULL, NULL);
}

@@ -311,14 +310,14 @@ void FFmpegDecoderVideo::publishFrame(co
AVPicture * const dst = (AVPicture *) m_frame_rgba.get();

// Assign appropriate parts of the buffer to image planes in m_frame_rgba
- avpicture_fill((AVPicture *) (m_frame_rgba).get(), &(m_buffer_rgba[m_writeBuffer])[0], PIX_FMT_RGB24, width(), height());
+ avpicture_fill((AVPicture *) (m_frame_rgba).get(), &(m_buffer_rgba[m_writeBuffer])[0], AV_PIX_FMT_RGB24, width(), height());

// Convert YUVA420p (i.e. YUV420p plus alpha channel) using our own routine

- if (m_context->pix_fmt == PIX_FMT_YUVA420P)
+ if (m_context->pix_fmt == AV_PIX_FMT_YUVA420P)
yuva420pToRgba(dst, src, width(), height());
else
- convert(dst, PIX_FMT_RGB24, src, m_context->pix_fmt, width(), height());
+ convert(dst, AV_PIX_FMT_RGB24, src, m_context->pix_fmt, width(), height());

// Wait 'delay' seconds before publishing the picture.
int i_delay = static_cast<int>(delay * 1000000 + 0.5);
@@ -345,7 +344,7 @@ void FFmpegDecoderVideo::publishFrame(co

void FFmpegDecoderVideo::yuva420pToRgba(AVPicture * const dst, AVPicture * const src, int width, int height)
{
- convert(dst, PIX_FMT_RGB24, src, m_context->pix_fmt, width, height);
+ convert(dst, AV_PIX_FMT_RGB24, src, m_context->pix_fmt, width, height);

const size_t bpp = 4;

@@ -363,31 +362,28 @@ void FFmpegDecoderVideo::yuva420pToRgba(
}
}

-
-
-int FFmpegDecoderVideo::getBuffer(AVCodecContext * const context, AVFrame * const picture)
+int FFmpegDecoderVideo::getBuffer(AVCodecContext * const context, AVFrame * const picture, int flags)
{
+ AVBufferRef *ref;
const FFmpegDecoderVideo * const this_ = reinterpret_cast<const FFmpegDecoderVideo*>(context->opaque);

- const int result = avcodec_default_get_buffer(context, picture);
+ const int result = avcodec_default_get_buffer2(context, picture, flags);
int64_t * p_pts = reinterpret_cast<int64_t*>( av_malloc(sizeof(int64_t)) );

*p_pts = this_->m_packet_pts;
picture->opaque = p_pts;

+ ref = av_buffer_create((uint8_t *)picture->opaque, sizeof(int64_t), FFmpegDecoderVideo::freeBuffer, picture->buf[0], flags);
+ picture->buf[0] = ref;
+
return result;
}

-
-
-void FFmpegDecoderVideo::releaseBuffer(AVCodecContext * const context, AVFrame * const picture)
+void FFmpegDecoderVideo::freeBuffer(void *opaque, uint8_t *data)
{
- if (picture != 0)
- av_freep(&picture->opaque);
-
- avcodec_default_release_buffer(context, picture);
+ AVBufferRef *ref = (AVBufferRef *)opaque;
+ av_buffer_unref(&ref);
+ av_free(data);
}

-
-
} // namespace osgFFmpeg
--- src/osgPlugins/ffmpeg/FFmpegDecoderVideo.hpp
+++ src/osgPlugins/ffmpeg/FFmpegDecoderVideo.hpp
@@ -94,8 +94,8 @@ private:
int src_pix_fmt, int src_width, int src_height);


- static int getBuffer(AVCodecContext * context, AVFrame * picture);
- static void releaseBuffer(AVCodecContext * context, AVFrame * picture);
+ static int getBuffer(AVCodecContext * context, AVFrame * picture, int flags);
+ static void freeBuffer(void * opaque, uint8_t *data);

PacketQueue & m_packets;
FFmpegClocks & m_clocks;
--- src/osgPlugins/ffmpeg/FFmpegParameters.cpp
+++ src/osgPlugins/ffmpeg/FFmpegParameters.cpp
@@ -19,7 +19,7 @@ extern "C"
#include <libavutil/pixdesc.h>
}

-inline PixelFormat osg_av_get_pix_fmt(const char *name) { return av_get_pix_fmt(name); }
+inline AVPixelFormat osg_av_get_pix_fmt(const char *name) { return av_get_pix_fmt(name); }


namespace osgFFmpeg {
--- src/osgPlugins/ffmpeg/FFmpegDecoderAudio.cpp.orig	2016-02-18 21:25:39.627923629 +0000
+++ src/osgPlugins/ffmpeg/FFmpegDecoderAudio.cpp	2016-02-18 21:26:17.071140100 +0000
@@ -227,8 +227,7 @@
if (avcodec_open2(m_context, p_codec, NULL) < 0)
throw std::runtime_error("avcodec_open() failed");

- m_context->get_buffer = avcodec_default_get_buffer;
- m_context->release_buffer = avcodec_default_release_buffer;
+ m_context->get_buffer2 = avcodec_default_get_buffer2;

}

@@ -1,9 +1,7 @@
-broken="xine"
-
# Template file for 'osg'
pkgname=osg
version=3.4.0
-revision=4
+revision=5
wrksrc=OpenSceneGraph-${version}
build_style=cmake
# don't use /usr/lib64 on 64bit platforms