-rw-r--r--  media/base/video_frame.cc                       29
-rw-r--r--  media/base/video_frame.h                        17
-rw-r--r--  media/base/video_frame_unittest.cc              26
-rw-r--r--  media/filters/ffmpeg_video_allocator.cc        159
-rw-r--r--  media/filters/ffmpeg_video_allocator.h          87
-rw-r--r--  media/media.gyp                                  2
-rw-r--r--  media/tools/player_x11/gles_video_renderer.cc   11
7 files changed, 273 insertions(+), 58 deletions(-)
diff --git a/media/base/video_frame.cc b/media/base/video_frame.cc
index 857af70..e7da94e 100644
--- a/media/base/video_frame.cc
+++ b/media/base/video_frame.cc
@@ -50,21 +50,26 @@ void VideoFrame::CreateFrame(VideoFrame::Format format,
*frame_out = alloc_worked ? frame : NULL;
}
-void VideoFrame::CreateFrameExternal(VideoFrame::Format format,
+void VideoFrame::CreateFrameExternal(SurfaceType type,
+ Format format,
size_t width,
size_t height,
+ size_t planes,
uint8* const data[kMaxPlanes],
const int32 strides[kMaxPlanes],
base::TimeDelta timestamp,
base::TimeDelta duration,
+ void* private_buffer,
scoped_refptr<VideoFrame>* frame_out) {
DCHECK(frame_out);
scoped_refptr<VideoFrame> frame =
- new VideoFrame(VideoFrame::TYPE_SYSTEM_MEMORY, format, width, height);
+ new VideoFrame(type, format, width, height);
if (frame) {
frame->SetTimestamp(timestamp);
frame->SetDuration(duration);
frame->external_memory_ = true;
+ frame->planes_ = planes;
+ frame->private_buffer_ = private_buffer;
for (size_t i = 0; i < kMaxPlanes; ++i) {
frame->data_[i] = data[i];
frame->strides_[i] = strides[i];
@@ -117,26 +122,6 @@ void VideoFrame::CreateBlackFrame(int width, int height,
*frame_out = frame;
}
-// static
-void VideoFrame::CreatePrivateFrame(VideoFrame::SurfaceType type,
- VideoFrame::Format format,
- size_t width,
- size_t height,
- base::TimeDelta timestamp,
- base::TimeDelta duration,
- void* private_buffer,
- scoped_refptr<VideoFrame>* frame_out) {
- DCHECK(frame_out);
- scoped_refptr<VideoFrame> frame =
- new VideoFrame(type, format, width, height);
- if (frame) {
- frame->SetTimestamp(timestamp);
- frame->SetDuration(duration);
- frame->private_buffer_ = private_buffer;
- }
- *frame_out = frame;
-}
-
static inline size_t RoundUp(size_t value, size_t alignment) {
// Check that |alignment| is a power of 2.
DCHECK((alignment + (alignment - 1)) == (alignment | (alignment - 1)));
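The DCHECK above exploits an identity that holds only for powers of two: when alignment == 2^k, (alignment - 1) sets exactly the bits below bit k, so adding it and OR-ing it give the same result; any other alignment produces a carry that makes the sum larger. For example:

  // alignment = 8:  8 + 7 == 15 and 8 | 7 == 15  -> equal, DCHECK passes.
  // alignment = 6:  6 + 5 == 11 but 6 | 5 == 7   -> unequal, DCHECK fires.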
diff --git a/media/base/video_frame.h b/media/base/video_frame.h
index ddf6644..a135bdf 100644
--- a/media/base/video_frame.h
+++ b/media/base/video_frame.h
@@ -39,7 +39,7 @@ class VideoFrame : public StreamSample {
enum SurfaceType {
TYPE_SYSTEM_MEMORY,
- TYPE_OMX_BUFFER_HEAD,
+ TYPE_OMXBUFFERHEAD,
TYPE_EGL_IMAGE,
};
@@ -56,13 +56,16 @@ class VideoFrame : public StreamSample {
// Creates a new frame with given parameters. Buffers for the frame are
// provided externally. References to the buffers and strides are copied
// from |data| and |strides| respectively.
- static void CreateFrameExternal(Format format,
+ static void CreateFrameExternal(SurfaceType type,
+ Format format,
size_t width,
size_t height,
+ size_t planes,
uint8* const data[kMaxPlanes],
const int32 strides[kMaxPlanes],
base::TimeDelta timestamp,
base::TimeDelta duration,
+ void* private_buffer,
scoped_refptr<VideoFrame>* frame_out);
// Creates a frame with format equal to VideoFrame::EMPTY, width, height
@@ -74,16 +77,6 @@ class VideoFrame : public StreamSample {
static void CreateBlackFrame(int width, int height,
scoped_refptr<VideoFrame>* frame_out);
- // Creates a new frame of |type| with given parameters.
- static void CreatePrivateFrame(VideoFrame::SurfaceType type,
- VideoFrame::Format format,
- size_t width,
- size_t height,
- base::TimeDelta timestamp,
- base::TimeDelta duration,
- void* private_buffer,
- scoped_refptr<VideoFrame>* frame_out);
-
virtual SurfaceType type() const { return type_; }
Format format() const { return format_; }
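With CreatePrivateFrame() folded into CreateFrameExternal(), callers now pass the surface type, plane count, and (possibly NULL) private buffer explicitly. A minimal sketch of the system-memory case, mirroring the updated unit test below (the one-byte buffer is purely illustrative):

  uint8 memory[1] = { 0 };
  uint8* data[3] = { memory, NULL, NULL };
  int32 strides[3] = { 1, 0, 0 };
  scoped_refptr<media::VideoFrame> frame;
  media::VideoFrame::CreateFrameExternal(
      media::VideoFrame::TYPE_SYSTEM_MEMORY, media::VideoFrame::RGB32,
      0, 0, 3,                           // Width, height, planes.
      data, strides,
      base::TimeDelta(), base::TimeDelta(),
      NULL,                              // No |private_buffer| needed here.
      &frame);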
diff --git a/media/base/video_frame_unittest.cc b/media/base/video_frame_unittest.cc
index df407b5..633ccc0 100644
--- a/media/base/video_frame_unittest.cc
+++ b/media/base/video_frame_unittest.cc
@@ -176,35 +176,17 @@ TEST(VideoFrame, CreateBlackFrame) {
}
}
-TEST(VideoFrame, CreatePrivateFrame) {
- void* private_buffer = NULL;
- const base::TimeDelta kTimestampA = base::TimeDelta::FromMicroseconds(1337);
- const base::TimeDelta kDurationA = base::TimeDelta::FromMicroseconds(1667);
-
- // Create an EGL Frame.
- scoped_refptr<media::VideoFrame> frame;
- VideoFrame::CreatePrivateFrame(media::VideoFrame::TYPE_EGL_IMAGE,
- media::VideoFrame::RGBA, 0, 0,
- kTimestampA, kDurationA,
- private_buffer, &frame);
- ASSERT_TRUE(frame);
-
- // Test |frame| properties.
- EXPECT_EQ(media::VideoFrame::TYPE_EGL_IMAGE, frame->type());
- EXPECT_EQ(media::VideoFrame::RGBA, frame->format());
- EXPECT_EQ(private_buffer, frame->private_buffer());
- EXPECT_EQ(NULL, frame->data(VideoFrame::kYPlane));
-}
-
TEST(VideoFrame, CreateExternalFrame) {
scoped_array<uint8> memory(new uint8[1]);
scoped_refptr<media::VideoFrame> frame;
uint8* data[3] = {memory.get(), NULL, NULL};
int strides[3] = {1, 0, 0};
- VideoFrame::CreateFrameExternal(media::VideoFrame::RGB32, 0, 0,
+ VideoFrame::CreateFrameExternal(media::VideoFrame::TYPE_SYSTEM_MEMORY,
+ media::VideoFrame::RGB32, 0, 0, 3,
data, strides,
- base::TimeDelta(), base::TimeDelta(), &frame);
+ base::TimeDelta(), base::TimeDelta(),
+ NULL, &frame);
ASSERT_TRUE(frame);
// Test frame properties.
diff --git a/media/filters/ffmpeg_video_allocator.cc b/media/filters/ffmpeg_video_allocator.cc
new file mode 100644
index 0000000..5bf9448
--- /dev/null
+++ b/media/filters/ffmpeg_video_allocator.cc
@@ -0,0 +1,159 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/filters/ffmpeg_video_allocator.h"
+
+#include "media/ffmpeg/ffmpeg_common.h"
+
+namespace media {
+
+FFmpegVideoAllocator::FFmpegVideoAllocator()
+ : get_buffer_(NULL),
+ release_buffer_(NULL) {
+}
+
+void FFmpegVideoAllocator::Initialize(AVCodecContext* codec_context,
+ VideoFrame::Format surface_format) {
+ surface_format_ = surface_format;
+ get_buffer_ = codec_context->get_buffer;
+ release_buffer_ = codec_context->release_buffer;
+ codec_context->get_buffer = AllocateBuffer;
+ codec_context->release_buffer = ReleaseBuffer;
+ codec_context->opaque = this;
+}
+
+void FFmpegVideoAllocator::Stop(AVCodecContext* codec_context) {
+ // Restore default buffer allocator functions.
+ // Note this does not actually work with ffmpeg-mt: it keeps multiple
+ // codec_context copies (one per thread), and each context maintains
+ // its own internal buffer pool.
+ codec_context->get_buffer = get_buffer_;
+ codec_context->release_buffer = release_buffer_;
+
+ while (!frame_pool_.empty()) {
+ RefCountedAVFrame* ffmpeg_video_frame = frame_pool_.front();
+ frame_pool_.pop_front();
+ ffmpeg_video_frame->av_frame_.opaque = NULL;
+
+ // Reset per-context default buffer release functions.
+ ffmpeg_video_frame->av_frame_.owner->release_buffer = release_buffer_;
+ ffmpeg_video_frame->av_frame_.owner->get_buffer = get_buffer_;
+ delete ffmpeg_video_frame;
+ }
+ for (int i = 0; i < kMaxFFmpegThreads; ++i)
+ available_frames_[i].clear();
+ codec_index_map_.clear();
+}
+
+void FFmpegVideoAllocator::DisplayDone(
+ AVCodecContext* codec_context,
+ scoped_refptr<VideoFrame> video_frame) {
+ RefCountedAVFrame* ffmpeg_video_frame =
+ reinterpret_cast<RefCountedAVFrame*>(video_frame->private_buffer());
+ if (ffmpeg_video_frame->Release() == 0) {
+ int index = codec_index_map_[ffmpeg_video_frame->av_frame_.owner];
+ available_frames_[index].push_back(ffmpeg_video_frame);
+ }
+}
+
+scoped_refptr<VideoFrame> FFmpegVideoAllocator::DecodeDone(
+ AVCodecContext* codec_context,
+ AVFrame* av_frame) {
+ RefCountedAVFrame* ffmpeg_video_frame =
+ reinterpret_cast<RefCountedAVFrame*>(av_frame->opaque);
+ ffmpeg_video_frame->av_frame_ = *av_frame;
+ ffmpeg_video_frame->AddRef();
+
+ scoped_refptr<VideoFrame> frame;
+ VideoFrame::CreateFrameExternal(
+ VideoFrame::TYPE_SYSTEM_MEMORY, surface_format_,
+ codec_context->width, codec_context->height, 3,
+ av_frame->data,
+ av_frame->linesize,
+ StreamSample::kInvalidTimestamp,
+ StreamSample::kInvalidTimestamp,
+ ffmpeg_video_frame, // |private_buffer_|.
+ &frame);
+ return frame;
+}
+
+int FFmpegVideoAllocator::AllocateBuffer(AVCodecContext* codec_context,
+ AVFrame* av_frame) {
+ FFmpegVideoAllocator* context =
+ reinterpret_cast<FFmpegVideoAllocator*>(codec_context->opaque);
+ return context->InternalAllocateBuffer(codec_context, av_frame);
+}
+
+void FFmpegVideoAllocator::ReleaseBuffer(AVCodecContext* codec_context,
+ AVFrame* av_frame) {
+ FFmpegVideoAllocator* context =
+ reinterpret_cast<FFmpegVideoAllocator*>(codec_context->opaque);
+ context->InternalReleaseBuffer(codec_context, av_frame);
+}
+
+int FFmpegVideoAllocator::InternalAllocateBuffer(
+ AVCodecContext* codec_context, AVFrame* av_frame) {
+ // If |codec_context| is not yet known to us, we add it to our map.
+ if (codec_index_map_.find(codec_context) == codec_index_map_.end()) {
+ int next_index = codec_index_map_.size();
+ codec_index_map_[codec_context] = next_index;
+ CHECK_LE(static_cast<int>(codec_index_map_.size()), kMaxFFmpegThreads);
+ }
+
+ int index = codec_index_map_[codec_context];
+
+ RefCountedAVFrame* ffmpeg_video_frame;
+ if (available_frames_[index].empty()) {
+ int ret = get_buffer_(codec_context, av_frame);
+ CHECK_EQ(ret, 0);
+ ffmpeg_video_frame = new RefCountedAVFrame();
+ ffmpeg_video_frame->av_frame_ = *av_frame;
+ frame_pool_.push_back(ffmpeg_video_frame);
+ } else {
+ ffmpeg_video_frame = available_frames_[index].front();
+ available_frames_[index].pop_front();
+ // We assume a |get_buffer| immediately after a |release_buffer| will
+ // not trigger a real buffer allocation; we use the pair only to fill
+ // in the correct values inside |av_frame|.
+ release_buffer_(codec_context, &ffmpeg_video_frame->av_frame_);
+ get_buffer_(codec_context, av_frame);
+ ffmpeg_video_frame->av_frame_ = *av_frame;
+ }
+
+ av_frame->opaque = ffmpeg_video_frame;
+ av_frame->type = FF_BUFFER_TYPE_USER;
+ ffmpeg_video_frame->AddRef();
+ return 0;
+}
+
+void FFmpegVideoAllocator::InternalReleaseBuffer(
+ AVCodecContext* codec_context, AVFrame* av_frame) {
+ if (av_frame->opaque == NULL) {
+ // This can happen in two scenarios:
+ // 1. The ffmpeg-mt H264 codec seems to allocate one frame during
+ // av_find_stream_info(), before we have even installed the custom
+ // allocator functions.
+ // 2. At cleanup time we reset |av_frame->opaque| and destroy ourselves.
+ // We cannot use our own release_buffer function there because the
+ // delayed release is handled after we have been destroyed.
+ release_buffer_(codec_context, av_frame);
+ return;
+ }
+
+ RefCountedAVFrame* ffmpeg_video_frame =
+ reinterpret_cast<RefCountedAVFrame*>(av_frame->opaque);
+ release_buffer_(codec_context, av_frame);
+
+ // This is required for get_buffer().
+ ffmpeg_video_frame->av_frame_.data[0] = NULL;
+ get_buffer_(codec_context, &ffmpeg_video_frame->av_frame_);
+ int index = codec_index_map_[codec_context];
+ if (ffmpeg_video_frame->Release() == 0)
+ available_frames_[index].push_back(ffmpeg_video_frame);
+
+ for (int k = 0; k < 4; ++k)
+ av_frame->data[k] = NULL;
+}
+
+} // namespace media
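Taken together, the allocator is meant to be driven roughly as follows. This is a sketch only: it assumes a single decode context, uses YV12 as a stand-in surface format, and elides the actual avcodec decode call and error handling:

  media::FFmpegVideoAllocator allocator;
  allocator.Initialize(codec_context, media::VideoFrame::YV12);

  AVFrame av_frame;
  // ... an avcodec decode call fills |av_frame| via AllocateBuffer() ...
  scoped_refptr<media::VideoFrame> frame =
      allocator.DecodeDone(codec_context, &av_frame);  // Takes a reference.

  // ... the renderer consumes |frame| ...
  allocator.DisplayDone(codec_context, frame);  // Drops the reference so the
                                                // underlying AVFrame can be
                                                // recycled.
  allocator.Stop(codec_context);  // Restores the original allocator functions.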
diff --git a/media/filters/ffmpeg_video_allocator.h b/media/filters/ffmpeg_video_allocator.h
new file mode 100644
index 0000000..e2ff670
--- /dev/null
+++ b/media/filters/ffmpeg_video_allocator.h
@@ -0,0 +1,87 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_FILTERS_FFMPEG_VIDEO_ALLOCATOR_H_
+#define MEDIA_FILTERS_FFMPEG_VIDEO_ALLOCATOR_H_
+
+#include "base/scoped_ptr.h"
+#include "media/base/video_frame.h"
+#include "media/ffmpeg/ffmpeg_common.h"
+#include "media/ffmpeg/ffmpeg_util.h"
+
+#include <deque>
+#include <map>
+
+// FFmpeg types.
+struct AVCodecContext;
+struct AVFrame;
+struct AVStream;
+
+namespace media {
+
+class FFmpegVideoAllocator {
+ public:
+ FFmpegVideoAllocator();
+ virtual ~FFmpegVideoAllocator() {}
+
+ struct RefCountedAVFrame {
+ RefCountedAVFrame() : usage_count_(0) {}
+ ~RefCountedAVFrame() { DCHECK_EQ(usage_count_, 0); }
+ void AddRef() {
+ base::AtomicRefCountIncN(&usage_count_, 1);
+ }
+
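+ // Returns false (zero) exactly when the count reaches zero; call sites
+ // rely on this via |Release() == 0| before recycling a frame.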
+ bool Release() {
+ return base::AtomicRefCountDecN(&usage_count_, 1);
+ }
+
+ AVFrame av_frame_;
+ base::AtomicRefCount usage_count_;
+ };
+
+ static int AllocateBuffer(AVCodecContext* codec_context, AVFrame* av_frame);
+ static void ReleaseBuffer(AVCodecContext* codec_context, AVFrame* av_frame);
+
+ void Initialize(AVCodecContext* codec_context,
+ VideoFrame::Format surface_format);
+ void Stop(AVCodecContext* codec_context);
+
+ // DisplayDone() is called when the renderer has finished using a frame.
+ void DisplayDone(AVCodecContext* codec_context,
+ scoped_refptr<VideoFrame> video_frame);
+
+ // DecodeDone() is called after avcodec_decode_video() finishes so that we
+ // can acquire a reference to the video frame before handing it to the renderer.
+ scoped_refptr<VideoFrame> DecodeDone(AVCodecContext* codec_context,
+ AVFrame* av_frame);
+
+ private:
+ int InternalAllocateBuffer(AVCodecContext* codec_context, AVFrame* av_frame);
+ void InternalReleaseBuffer(AVCodecContext* codec_context, AVFrame* av_frame);
+
+ VideoFrame::Format surface_format_;
+
+ // This queue tracks every RefCountedAVFrame allocated, for Stop() to free.
+ std::deque<RefCountedAVFrame*> frame_pool_;
+
+ // These queues keep the per-AVCodecContext frames that are available
+ // for recycling.
+ static const int kMaxFFmpegThreads = 3;
+ std::deque<RefCountedAVFrame*> available_frames_[kMaxFFmpegThreads];
+
+ // Maps an AVCodecContext* to its index into |available_frames_|, because
+ // ffmpeg-mt maintains multiple AVCodecContexts (one per thread).
+ std::map<void*, int> codec_index_map_;
+
+ // These function pointers store the AVCodecContext's original
+ // get_buffer()/release_buffer() functions; allocation requests are
+ // delegated to them.
+ int (*get_buffer_)(struct AVCodecContext *c, AVFrame *pic);
+ void (*release_buffer_)(struct AVCodecContext *c, AVFrame *pic);
+};
+
+} // namespace media
+
+#endif // MEDIA_FILTERS_FFMPEG_VIDEO_ALLOCATOR_H_
diff --git a/media/media.gyp b/media/media.gyp
index 1f866ee..1ad1b8b 100644
--- a/media/media.gyp
+++ b/media/media.gyp
@@ -114,6 +114,8 @@
'filters/ffmpeg_glue.h',
'filters/ffmpeg_interfaces.cc',
'filters/ffmpeg_interfaces.h',
+ 'filters/ffmpeg_video_allocator.cc',
+ 'filters/ffmpeg_video_allocator.h',
'filters/ffmpeg_video_decode_engine.cc',
'filters/ffmpeg_video_decode_engine.h',
'filters/ffmpeg_video_decoder.cc',
diff --git a/media/tools/player_x11/gles_video_renderer.cc b/media/tools/player_x11/gles_video_renderer.cc
index f1942ba..a702c77 100644
--- a/media/tools/player_x11/gles_video_renderer.cc
+++ b/media/tools/player_x11/gles_video_renderer.cc
@@ -432,10 +432,17 @@ void GlesVideoRenderer::CreateTextureAndProgramEgl() {
scoped_refptr<media::VideoFrame> video_frame;
const base::TimeDelta kZero;
- media::VideoFrame::CreatePrivateFrame(
+ // The data/strides are not relevant in this case.
+ uint8* data[media::VideoFrame::kMaxPlanes];
+ int32 strides[media::VideoFrame::kMaxPlanes];
+ memset(data, 0, sizeof(data));
+ memset(strides, 0, sizeof(strides));
+ media::VideoFrame::CreateFrameExternal(
media::VideoFrame::TYPE_EGL_IMAGE,
media::VideoFrame::RGB565,
- width(), height(), kZero, kZero,
+ width(), height(), 3,
+ data, strides,
+ kZero, kZero,
egl_image,
&video_frame);
egl_frames_.push_back(std::make_pair(video_frame, texture));