author     scherkus@chromium.org <scherkus@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2011-06-03 19:32:07 +0000
committer  scherkus@chromium.org <scherkus@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2011-06-03 19:32:07 +0000
commit     b37dc5ab810852a3055addad6d6cd9cc45080ed1 (patch)
tree       16809303f61158d6e88a79db1f3d702d5a6b12b9 /media
parent     4274861a828d85d3360d70f54bc3017f87f17502 (diff)
download   chromium_src-b37dc5ab810852a3055addad6d6cd9cc45080ed1.zip
           chromium_src-b37dc5ab810852a3055addad6d6cd9cc45080ed1.tar.gz
           chromium_src-b37dc5ab810852a3055addad6d6cd9cc45080ed1.tar.bz2
Removing FFmpegVideoAllocator as it provides little benefit while increasing code complexity.
BUG=none
TEST=media_unittests, media layout tests, ui_tests

Review URL: http://codereview.chromium.org/6993018

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@87830 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'media')
-rw-r--r--  media/media.gyp                              2
-rw-r--r--  media/video/ffmpeg_video_allocator.cc      181
-rw-r--r--  media/video/ffmpeg_video_allocator.h       101
-rw-r--r--  media/video/ffmpeg_video_decode_engine.cc   89
-rw-r--r--  media/video/ffmpeg_video_decode_engine.h     8
5 files changed, 31 insertions(+), 350 deletions(-)
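
For context, the deleted FFmpegVideoAllocator implemented direct rendering by overriding libavcodec's buffer callbacks, as the deleted file below shows. The following is only a minimal sketch of that hook pattern, assuming the 2011-era FFMPEG-MT API in which AVCodecContext still exposed get_buffer/release_buffer/opaque; the struct and function names here are illustrative, not Chromium's.

  extern "C" {
  #include <libavcodec/avcodec.h>  // Assumes 2011-era headers with get_buffer/release_buffer.
  }

  // Illustrative only: save the codec's default buffer callbacks, install our
  // own, and delegate to the defaults so decoded frames can be tracked and
  // recycled instead of copied.
  struct BufferHooks {
    int (*default_get_buffer)(AVCodecContext* c, AVFrame* pic);
    void (*default_release_buffer)(AVCodecContext* c, AVFrame* pic);

    static int GetBuffer(AVCodecContext* c, AVFrame* pic) {
      BufferHooks* self = static_cast<BufferHooks*>(c->opaque);
      int ret = self->default_get_buffer(c, pic);  // Real allocation.
      pic->type = FF_BUFFER_TYPE_USER;             // Mark as user-managed.
      return ret;
    }

    static void ReleaseBuffer(AVCodecContext* c, AVFrame* pic) {
      BufferHooks* self = static_cast<BufferHooks*>(c->opaque);
      self->default_release_buffer(c, pic);
    }

    void Install(AVCodecContext* c) {
      default_get_buffer = c->get_buffer;
      default_release_buffer = c->release_buffer;
      c->get_buffer = &GetBuffer;
      c->release_buffer = &ReleaseBuffer;
      c->opaque = this;
    }
  };

Removing the allocator trades this bookkeeping for a straightforward per-frame copy in the decode engine (see the changes to ffmpeg_video_decode_engine.cc below).
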
diff --git a/media/media.gyp b/media/media.gyp
index b7560c1..e9ac3f1 100644
--- a/media/media.gyp
+++ b/media/media.gyp
@@ -185,8 +185,6 @@
'video/capture/video_capture_device_dummy.cc',
'video/capture/video_capture_device_dummy.h',
'video/capture/video_capture_types.h',
- 'video/ffmpeg_video_allocator.cc',
- 'video/ffmpeg_video_allocator.h',
'video/ffmpeg_video_decode_engine.cc',
'video/ffmpeg_video_decode_engine.h',
'video/picture.cc',
diff --git a/media/video/ffmpeg_video_allocator.cc b/media/video/ffmpeg_video_allocator.cc
deleted file mode 100644
index f51337d..0000000
--- a/media/video/ffmpeg_video_allocator.cc
+++ /dev/null
@@ -1,181 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/video/ffmpeg_video_allocator.h"
-
-#include "base/logging.h"
-#include "media/ffmpeg/ffmpeg_common.h"
-
-// Chromium can be built against an FFmpeg version other than FFMPEG-MT by
-// setting the GYP_DEFINES variable "use-system-ffmpeg". The following code
-// does not build with vanilla FFmpeg, so we fall back to disabling direct
-// rendering when that happens.
-// TODO(jiesun): We could do better than this: we should modify the
-// following code to work with vanilla FFmpeg.
-
-namespace media {
-
-FFmpegVideoAllocator::FFmpegVideoAllocator()
- : surface_format_(VideoFrame::INVALID),
- get_buffer_(NULL),
- release_buffer_(NULL) {
-}
-
-FFmpegVideoAllocator::~FFmpegVideoAllocator() {}
-
-void FFmpegVideoAllocator::Initialize(AVCodecContext* codec_context,
- VideoFrame::Format surface_format) {
-#if defined(FF_THREAD_FRAME) // Only defined in FFMPEG-MT.
- surface_format_ = surface_format;
- get_buffer_ = codec_context->get_buffer;
- release_buffer_ = codec_context->release_buffer;
- codec_context->get_buffer = AllocateBuffer;
- codec_context->release_buffer = ReleaseBuffer;
- codec_context->opaque = this;
-#endif
-}
-
-void FFmpegVideoAllocator::Stop(AVCodecContext* codec_context) {
-#if defined(FF_THREAD_FRAME) // Only defined in FFMPEG-MT.
- // Restore the default buffer allocator functions.
- // This does not actually work, because in ffmpeg-mt there are
- // multiple codec_context copies (one per thread), and each context
- // maintains its own internal buffer pool.
- codec_context->get_buffer = get_buffer_;
- codec_context->release_buffer = release_buffer_;
-
- while (!frame_pool_.empty()) {
- RefCountedAVFrame* ffmpeg_video_frame = frame_pool_.front();
- frame_pool_.pop_front();
- ffmpeg_video_frame->av_frame_.opaque = NULL;
-
- // Reset per-context default buffer release functions.
- ffmpeg_video_frame->av_frame_.owner->release_buffer = release_buffer_;
- ffmpeg_video_frame->av_frame_.owner->get_buffer = get_buffer_;
- delete ffmpeg_video_frame;
- }
- for (int i = 0; i < kMaxFFmpegThreads; ++i)
- available_frames_[i].clear();
- codec_index_map_.clear();
-#endif
-}
-
-void FFmpegVideoAllocator::DisplayDone(
- AVCodecContext* codec_context,
- scoped_refptr<VideoFrame> video_frame) {
-#if defined(FF_THREAD_FRAME) // Only defined in FFMPEG-MT.
- RefCountedAVFrame* ffmpeg_video_frame =
- reinterpret_cast<RefCountedAVFrame*>(video_frame->private_buffer());
- if (ffmpeg_video_frame->Release() == 0) {
- int index = codec_index_map_[ffmpeg_video_frame->av_frame_.owner];
- available_frames_[index].push_back(ffmpeg_video_frame);
- }
-#endif
-}
-
-scoped_refptr<VideoFrame> FFmpegVideoAllocator::DecodeDone(
- AVCodecContext* codec_context,
- AVFrame* av_frame) {
- scoped_refptr<VideoFrame> frame;
-#if defined(FF_THREAD_FRAME) // Only defined in FFMPEG-MT.
- RefCountedAVFrame* ffmpeg_video_frame =
- reinterpret_cast<RefCountedAVFrame*>(av_frame->opaque);
- ffmpeg_video_frame->av_frame_ = *av_frame;
- ffmpeg_video_frame->AddRef();
-
- VideoFrame::CreateFrameExternal(
- VideoFrame::TYPE_SYSTEM_MEMORY, surface_format_,
- codec_context->width, codec_context->height, 3,
- av_frame->data,
- av_frame->linesize,
- kNoTimestamp,
- kNoTimestamp,
- ffmpeg_video_frame, // |private_buffer_|.
- &frame);
-#endif
- return frame;
-}
-
-int FFmpegVideoAllocator::AllocateBuffer(AVCodecContext* codec_context,
- AVFrame* av_frame) {
- FFmpegVideoAllocator* context =
- reinterpret_cast<FFmpegVideoAllocator*>(codec_context->opaque);
- return context->InternalAllocateBuffer(codec_context, av_frame);
-}
-
-void FFmpegVideoAllocator::ReleaseBuffer(AVCodecContext* codec_context,
- AVFrame* av_frame) {
- FFmpegVideoAllocator* context =
- reinterpret_cast<FFmpegVideoAllocator*>(codec_context->opaque);
- context->InternalReleaseBuffer(codec_context, av_frame);
-}
-
-int FFmpegVideoAllocator::InternalAllocateBuffer(
- AVCodecContext* codec_context, AVFrame* av_frame) {
-#if defined(FF_THREAD_FRAME) // Only defined in FFMPEG-MT.
- // If |codec_context| is not yet known to us, we add it to our map.
- if (codec_index_map_.find(codec_context) == codec_index_map_.end()) {
- int next_index = codec_index_map_.size();
- codec_index_map_[codec_context] = next_index;
- CHECK_LE((int)codec_index_map_.size(), kMaxFFmpegThreads);
- }
-
- int index = codec_index_map_[codec_context];
-
- RefCountedAVFrame* ffmpeg_video_frame;
- if (available_frames_[index].empty()) {
- int ret = get_buffer_(codec_context, av_frame);
- CHECK_EQ(ret, 0);
- ffmpeg_video_frame = new RefCountedAVFrame(av_frame);
- frame_pool_.push_back(ffmpeg_video_frame);
- } else {
- ffmpeg_video_frame = available_frames_[index].front();
- available_frames_[index].pop_front();
- // We assume that |get_buffer| called immediately after |release_buffer|
- // will not trigger a real buffer allocation; we just use it to fill in
- // the correct values inside |pic|.
- release_buffer_(codec_context, &ffmpeg_video_frame->av_frame_);
- get_buffer_(codec_context, av_frame);
- ffmpeg_video_frame->av_frame_ = *av_frame;
- }
-
- av_frame->opaque = ffmpeg_video_frame;
- av_frame->type = FF_BUFFER_TYPE_USER;
- ffmpeg_video_frame->AddRef();
-#endif
- return 0;
-}
-
-void FFmpegVideoAllocator::InternalReleaseBuffer(
- AVCodecContext* codec_context, AVFrame* av_frame) {
-#if defined(FF_THREAD_FRAME) // Only defined in FFMPEG-MT.
- if (av_frame->opaque == NULL) {
- // This can happen in two scenarios:
- // 1. The FFMPEG-MT H264 codec seems to allocate one frame during
- // av_find_stream_info(). This happens before we can even
- // install the custom allocator functions.
- // 2. At cleanup time, we reset pic->opaque and destroy ourselves.
- // We cannot use our own release_buffer function because
- // handle-delayed-release() is called after we are destroyed.
- release_buffer_(codec_context, av_frame);
- return;
- }
-
- RefCountedAVFrame* ffmpeg_video_frame =
- reinterpret_cast<RefCountedAVFrame*>(av_frame->opaque);
- release_buffer_(codec_context, av_frame);
-
- // This is required for get_buffer().
- ffmpeg_video_frame->av_frame_.data[0] = NULL;
- get_buffer_(codec_context, &ffmpeg_video_frame->av_frame_);
- int index = codec_index_map_[codec_context];
- if (ffmpeg_video_frame->Release() == 0)
- available_frames_[index].push_back(ffmpeg_video_frame);
-
- for (int k = 0; k < 4; ++k)
- av_frame->data[k] = NULL;
-#endif
-}
-
-} // namespace media
diff --git a/media/video/ffmpeg_video_allocator.h b/media/video/ffmpeg_video_allocator.h
deleted file mode 100644
index d70f019..0000000
--- a/media/video/ffmpeg_video_allocator.h
+++ /dev/null
@@ -1,101 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_VIDEO_FFMPEG_VIDEO_ALLOCATOR_H_
-#define MEDIA_VIDEO_FFMPEG_VIDEO_ALLOCATOR_H_
-
-#include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
-#include "media/base/video_frame.h"
-#include "media/ffmpeg/ffmpeg_common.h"
-
-#include <deque>
-#include <map>
-
-// FFmpeg types.
-struct AVCodecContext;
-struct AVFrame;
-struct AVStream;
-
-namespace media {
-
-class FFmpegVideoAllocator {
- public:
- FFmpegVideoAllocator();
- virtual ~FFmpegVideoAllocator();
-
- struct RefCountedAVFrame {
- explicit RefCountedAVFrame(AVFrame* av_frame)
- : av_frame_(*av_frame),
- usage_count_(0) {}
-
- // TODO(jiesun): We commented out "DCHECK_EQ(usage_count_, 0);" here
- // because FFMPEG-MT releases buffers in a delayed fashion. We could
- // probably wait for FFMPEG-MT to release all buffers before invoking
- // the flush-completion callback.
- ~RefCountedAVFrame() {}
-
- void AddRef() {
- base::AtomicRefCountIncN(&usage_count_, 1);
- }
-
- bool Release() {
- return base::AtomicRefCountDecN(&usage_count_, 1);
- }
-
- // Technically, AVFrame should *always* be heap-allocated via
- // avcodec_alloc_frame(); otherwise (while rare) we can run into nasty
- // binary-mismatch incompatibilities if people swap binaries (which might
- // happen with some Linux distributions). See http://crbug.com/77629.
- AVFrame av_frame_;
- base::AtomicRefCount usage_count_;
- };
-
- static int AllocateBuffer(AVCodecContext* codec_context, AVFrame* av_frame);
- static void ReleaseBuffer(AVCodecContext* codec_context, AVFrame* av_frame);
-
- void Initialize(AVCodecContext* codec_context,
- VideoFrame::Format surface_format);
- void Stop(AVCodecContext* codec_context);
-
- // DisplayDone() is called when the renderer has finished using a frame.
- void DisplayDone(AVCodecContext* codec_context,
- scoped_refptr<VideoFrame> video_frame);
-
- // DecodeDone() is called after avcodec_decode_video() finishes so that we
- // can acquire a reference to the video frame before handing it to the renderer.
- scoped_refptr<VideoFrame> DecodeDone(AVCodecContext* codec_context,
- AVFrame* av_frame);
-
- private:
- int InternalAllocateBuffer(AVCodecContext* codec_context, AVFrame* av_frame);
- void InternalReleaseBuffer(AVCodecContext* codec_context, AVFrame* av_frame);
-
- VideoFrame::Format surface_format_;
-
- // This queue keeps the reference-counted wrappers for all allocated VideoFrames.
- std::deque<RefCountedAVFrame*> frame_pool_;
-
- // These queues keep the per-AVCodecContext VideoFrame allocations that
- // are available for recycling.
- static const int kMaxFFmpegThreads = 3;
- std::deque<RefCountedAVFrame*> available_frames_[kMaxFFmpegThreads];
-
- // This map translates an AVCodecContext* into an index into
- // |available_frames_|, because ffmpeg-mt maintains multiple
- // AVCodecContexts (one per thread).
- std::map<AVCodecContext*, int> codec_index_map_;
-
- // These function pointers store the AVCodecContext's original
- // get_buffer()/release_buffer() functions; we use them to delegate
- // allocation requests.
- int (*get_buffer_)(AVCodecContext* c, AVFrame* pic);
- void (*release_buffer_)(AVCodecContext* c, AVFrame* pic);
-
- DISALLOW_COPY_AND_ASSIGN(FFmpegVideoAllocator);
-};
-
-} // namespace media
-
-#endif // MEDIA_VIDEO_FFMPEG_VIDEO_ALLOCATOR_H_
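
The header above boils down to an atomic use count per frame plus per-codec-context free lists keyed through a map. Below is a condensed, self-contained sketch of that recycling scheme using only standard C++ and hypothetical names (Frame stands in for RefCountedAVFrame, Context for AVCodecContext); it illustrates the idea, not the removed class.

  #include <atomic>
  #include <cassert>
  #include <deque>
  #include <map>

  struct Frame { std::atomic<int> uses{0}; /* AVFrame payload elided */ };

  template <typename Context>
  class FramePool {
   public:
    // Hand out a recycled frame for |ctx| if one is free, else allocate one.
    Frame* Acquire(Context* ctx) {
      int index = IndexFor(ctx);
      Frame* frame;
      if (free_lists_[index].empty()) {
        frame = new Frame();
        all_frames_.push_back(frame);
      } else {
        frame = free_lists_[index].front();
        free_lists_[index].pop_front();
      }
      frame->uses.fetch_add(1);
      return frame;
    }

    // Drop one reference; once the last user is done, recycle the frame.
    void Release(Context* ctx, Frame* frame) {
      if (frame->uses.fetch_sub(1) == 1)
        free_lists_[IndexFor(ctx)].push_back(frame);
    }

   private:
    int IndexFor(Context* ctx) {
      auto it = index_map_.find(ctx);
      if (it != index_map_.end())
        return it->second;
      int index = static_cast<int>(index_map_.size());
      assert(index < kMaxContexts);  // Mirrors the CHECK on kMaxFFmpegThreads.
      index_map_[ctx] = index;
      return index;
    }

    static constexpr int kMaxContexts = 3;         // Like kMaxFFmpegThreads.
    std::deque<Frame*> all_frames_;                // Like |frame_pool_|.
    std::deque<Frame*> free_lists_[kMaxContexts];  // Like |available_frames_|.
    std::map<Context*, int> index_map_;            // Like |codec_index_map_|.
  };
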
diff --git a/media/video/ffmpeg_video_decode_engine.cc b/media/video/ffmpeg_video_decode_engine.cc
index c1e1d63..149e595 100644
--- a/media/video/ffmpeg_video_decode_engine.cc
+++ b/media/video/ffmpeg_video_decode_engine.cc
@@ -14,7 +14,6 @@
#include "media/base/pipeline.h"
#include "media/ffmpeg/ffmpeg_common.h"
#include "media/filters/ffmpeg_demuxer.h"
-#include "media/video/ffmpeg_video_allocator.h"
namespace media {
@@ -23,7 +22,6 @@ FFmpegVideoDecodeEngine::FFmpegVideoDecodeEngine()
event_handler_(NULL),
frame_rate_numerator_(0),
frame_rate_denominator_(0),
- direct_rendering_(false),
pending_input_buffers_(0),
pending_output_buffers_(0),
output_eos_reached_(false),
@@ -43,8 +41,6 @@ void FFmpegVideoDecodeEngine::Initialize(
VideoDecodeEngine::EventHandler* event_handler,
VideoDecodeContext* context,
const VideoDecoderConfig& config) {
- allocator_.reset(new FFmpegVideoAllocator());
-
// Always try to use three threads for video decoding. There is little reason
// not to since current day CPUs tend to be multi-core and we measured
// performance benefits on older machines such as P4s with hyperthreading.
@@ -85,20 +81,10 @@ void FFmpegVideoDecodeEngine::Initialize(
AVCodec* codec = avcodec_find_decoder(codec_context_->codec_id);
- if (codec) {
-#ifdef FF_THREAD_FRAME // Only defined in FFMPEG-MT.
- direct_rendering_ = codec->capabilities & CODEC_CAP_DR1 ? true : false;
-#endif
- if (direct_rendering_) {
- DVLOG(1) << "direct rendering is used";
- allocator_->Initialize(codec_context_, GetSurfaceFormat());
- }
- }
-
// TODO(fbarchard): Improve thread logic based on size / codec.
// TODO(fbarchard): Fix bug affecting video-cookie.html
int decode_threads = (codec_context_->codec_id == CODEC_ID_THEORA) ?
- 1 : kDecodeThreads;
+ 1 : kDecodeThreads;
const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
std::string threads(cmd_line->GetSwitchValueASCII(switches::kVideoThreads));
@@ -123,22 +109,21 @@ void FFmpegVideoDecodeEngine::Initialize(
// If we do not have enough buffers, we will report error too.
bool buffer_allocated = true;
frame_queue_available_.clear();
- if (!direct_rendering_) {
- // Create output buffer pool when direct rendering is not used.
- for (size_t i = 0; i < Limits::kMaxVideoFrames; ++i) {
- scoped_refptr<VideoFrame> video_frame;
- VideoFrame::CreateFrame(VideoFrame::YV12,
- config.width(),
- config.height(),
- kNoTimestamp,
- kNoTimestamp,
- &video_frame);
- if (!video_frame.get()) {
- buffer_allocated = false;
- break;
- }
- frame_queue_available_.push_back(video_frame);
+
+ // Create the output buffer pool.
+ for (size_t i = 0; i < Limits::kMaxVideoFrames; ++i) {
+ scoped_refptr<VideoFrame> video_frame;
+ VideoFrame::CreateFrame(VideoFrame::YV12,
+ config.width(),
+ config.height(),
+ kNoTimestamp,
+ kNoTimestamp,
+ &video_frame);
+ if (!video_frame.get()) {
+ buffer_allocated = false;
+ break;
}
+ frame_queue_available_.push_back(video_frame);
}
if (codec &&
@@ -201,11 +186,8 @@ void FFmpegVideoDecodeEngine::ProduceVideoFrame(
// Increment pending output buffer count.
pending_output_buffers_++;
- // Return this frame to available pool or allocator after display.
- if (direct_rendering_)
- allocator_->DisplayDone(codec_context_, frame);
- else
- frame_queue_available_.push_back(frame);
+ // Return this frame to the available pool after display.
+ frame_queue_available_.push_back(frame);
if (flush_pending_) {
TryToFinishPendingFlush();
@@ -295,25 +277,20 @@ void FFmpegVideoDecodeEngine::DecodeFrame(scoped_refptr<Buffer> buffer) {
base::TimeDelta duration =
ConvertFromTimeBase(doubled_time_base, 2 + av_frame_->repeat_pict);
- if (!direct_rendering_) {
- // An available frame is guaranteed because we issue as many reads as there
- // are available frames, except when |frame_decoded| == 0, which implies a
- // decoder reorder delay and forces us to read more input.
- DCHECK(frame_queue_available_.size());
- video_frame = frame_queue_available_.front();
- frame_queue_available_.pop_front();
-
- // Copy the frame data since FFmpeg reuses internal buffers for AVFrame
- // output, meaning the data is only valid until the next
- // avcodec_decode_video() call.
- size_t height = codec_context_->height;
- CopyPlane(VideoFrame::kYPlane, video_frame.get(), av_frame_.get(), height);
- CopyPlane(VideoFrame::kUPlane, video_frame.get(), av_frame_.get(), height);
- CopyPlane(VideoFrame::kVPlane, video_frame.get(), av_frame_.get(), height);
- } else {
- // Get the VideoFrame from allocator which associate with av_frame_.
- video_frame = allocator_->DecodeDone(codec_context_, av_frame_.get());
- }
+ // An available frame is guaranteed because we issue as many reads as there
+ // are available frames, except when |frame_decoded| == 0, which implies a
+ // decoder reorder delay and forces us to read more input.
+ DCHECK(frame_queue_available_.size());
+ video_frame = frame_queue_available_.front();
+ frame_queue_available_.pop_front();
+
+ // Copy the frame data since FFmpeg reuses internal buffers for AVFrame
+ // output, meaning the data is only valid until the next
+ // avcodec_decode_video() call.
+ size_t height = codec_context_->height;
+ CopyPlane(VideoFrame::kYPlane, video_frame.get(), av_frame_.get(), height);
+ CopyPlane(VideoFrame::kUPlane, video_frame.get(), av_frame_.get(), height);
+ CopyPlane(VideoFrame::kVPlane, video_frame.get(), av_frame_.get(), height);
video_frame->SetTimestamp(timestamp);
video_frame->SetDuration(duration);
@@ -323,10 +300,6 @@ void FFmpegVideoDecodeEngine::DecodeFrame(scoped_refptr<Buffer> buffer) {
}
void FFmpegVideoDecodeEngine::Uninitialize() {
- if (direct_rendering_) {
- allocator_->Stop(codec_context_);
- }
-
event_handler_->OnUninitializeComplete();
}
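
With direct rendering gone, the engine always copies decoded data out of FFmpeg's reusable buffers into a pooled YV12 VideoFrame, as the CopyPlane() calls in the hunk above do. A hedged sketch of such a per-plane copy follows, assuming the era's 4:2:0 AVFrame layout; the helper name and destination layout are illustrative, not the engine's actual API.

  extern "C" {
  #include <libavcodec/avcodec.h>  // AVFrame; assumes the 2011-era headers.
  }
  #include <cstdint>
  #include <cstring>

  // Copy one plane of a decoded 4:2:0 frame row by row. The copy is needed
  // because FFmpeg reuses its internal buffers: the AVFrame data is only
  // valid until the next decode call. |plane| is 0 (Y), 1 (U) or 2 (V);
  // chroma planes have half the rows of the luma plane.
  static void CopyPlaneOut(const AVFrame* src, int plane,
                           uint8_t* dst, int dst_stride, int coded_height) {
    const int rows = (plane == 0) ? coded_height : coded_height / 2;
    const int src_stride = src->linesize[plane];
    // Copy no more bytes per row than either buffer's stride provides.
    const int row_bytes = dst_stride < src_stride ? dst_stride : src_stride;
    for (int y = 0; y < rows; ++y) {
      std::memcpy(dst + y * dst_stride,
                  src->data[plane] + y * src_stride,
                  row_bytes);
    }
  }
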
diff --git a/media/video/ffmpeg_video_decode_engine.h b/media/video/ffmpeg_video_decode_engine.h
index 6742246..32eac45 100644
--- a/media/video/ffmpeg_video_decode_engine.h
+++ b/media/video/ffmpeg_video_decode_engine.h
@@ -17,8 +17,6 @@ struct AVFrame;
namespace media {
-class FFmpegVideoAllocator;
-
class FFmpegVideoDecodeEngine : public VideoDecodeEngine {
public:
FFmpegVideoDecodeEngine();
@@ -50,12 +48,6 @@ class FFmpegVideoDecodeEngine : public VideoDecodeEngine {
int frame_rate_numerator_;
int frame_rate_denominator_;
- // Whether direct rendering is used.
- bool direct_rendering_;
-
- // Used when direct rendering is used to recycle output buffers.
- scoped_ptr<FFmpegVideoAllocator> allocator_;
-
// Indicate how many buffers are pending on input port of this filter:
// Increment when engine receive one input packet from demuxer;
// Decrement when engine send one input packet to demuxer;