Diffstat (limited to 'media')
-rw-r--r--  media/base/composite_filter.cc                   20
-rw-r--r--  media/base/composite_filter.h                     3
-rw-r--r--  media/base/composite_filter_unittest.cc          17
-rw-r--r--  media/base/filter_collection.h                    1
-rw-r--r--  media/base/filters.cc                             5
-rw-r--r--  media/base/filters.h                             13
-rw-r--r--  media/base/mock_filters.h                         2
-rw-r--r--  media/base/pipeline_impl.cc                      32
-rw-r--r--  media/base/pipeline_impl.h                        9
-rw-r--r--  media/base/pipeline_impl_unittest.cc              9
-rw-r--r--  media/base/video_decoder_config.cc               34
-rw-r--r--  media/base/video_decoder_config.h                32
-rw-r--r--  media/base/video_frame.cc                        48
-rw-r--r--  media/base/video_frame.h                         33
-rw-r--r--  media/ffmpeg/ffmpeg_common.cc                    48
-rw-r--r--  media/filters/ffmpeg_video_decoder.cc            33
-rw-r--r--  media/filters/ffmpeg_video_decoder.h              2
-rw-r--r--  media/filters/ffmpeg_video_decoder_unittest.cc   33
-rw-r--r--  media/filters/gpu_video_decoder.cc              443
-rw-r--r--  media/filters/gpu_video_decoder.h               169
-rw-r--r--  media/filters/video_renderer_base.cc             22
-rw-r--r--  media/media.gyp                                   2
-rw-r--r--  media/video/video_decode_accelerator.h           24
23 files changed, 918 insertions, 116 deletions
diff --git a/media/base/composite_filter.cc b/media/base/composite_filter.cc
index 5c4b1c5..0b9e87e 100644
--- a/media/base/composite_filter.cc
+++ b/media/base/composite_filter.cc
@@ -57,6 +57,8 @@ CompositeFilter::~CompositeFilter() {
}
bool CompositeFilter::AddFilter(scoped_refptr<Filter> filter) {
+ // TODO(fischman,scherkus): s/bool/void/ the return type and CHECK on failure
+ // of the sanity-checks that return false today.
DCHECK_EQ(message_loop_, MessageLoop::current());
if (!filter.get() || state_ != kCreated || !host())
return false;
@@ -67,6 +69,22 @@ bool CompositeFilter::AddFilter(scoped_refptr<Filter> filter) {
return true;
}
+void CompositeFilter::RemoveFilter(scoped_refptr<Filter> filter) {
+ DCHECK_EQ(message_loop_, MessageLoop::current());
+ if (!filter.get() || state_ != kCreated || !host())
+ LOG(FATAL) << "Unknown filter, or in unexpected state.";
+
+ for (FilterVector::iterator it = filters_.begin();
+ it != filters_.end(); ++it) {
+ if (it->get() != filter.get())
+ continue;
+ filters_.erase(it);
+ filter->clear_host();
+ return;
+ }
+ NOTREACHED() << "Filter missing.";
+}
+
void CompositeFilter::set_host(FilterHost* host) {
DCHECK_EQ(message_loop_, MessageLoop::current());
DCHECK(host);
@@ -337,7 +355,6 @@ void CompositeFilter::SerialCallback() {
DispatchPendingCallback(status_);
return;
}
-
if (!filters_.empty())
sequence_index_++;
@@ -375,7 +392,6 @@ void CompositeFilter::ParallelCallback() {
void CompositeFilter::OnCallSequenceDone() {
State next_state = GetNextState(state_);
-
if (next_state == kInvalid) {
// We somehow got into an unexpected state.
ChangeState(kError);
diff --git a/media/base/composite_filter.h b/media/base/composite_filter.h
index 4b35be3..9821d03f 100644
--- a/media/base/composite_filter.h
+++ b/media/base/composite_filter.h
@@ -27,6 +27,9 @@ class MEDIA_EXPORT CompositeFilter : public Filter {
// because the composite is in the wrong state.
bool AddFilter(scoped_refptr<Filter> filter);
+ // Undoes AddFilter's actions. CHECK-fails if |filter| is unknown.
+ void RemoveFilter(scoped_refptr<Filter> filter);
+
// media::Filter methods.
virtual void set_host(FilterHost* host) OVERRIDE;
virtual FilterHost* host() OVERRIDE;
diff --git a/media/base/composite_filter_unittest.cc b/media/base/composite_filter_unittest.cc
index 23b7f56..6b7f219 100644
--- a/media/base/composite_filter_unittest.cc
+++ b/media/base/composite_filter_unittest.cc
@@ -379,8 +379,8 @@ TEST_F(CompositeFilterTest, TestAddFilterFailCases) {
EXPECT_FALSE(composite_->AddFilter(filter));
}
-// Test successful AddFilter() cases.
-TEST_F(CompositeFilterTest, TestAddFilter) {
+// Test successful {Add,Remove}Filter() cases.
+TEST_F(CompositeFilterTest, TestAddRemoveFilter) {
composite_->set_host(mock_filter_host_.get());
// Add a filter.
@@ -388,8 +388,19 @@ TEST_F(CompositeFilterTest, TestAddFilter) {
EXPECT_EQ(NULL, filter->host());
EXPECT_TRUE(composite_->AddFilter(filter));
-
EXPECT_TRUE(filter->host() != NULL);
+ composite_->RemoveFilter(filter);
+ EXPECT_TRUE(filter->host() == NULL);
+}
+
+class CompositeFilterDeathTest : public CompositeFilterTest {};
+
+// Test failure of RemoveFilter() on an unknown filter.
+TEST_F(CompositeFilterDeathTest, TestRemoveUnknownFilter) {
+ composite_->set_host(mock_filter_host_.get());
+ // Remove unknown filter.
+ scoped_refptr<StrictMock<MockFilter> > filter = new StrictMock<MockFilter>();
+ EXPECT_DEATH(composite_->RemoveFilter(filter), "");
}
TEST_F(CompositeFilterTest, TestPlay) {
diff --git a/media/base/filter_collection.h b/media/base/filter_collection.h
index 97e7232..edf6b29 100644
--- a/media/base/filter_collection.h
+++ b/media/base/filter_collection.h
@@ -40,6 +40,7 @@ class MEDIA_EXPORT FilterCollection {
// Selects a filter of the specified type from the collection.
// If the required filter cannot be found, NULL is returned.
// If a filter is returned it is removed from the collection.
+ // Filters are selected in FIFO order.
void SelectVideoDecoder(scoped_refptr<VideoDecoder>* filter_out);
void SelectAudioDecoder(scoped_refptr<AudioDecoder>* filter_out);
void SelectVideoRenderer(scoped_refptr<VideoRenderer>* filter_out);
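Note: the FIFO guarantee documented above is what makes decoder fallback work: a
client queues the GPU-accelerated decoder ahead of the software decoder, and when
the first fails to initialize, the pipeline retries the stage with the next one
(see the pipeline_impl.cc hunks below). A minimal sketch, assuming
FilterCollection's AddVideoDecoder() mutator from elsewhere in this header:

  // Hedged sketch; |gpu_video_decoder| and |ffmpeg_video_decoder| are
  // pre-built VideoDecoder instances.
  FilterCollection collection;
  collection.AddVideoDecoder(gpu_video_decoder);     // Tried first (FIFO).
  collection.AddVideoDecoder(ffmpeg_video_decoder);  // Software fallback.

  scoped_refptr<VideoDecoder> decoder;
  collection.SelectVideoDecoder(&decoder);  // -> gpu_video_decoder; removed
                                            //    from the collection.
  collection.SelectVideoDecoder(&decoder);  // -> ffmpeg_video_decoder.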
diff --git a/media/base/filters.cc b/media/base/filters.cc
index 877f391..ace7d4e 100644
--- a/media/base/filters.cc
+++ b/media/base/filters.cc
@@ -29,6 +29,11 @@ Filter::Filter() : host_(NULL) {}
Filter::~Filter() {}
+void Filter::clear_host() {
+ DCHECK(host_);
+ host_ = NULL;
+}
+
void Filter::set_host(FilterHost* host) {
DCHECK(host);
DCHECK(!host_);
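Note: a short sketch of the host lifecycle those DCHECKs enforce; SomeFilter and
|host| are hypothetical:

  scoped_refptr<Filter> filter = new SomeFilter();
  filter->set_host(host);  // DCHECKs that no host was set yet.
  // ... owner abandons the filter before any state-changing call ...
  filter->clear_host();    // DCHECKs a host was set; filter->host() is NULL.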
diff --git a/media/base/filters.h b/media/base/filters.h
index e464202..5cd4cf8 100644
--- a/media/base/filters.h
+++ b/media/base/filters.h
@@ -62,8 +62,8 @@ enum Preload {
// Used for completing asynchronous methods.
typedef base::Callback<void(PipelineStatus)> FilterStatusCB;
-// This function copies |*cb|, calls Reset() on |*cb|, and then calls Run()
-// on the copy. This is used in the common case where you need to clear
+// These functions copy |*cb|, call Reset() on |*cb|, and then call Run()
+// on the copy. This is used in the common case where you need to clear
// a callback member variable before running the callback.
MEDIA_EXPORT void ResetAndRunCB(FilterStatusCB* cb, PipelineStatus status);
MEDIA_EXPORT void ResetAndRunCB(base::Closure* cb);
@@ -81,6 +81,10 @@ class MEDIA_EXPORT Filter : public base::RefCountedThreadSafe<Filter> {
// to be released before the host object is destroyed by the pipeline.
virtual void set_host(FilterHost* host);
+ // Clear |host_| to signal abandonment. Must be called after set_host() and
+ // before any state-changing method below.
+ virtual void clear_host();
+
virtual FilterHost* host();
// The pipeline has resumed playback. Filters can continue requesting reads.
@@ -164,9 +168,8 @@ class MEDIA_EXPORT VideoDecoder : public Filter {
// Initialize a VideoDecoder with the given DemuxerStream, executing the
// callback upon completion.
// stats_callback is used to update global pipeline statistics.
- //
- // TODO(scherkus): switch to PipelineStatus callback.
- virtual void Initialize(DemuxerStream* stream, const base::Closure& callback,
+ virtual void Initialize(DemuxerStream* stream,
+ const PipelineStatusCB& callback,
const StatisticsCallback& stats_callback) = 0;
// Request a frame to be decoded and returned via the provided callback.
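Note: under the new signature a decoder reports failure through the callback
instead of via host()->SetError(); a hedged sketch of the pattern (MyDecoder is
hypothetical; the ffmpeg_video_decoder.cc hunks below show the real conversion):

  void MyDecoder::Initialize(DemuxerStream* stream,
                             const PipelineStatusCB& callback,
                             const StatisticsCallback& stats_callback) {
    if (!stream) {
      callback.Run(PIPELINE_ERROR_DECODE);  // Previously host()->SetError().
      return;
    }
    // ... inspect stream->video_decoder_config(), set up decoder state ...
    callback.Run(PIPELINE_OK);
  }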
diff --git a/media/base/mock_filters.h b/media/base/mock_filters.h
index 13a8d2c..28817e9 100644
--- a/media/base/mock_filters.h
+++ b/media/base/mock_filters.h
@@ -188,7 +188,7 @@ class MockVideoDecoder : public VideoDecoder {
// VideoDecoder implementation.
MOCK_METHOD3(Initialize, void(DemuxerStream* stream,
- const base::Closure& callback,
+ const PipelineStatusCB& callback,
const StatisticsCallback& stats_callback));
MOCK_METHOD1(Read, void(const ReadCB& callback));
MOCK_METHOD0(natural_size, const gfx::Size&());
diff --git a/media/base/pipeline_impl.cc b/media/base/pipeline_impl.cc
index d4c1b8e..aa5ac3b 100644
--- a/media/base/pipeline_impl.cc
+++ b/media/base/pipeline_impl.cc
@@ -586,10 +586,10 @@ void PipelineImpl::DisableAudioRenderer() {
}
// Called from any thread.
-void PipelineImpl::OnFilterInitialize() {
+void PipelineImpl::OnFilterInitialize(PipelineStatus status) {
// Continue the initialize task by proceeding to the next stage.
- message_loop_->PostTask(FROM_HERE,
- base::Bind(&PipelineImpl::InitializeTask, this));
+ message_loop_->PostTask(
+ FROM_HERE, base::Bind(&PipelineImpl::InitializeTask, this, status));
}
// Called from any thread.
@@ -661,9 +661,21 @@ void PipelineImpl::StartTask(FilterCollection* filter_collection,
// TODO(hclam): InitializeTask() is now starting the pipeline asynchronously. It
// works like a big state change table. If we no longer need to start filters
// in order, we need to get rid of all the state changes.
-void PipelineImpl::InitializeTask() {
+void PipelineImpl::InitializeTask(PipelineStatus last_stage_status) {
DCHECK_EQ(MessageLoop::current(), message_loop_);
+ if (last_stage_status != PIPELINE_OK) {
+ // Currently only VideoDecoders have a recoverable error code.
+ if (state_ == kInitVideoDecoder &&
+ last_stage_status == DECODER_ERROR_NOT_SUPPORTED) {
+ pipeline_init_state_->composite_->RemoveFilter(
+ pipeline_init_state_->video_decoder_.get());
+ state_ = kInitAudioRenderer;
+ } else {
+ SetError(last_stage_status);
+ }
+ }
+
// If we have received the stop or error signal, return immediately.
if (IsPipelineStopPending() || IsPipelineStopped() || !IsPipelineOk())
return;
@@ -1111,10 +1123,8 @@ void PipelineImpl::FinishDestroyingFiltersTask() {
bool PipelineImpl::PrepareFilter(scoped_refptr<Filter> filter) {
bool ret = pipeline_init_state_->composite_->AddFilter(filter.get());
-
- if (!ret) {
+ if (!ret)
SetError(PIPELINE_ERROR_INITIALIZATION_FAILED);
- }
return ret;
}
@@ -1155,7 +1165,7 @@ void PipelineImpl::OnDemuxerBuilt(PipelineStatus status, Demuxer* demuxer) {
clock_->SetTime(demuxer_->GetStartTime());
}
- OnFilterInitialize();
+ OnFilterInitialize(PIPELINE_OK);
}
bool PipelineImpl::InitializeAudioDecoder(
@@ -1183,7 +1193,7 @@ bool PipelineImpl::InitializeAudioDecoder(
pipeline_init_state_->audio_decoder_ = audio_decoder;
audio_decoder->Initialize(
stream,
- base::Bind(&PipelineImpl::OnFilterInitialize, this),
+ base::Bind(&PipelineImpl::OnFilterInitialize, this, PIPELINE_OK),
base::Bind(&PipelineImpl::OnUpdateStatistics, this));
return true;
}
@@ -1240,7 +1250,7 @@ bool PipelineImpl::InitializeAudioRenderer(
audio_renderer_->Initialize(
decoder,
- base::Bind(&PipelineImpl::OnFilterInitialize, this),
+ base::Bind(&PipelineImpl::OnFilterInitialize, this, PIPELINE_OK),
base::Bind(&PipelineImpl::OnAudioUnderflow, this));
return true;
}
@@ -1264,7 +1274,7 @@ bool PipelineImpl::InitializeVideoRenderer(
video_renderer_->Initialize(
decoder,
- base::Bind(&PipelineImpl::OnFilterInitialize, this),
+ base::Bind(&PipelineImpl::OnFilterInitialize, this, PIPELINE_OK),
base::Bind(&PipelineImpl::OnUpdateStatistics, this));
return true;
}
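Note: filters whose Initialize() still takes a base::Closure keep working because
the status argument is curried at Bind() time; a sketch of why this type-checks:

  // Binding |this| and PIPELINE_OK up front leaves no unbound arguments, so
  // the result is a plain base::Closure even though the target method takes
  // a PipelineStatus.
  base::Closure done =
      base::Bind(&PipelineImpl::OnFilterInitialize, this, PIPELINE_OK);
  done.Run();  // Equivalent to OnFilterInitialize(PIPELINE_OK).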
diff --git a/media/base/pipeline_impl.h b/media/base/pipeline_impl.h
index 5fb8f82..9426688 100644
--- a/media/base/pipeline_impl.h
+++ b/media/base/pipeline_impl.h
@@ -206,8 +206,8 @@ class MEDIA_EXPORT PipelineImpl : public Pipeline, public FilterHost {
virtual void SetCurrentReadPosition(int64 offset) OVERRIDE;
virtual int64 GetCurrentReadPosition() OVERRIDE;
- // Callback executed by filters upon completing initialization.
- void OnFilterInitialize();
+ // Callbacks executed by filters upon completing initialization.
+ void OnFilterInitialize(PipelineStatus status);
// Callback executed by filters upon completing Play(), Pause(), or Stop().
void OnFilterStateTransition();
@@ -231,8 +231,9 @@ class MEDIA_EXPORT PipelineImpl : public Pipeline, public FilterHost {
// InitializeTask() performs initialization in multiple passes. It is executed
// as a result of calling Start() or InitializationComplete() that advances
// initialization to the next state. It works as a hub of state transition for
- // initialization.
- void InitializeTask();
+ // initialization. One stage communicates its status to the next through
+ // |last_stage_status|.
+ void InitializeTask(PipelineStatus last_stage_status);
// Stops and destroys all filters, placing the pipeline in the kStopped state.
void StopTask(const PipelineStatusCB& stop_callback);
diff --git a/media/base/pipeline_impl_unittest.cc b/media/base/pipeline_impl_unittest.cc
index fc9692e..e27bdda 100644
--- a/media/base/pipeline_impl_unittest.cc
+++ b/media/base/pipeline_impl_unittest.cc
@@ -21,11 +21,13 @@ using ::testing::_;
using ::testing::DeleteArg;
using ::testing::InSequence;
using ::testing::Invoke;
+using ::testing::InvokeArgument;
using ::testing::Mock;
using ::testing::NotNull;
using ::testing::Return;
using ::testing::ReturnRef;
using ::testing::StrictMock;
+using ::testing::WithArg;
namespace media {
@@ -52,6 +54,11 @@ class CallbackHelper {
DISALLOW_COPY_AND_ASSIGN(CallbackHelper);
};
+// Run |cb| w/ OK status.
+static void RunPipelineStatusOKCB(const PipelineStatusCB& cb) {
+ cb.Run(PIPELINE_OK);
+}
+
// TODO(scherkus): even though some filters are initialized on separate
// threads these tests aren't flaky... why? It's because filters' Initialize()
// is executed on |message_loop_| and the mock filters instantly call
@@ -126,7 +133,7 @@ class PipelineImplTest : public ::testing::Test {
void InitializeVideoDecoder(MockDemuxerStream* stream) {
EXPECT_CALL(*mocks_->video_decoder(),
Initialize(stream, _, _))
- .WillOnce(Invoke(&RunFilterCallback3));
+ .WillOnce(WithArg<1>(Invoke(&RunPipelineStatusOKCB)));
EXPECT_CALL(*mocks_->video_decoder(), SetPlaybackRate(0.0f));
EXPECT_CALL(*mocks_->video_decoder(),
Seek(mocks_->demuxer()->GetStartTime(), _))
diff --git a/media/base/video_decoder_config.cc b/media/base/video_decoder_config.cc
index ee334d3..07bf9a5 100644
--- a/media/base/video_decoder_config.cc
+++ b/media/base/video_decoder_config.cc
@@ -14,6 +14,7 @@ namespace media {
VideoDecoderConfig::VideoDecoderConfig()
: codec_(kUnknownVideoCodec),
+ profile_(VIDEO_CODEC_PROFILE_UNKNOWN),
format_(VideoFrame::INVALID),
frame_rate_numerator_(0),
frame_rate_denominator_(0),
@@ -23,6 +24,7 @@ VideoDecoderConfig::VideoDecoderConfig()
}
VideoDecoderConfig::VideoDecoderConfig(VideoCodec codec,
+ VideoCodecProfile profile,
VideoFrame::Format format,
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
@@ -32,7 +34,7 @@ VideoDecoderConfig::VideoDecoderConfig(VideoCodec codec,
int aspect_ratio_denominator,
const uint8* extra_data,
size_t extra_data_size) {
- Initialize(codec, format, coded_size, visible_rect,
+ Initialize(codec, profile, format, coded_size, visible_rect,
frame_rate_numerator, frame_rate_denominator,
aspect_ratio_numerator, aspect_ratio_denominator,
extra_data, extra_data_size);
@@ -62,7 +64,9 @@ static void UmaHistogramAspectRatio(const char* name, const T& size) {
kCommonAspectRatios100, arraysize(kCommonAspectRatios100)));
}
-void VideoDecoderConfig::Initialize(VideoCodec codec, VideoFrame::Format format,
+void VideoDecoderConfig::Initialize(VideoCodec codec,
+ VideoCodecProfile profile,
+ VideoFrame::Format format,
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
int frame_rate_numerator,
@@ -74,12 +78,15 @@ void VideoDecoderConfig::Initialize(VideoCodec codec, VideoFrame::Format format,
CHECK((extra_data_size != 0) == (extra_data != NULL));
UMA_HISTOGRAM_ENUMERATION("Media.VideoCodec", codec, kVideoCodecMax + 1);
+ UMA_HISTOGRAM_ENUMERATION("Media.VideoCodecProfile", profile,
+ VIDEO_CODEC_PROFILE_MAX + 1);
UMA_HISTOGRAM_COUNTS_10000("Media.VideoCodedWidth", coded_size.width());
UmaHistogramAspectRatio("Media.VideoCodedAspectRatio", coded_size);
UMA_HISTOGRAM_COUNTS_10000("Media.VideoVisibleWidth", visible_rect.width());
UmaHistogramAspectRatio("Media.VideoVisibleAspectRatio", visible_rect);
codec_ = codec;
+ profile_ = profile;
format_ = format;
coded_size_ = coded_size;
visible_rect_ = visible_rect;
@@ -125,10 +132,33 @@ bool VideoDecoderConfig::IsValidConfig() const {
natural_size_.GetArea() <= limits::kMaxCanvas;
}
+std::string VideoDecoderConfig::AsHumanReadableString() const {
+ std::ostringstream s;
+ s << "codec: " << codec()
+ << " format: " << format()
+ << " coded size: [" << coded_size().width()
+ << "," << coded_size().height() << "]"
+ << " visible rect: [" << visible_rect().x()
+ << "," << visible_rect().y()
+ << "," << visible_rect().width()
+ << "," << visible_rect().height() << "]"
+ << " natural size: [" << natural_size().width()
+ << "," << natural_size().height() << "]"
+ << " frame rate: " << frame_rate_numerator()
+ << "/" << frame_rate_denominator()
+ << " aspect ratio: " << aspect_ratio_numerator()
+ << "/" << aspect_ratio_denominator();
+ return s.str();
+}
+
VideoCodec VideoDecoderConfig::codec() const {
return codec_;
}
+VideoCodecProfile VideoDecoderConfig::profile() const {
+ return profile_;
+}
+
VideoFrame::Format VideoDecoderConfig::format() const {
return format_;
}
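Note: since the enums have no operator<<, codec() and format() print as their
integer values. Illustrative output for a VP8/YV12 320x240 stream (a single
line, wrapped here; enum ordinals shown symbolically):

  codec: <kCodecVP8> format: <YV12> coded size: [320,240]
  visible rect: [0,0,320,240] natural size: [320,240]
  frame rate: 30/1 aspect ratio: 1/1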
diff --git a/media/base/video_decoder_config.h b/media/base/video_decoder_config.h
index b02bee8..28b1106 100644
--- a/media/base/video_decoder_config.h
+++ b/media/base/video_decoder_config.h
@@ -14,7 +14,7 @@
namespace media {
-enum VideoCodec {
+enum MEDIA_EXPORT VideoCodec {
// These values are histogrammed over time; do not change their ordinal
// values. When deleting a codec replace it with a dummy value; when adding a
// codec, do so at the bottom (and update kVideoCodecMax).
@@ -33,6 +33,28 @@ enum VideoCodec {
kVideoCodecMax = kCodecVP8 // Must equal the last "real" codec above.
};
+// Video stream profile. This *must* match PP_VideoDecoder_Profile.
+enum MEDIA_EXPORT VideoCodecProfile {
+ // Keep the values in this enum unique, as they imply format (h.264 vs. VP8,
+ // for example), and keep the values for a particular format grouped
+ // together for clarity.
+ VIDEO_CODEC_PROFILE_UNKNOWN = -1,
+ H264PROFILE_MIN = 0,
+ H264PROFILE_BASELINE = H264PROFILE_MIN,
+ H264PROFILE_MAIN,
+ H264PROFILE_EXTENDED,
+ H264PROFILE_HIGH,
+ H264PROFILE_HIGH10PROFILE,
+ H264PROFILE_HIGH422PROFILE,
+ H264PROFILE_HIGH444PREDICTIVEPROFILE,
+ H264PROFILE_SCALABLEBASELINE,
+ H264PROFILE_SCALABLEHIGH,
+ H264PROFILE_STEREOHIGH,
+ H264PROFILE_MULTIVIEWHIGH,
+ H264PROFILE_MAX = H264PROFILE_MULTIVIEWHIGH,
+ VIDEO_CODEC_PROFILE_MAX = H264PROFILE_MAX,
+};
+
class MEDIA_EXPORT VideoDecoderConfig {
public:
// Constructs an uninitialized object. Clients should call Initialize() with
@@ -42,6 +64,7 @@ class MEDIA_EXPORT VideoDecoderConfig {
// Constructs an initialized object. It is acceptable to pass in NULL for
// |extra_data|, otherwise the memory is copied.
VideoDecoderConfig(VideoCodec codec,
+ VideoCodecProfile profile,
VideoFrame::Format format,
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
@@ -53,6 +76,7 @@ class MEDIA_EXPORT VideoDecoderConfig {
// Resets the internal state of this object.
void Initialize(VideoCodec codec,
+ VideoCodecProfile profile,
VideoFrame::Format format,
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
@@ -64,7 +88,12 @@ class MEDIA_EXPORT VideoDecoderConfig {
// otherwise.
bool IsValidConfig() const;
+ // Returns a human-readable string describing |*this|. For debugging & test
+ // output only.
+ std::string AsHumanReadableString() const;
+
VideoCodec codec() const;
+ VideoCodecProfile profile() const;
// Video format used to determine YUV buffer sizes.
VideoFrame::Format format() const;
@@ -102,6 +131,7 @@ class MEDIA_EXPORT VideoDecoderConfig {
private:
VideoCodec codec_;
+ VideoCodecProfile profile_;
VideoFrame::Format format_;
diff --git a/media/base/video_frame.cc b/media/base/video_frame.cc
index a0dc579..29c4223 100644
--- a/media/base/video_frame.cc
+++ b/media/base/video_frame.cc
@@ -17,9 +17,8 @@ scoped_refptr<VideoFrame> VideoFrame::CreateFrame(
base::TimeDelta duration) {
DCHECK(width > 0 && height > 0);
DCHECK(width * height < 100000000);
- scoped_refptr<VideoFrame> frame(new VideoFrame(format, width, height));
- frame->SetTimestamp(timestamp);
- frame->SetDuration(duration);
+ scoped_refptr<VideoFrame> frame(new VideoFrame(
+ format, width, height, timestamp, duration));
switch (format) {
case VideoFrame::RGB555:
case VideoFrame::RGB565:
@@ -47,8 +46,25 @@ scoped_refptr<VideoFrame> VideoFrame::CreateFrame(
}
// static
+scoped_refptr<VideoFrame> VideoFrame::WrapNativeTexture(
+ uint32 texture_id,
+ size_t width,
+ size_t height,
+ base::TimeDelta timestamp,
+ base::TimeDelta duration,
+ const base::Closure& no_longer_needed) {
+ scoped_refptr<VideoFrame> frame(
+ new VideoFrame(NATIVE_TEXTURE, width, height, timestamp, duration));
+ frame->planes_ = 0;
+ frame->texture_id_ = texture_id;
+ frame->texture_no_longer_needed_ = no_longer_needed;
+ return frame;
+}
+
+// static
scoped_refptr<VideoFrame> VideoFrame::CreateEmptyFrame() {
- return new VideoFrame(VideoFrame::EMPTY, 0, 0);
+ return new VideoFrame(
+ VideoFrame::EMPTY, 0, 0, base::TimeDelta(), base::TimeDelta());
}
// static
@@ -138,16 +154,26 @@ void VideoFrame::AllocateYUV() {
VideoFrame::VideoFrame(VideoFrame::Format format,
size_t width,
- size_t height)
+ size_t height,
+ base::TimeDelta timestamp,
+ base::TimeDelta duration)
: format_(format),
width_(width),
height_(height),
- planes_(0) {
+ planes_(0),
+ texture_id_(0) {
+ SetTimestamp(timestamp);
+ SetDuration(duration);
memset(&strides_, 0, sizeof(strides_));
memset(&data_, 0, sizeof(data_));
}
VideoFrame::~VideoFrame() {
+ if (format_ == NATIVE_TEXTURE && !texture_no_longer_needed_.is_null()) {
+ texture_no_longer_needed_.Run();
+ texture_no_longer_needed_.Reset();
+ }
+
// In multi-plane allocations, only a single block of memory is allocated
// on the heap, and other |data| pointers point inside the same, single block
// so just delete index 0.
@@ -167,6 +193,10 @@ bool VideoFrame::IsValidPlane(size_t plane) const {
case YV16:
return plane == kYPlane || plane == kUPlane || plane == kVPlane;
+ case NATIVE_TEXTURE:
+ NOTREACHED() << "NATIVE_TEXTUREs don't use plane-related methods!";
+ return false;
+
default:
break;
}
@@ -236,6 +266,12 @@ uint8* VideoFrame::data(size_t plane) const {
return data_[plane];
}
+uint32 VideoFrame::texture_id() const {
+ DCHECK_EQ(format_, NATIVE_TEXTURE);
+ DCHECK_EQ(planes_, 0U);
+ return texture_id_;
+}
+
bool VideoFrame::IsEndOfStream() const {
return format_ == VideoFrame::EMPTY;
}
diff --git a/media/base/video_frame.h b/media/base/video_frame.h
index ab96544..1801077 100644
--- a/media/base/video_frame.h
+++ b/media/base/video_frame.h
@@ -5,6 +5,7 @@
#ifndef MEDIA_BASE_VIDEO_FRAME_H_
#define MEDIA_BASE_VIDEO_FRAME_H_
+#include "base/callback.h"
#include "media/base/buffers.h"
namespace media {
@@ -25,6 +26,7 @@ class MEDIA_EXPORT VideoFrame : public StreamSample {
// Surface formats roughly based on FOURCC labels, see:
// http://www.fourcc.org/rgb.php
// http://www.fourcc.org/yuv.php
+ // Keep in sync with WebKit::WebVideoFrame!
enum Format {
INVALID, // Invalid format value. Used for error reporting.
RGB555, // 16bpp RGB packed 5:5:5
@@ -38,6 +40,7 @@ class MEDIA_EXPORT VideoFrame : public StreamSample {
EMPTY, // An empty frame.
ASCII, // A frame with ASCII content. For testing only.
I420, // 12bpp YVU planar 1x1 Y, 2x2 UV samples.
+ NATIVE_TEXTURE, // Opaque native texture. Pixel-format agnostic.
};
// Creates a new frame in system memory with given parameters. Buffers for
@@ -49,6 +52,16 @@ class MEDIA_EXPORT VideoFrame : public StreamSample {
base::TimeDelta timestamp,
base::TimeDelta duration);
+ // Wraps a native texture of the given parameters with a VideoFrame. When the
+ // frame is destroyed |no_longer_needed.Run()| will be called.
+ static scoped_refptr<VideoFrame> WrapNativeTexture(
+ uint32 texture_id,
+ size_t width,
+ size_t height,
+ base::TimeDelta timestamp,
+ base::TimeDelta duration,
+ const base::Closure& no_longer_needed);
+
// Creates a frame with format equal to VideoFrame::EMPTY; width, height,
// timestamp and duration are all 0.
static scoped_refptr<VideoFrame> CreateEmptyFrame();
@@ -78,14 +91,20 @@ class MEDIA_EXPORT VideoFrame : public StreamSample {
// VideoFrame object and must not be freed by the caller.
uint8* data(size_t plane) const;
+ // Returns the ID of the native texture wrapped by this frame. Only valid to
+ // call if this is a NATIVE_TEXTURE frame.
+ uint32 texture_id() const;
+
// StreamSample interface.
virtual bool IsEndOfStream() const OVERRIDE;
- protected:
+ private:
// Clients must use the static CreateFrame() method to create a new frame.
VideoFrame(Format format,
size_t video_width,
- size_t video_height);
+ size_t video_height,
+ base::TimeDelta timestamp,
+ base::TimeDelta duration);
virtual ~VideoFrame();
@@ -103,8 +122,8 @@ class MEDIA_EXPORT VideoFrame : public StreamSample {
size_t width_;
size_t height_;
- // Number of planes, typically 1 for packed RGB formats and 3 for planar
- // YUV formats.
+ // Number of planes, typically 1 for packed RGB formats, 3 for planar
+ // YUV formats, and 0 for native textures.
size_t planes_;
// Array of strides for each plane, typically greater or equal to the width
@@ -115,7 +134,11 @@ class MEDIA_EXPORT VideoFrame : public StreamSample {
// Array of data pointers to each plane.
uint8* data_[kMaxPlanes];
- DISALLOW_COPY_AND_ASSIGN(VideoFrame);
+ // Native texture ID, if this is a NATIVE_TEXTURE frame.
+ uint32 texture_id_;
+ base::Closure texture_no_longer_needed_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(VideoFrame);
};
} // namespace media
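Note: a hedged usage sketch -- the producer wraps a texture it owns and gets it
back through the closure once the last reference to the frame is dropped
(GpuVideoDecoder::PictureReady in the new file below does exactly this).
|texture_id| and ReleaseTexture() are hypothetical:

  scoped_refptr<VideoFrame> frame = VideoFrame::WrapNativeTexture(
      texture_id, 640, 480,
      base::TimeDelta::FromMilliseconds(40),  // timestamp
      base::TimeDelta::FromMilliseconds(40),  // duration
      base::Bind(&ReleaseTexture, texture_id));
  // |frame| can now go to a renderer. data(plane) and stride(plane) must not
  // be called on a NATIVE_TEXTURE frame; only texture_id() is valid.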
diff --git a/media/ffmpeg/ffmpeg_common.cc b/media/ffmpeg/ffmpeg_common.cc
index 396ee03..4f3dd0f 100644
--- a/media/ffmpeg/ffmpeg_common.cc
+++ b/media/ffmpeg/ffmpeg_common.cc
@@ -127,6 +127,52 @@ static CodecID VideoCodecToCodecID(VideoCodec video_codec) {
return CODEC_ID_NONE;
}
+static VideoCodecProfile ProfileIDToVideoCodecProfile(int profile) {
+ // Clear out the CONSTRAINED & INTRA flags which are strict subsets of the
+ // corresponding profiles with which they're used.
+ profile &= ~FF_PROFILE_H264_CONSTRAINED;
+ profile &= ~FF_PROFILE_H264_INTRA;
+ switch (profile) {
+ case FF_PROFILE_H264_BASELINE:
+ return H264PROFILE_BASELINE;
+ case FF_PROFILE_H264_MAIN:
+ return H264PROFILE_MAIN;
+ case FF_PROFILE_H264_EXTENDED:
+ return H264PROFILE_EXTENDED;
+ case FF_PROFILE_H264_HIGH:
+ return H264PROFILE_HIGH;
+ case FF_PROFILE_H264_HIGH_10:
+ return H264PROFILE_HIGH10PROFILE;
+ case FF_PROFILE_H264_HIGH_422:
+ return H264PROFILE_HIGH422PROFILE;
+ case FF_PROFILE_H264_HIGH_444_PREDICTIVE:
+ return H264PROFILE_HIGH444PREDICTIVEPROFILE;
+ default:
+ return VIDEO_CODEC_PROFILE_UNKNOWN;
+ }
+}
+
+static int VideoCodecProfileToProfileID(VideoCodecProfile profile) {
+ switch (profile) {
+ case H264PROFILE_BASELINE:
+ return FF_PROFILE_H264_BASELINE;
+ case H264PROFILE_MAIN:
+ return FF_PROFILE_H264_MAIN;
+ case H264PROFILE_EXTENDED:
+ return FF_PROFILE_H264_EXTENDED;
+ case H264PROFILE_HIGH:
+ return FF_PROFILE_H264_HIGH;
+ case H264PROFILE_HIGH10PROFILE:
+ return FF_PROFILE_H264_HIGH_10;
+ case H264PROFILE_HIGH422PROFILE:
+ return FF_PROFILE_H264_HIGH_422;
+ case H264PROFILE_HIGH444PREDICTIVEPROFILE:
+ return FF_PROFILE_H264_HIGH_444_PREDICTIVE;
+ default:
+ return FF_PROFILE_UNKNOWN;
+ }
+}
+
void AVCodecContextToAudioDecoderConfig(
const AVCodecContext* codec_context,
AudioDecoderConfig* config) {
@@ -204,6 +250,7 @@ void AVStreamToVideoDecoderConfig(
aspect_ratio = stream->codec->sample_aspect_ratio;
config->Initialize(CodecIDToVideoCodec(stream->codec->codec_id),
+ ProfileIDToVideoCodecProfile(stream->codec->profile),
PixelFormatToVideoFormat(stream->codec->pix_fmt),
coded_size, visible_rect,
stream->r_frame_rate.num,
@@ -219,6 +266,7 @@ void VideoDecoderConfigToAVCodecContext(
AVCodecContext* codec_context) {
codec_context->codec_type = AVMEDIA_TYPE_VIDEO;
codec_context->codec_id = VideoCodecToCodecID(config.codec());
+ codec_context->profile = VideoCodecProfileToProfileID(config.profile());
codec_context->coded_width = config.coded_size().width();
codec_context->coded_height = config.coded_size().height();
codec_context->pix_fmt = VideoFormatToPixelFormat(config.format());
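Note: the two mappings invert each other on the profiles both switches list, but
the conversion is deliberately lossy for the flag bits; for example, a
constrained-baseline stream folds into plain baseline:

  // FF_PROFILE_H264_CONSTRAINED_BASELINE is BASELINE | CONSTRAINED, so the
  // flag-clearing above reduces it to baseline before the switch.
  ProfileIDToVideoCodecProfile(FF_PROFILE_H264_CONSTRAINED_BASELINE);
  //   -> H264PROFILE_BASELINE
  VideoCodecProfileToProfileID(H264PROFILE_BASELINE);
  //   -> FF_PROFILE_H264_BASELINE (the CONSTRAINED bit is not recoverable)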
diff --git a/media/filters/ffmpeg_video_decoder.cc b/media/filters/ffmpeg_video_decoder.cc
index 39d9e39..66b25b6 100644
--- a/media/filters/ffmpeg_video_decoder.cc
+++ b/media/filters/ffmpeg_video_decoder.cc
@@ -64,7 +64,7 @@ FFmpegVideoDecoder::~FFmpegVideoDecoder() {
}
void FFmpegVideoDecoder::Initialize(DemuxerStream* demuxer_stream,
- const base::Closure& callback,
+ const PipelineStatusCB& callback,
const StatisticsCallback& stats_callback) {
if (MessageLoop::current() != message_loop_) {
message_loop_->PostTask(FROM_HERE, base::Bind(
@@ -76,8 +76,7 @@ void FFmpegVideoDecoder::Initialize(DemuxerStream* demuxer_stream,
DCHECK(!demuxer_stream_);
if (!demuxer_stream) {
- host()->SetError(PIPELINE_ERROR_DECODE);
- callback.Run();
+ callback.Run(PIPELINE_ERROR_DECODE);
return;
}
@@ -89,24 +88,8 @@ void FFmpegVideoDecoder::Initialize(DemuxerStream* demuxer_stream,
// TODO(scherkus): this check should go in PipelineImpl prior to creating
// decoder objects.
if (!config.IsValidConfig()) {
- DLOG(ERROR) << "Invalid video stream -"
- << " codec: " << config.codec()
- << " format: " << config.format()
- << " coded size: [" << config.coded_size().width()
- << "," << config.coded_size().height() << "]"
- << " visible rect: [" << config.visible_rect().x()
- << "," << config.visible_rect().y()
- << "," << config.visible_rect().width()
- << "," << config.visible_rect().height() << "]"
- << " natural size: [" << config.natural_size().width()
- << "," << config.natural_size().height() << "]"
- << " frame rate: " << config.frame_rate_numerator()
- << "/" << config.frame_rate_denominator()
- << " aspect ratio: " << config.aspect_ratio_numerator()
- << "/" << config.aspect_ratio_denominator();
-
- host()->SetError(PIPELINE_ERROR_DECODE);
- callback.Run();
+ DLOG(ERROR) << "Invalid video stream - " << config.AsHumanReadableString();
+ callback.Run(PIPELINE_ERROR_DECODE);
return;
}
@@ -122,14 +105,12 @@ void FFmpegVideoDecoder::Initialize(DemuxerStream* demuxer_stream,
AVCodec* codec = avcodec_find_decoder(codec_context_->codec_id);
if (!codec) {
- host()->SetError(PIPELINE_ERROR_DECODE);
- callback.Run();
+ callback.Run(PIPELINE_ERROR_DECODE);
return;
}
if (avcodec_open(codec_context_, codec) < 0) {
- host()->SetError(PIPELINE_ERROR_DECODE);
- callback.Run();
+ callback.Run(PIPELINE_ERROR_DECODE);
return;
}
@@ -140,7 +121,7 @@ void FFmpegVideoDecoder::Initialize(DemuxerStream* demuxer_stream,
natural_size_ = config.natural_size();
frame_rate_numerator_ = config.frame_rate_numerator();
frame_rate_denominator_ = config.frame_rate_denominator();
- callback.Run();
+ callback.Run(PIPELINE_OK);
}
void FFmpegVideoDecoder::Stop(const base::Closure& callback) {
diff --git a/media/filters/ffmpeg_video_decoder.h b/media/filters/ffmpeg_video_decoder.h
index 34cf700..c5a412f 100644
--- a/media/filters/ffmpeg_video_decoder.h
+++ b/media/filters/ffmpeg_video_decoder.h
@@ -32,7 +32,7 @@ class MEDIA_EXPORT FFmpegVideoDecoder : public VideoDecoder {
// VideoDecoder implementation.
virtual void Initialize(DemuxerStream* demuxer_stream,
- const base::Closure& callback,
+ const PipelineStatusCB& callback,
const StatisticsCallback& stats_callback) OVERRIDE;
virtual void Read(const ReadCB& callback) OVERRIDE;
virtual const gfx::Size& natural_size() OVERRIDE;
diff --git a/media/filters/ffmpeg_video_decoder_unittest.cc b/media/filters/ffmpeg_video_decoder_unittest.cc
index 30ed2b4..b072993 100644
--- a/media/filters/ffmpeg_video_decoder_unittest.cc
+++ b/media/filters/ffmpeg_video_decoder_unittest.cc
@@ -58,7 +58,8 @@ class FFmpegVideoDecoderTest : public testing::Test {
ReadTestDataFile("vp8-I-frame-320x240", &i_frame_buffer_);
ReadTestDataFile("vp8-corrupt-I-frame", &corrupt_i_frame_buffer_);
- config_.Initialize(kCodecVP8, kVideoFormat, kCodedSize, kVisibleRect,
+ config_.Initialize(kCodecVP8, VIDEO_CODEC_PROFILE_UNKNOWN,
+ kVideoFormat, kCodedSize, kVisibleRect,
kFrameRate.num, kFrameRate.den,
kAspectRatio.num, kAspectRatio.den,
NULL, 0);
@@ -70,17 +71,22 @@ class FFmpegVideoDecoderTest : public testing::Test {
InitializeWithConfig(config_);
}
- void InitializeWithConfig(const VideoDecoderConfig& config) {
+ void InitializeWithConfigAndStatus(const VideoDecoderConfig& config,
+ PipelineStatus status) {
EXPECT_CALL(*demuxer_, video_decoder_config())
.WillOnce(ReturnRef(config));
- decoder_->Initialize(demuxer_, NewExpectedClosure(),
+ decoder_->Initialize(demuxer_, NewExpectedStatusCB(status),
base::Bind(&MockStatisticsCallback::OnStatistics,
base::Unretained(&statistics_callback_)));
message_loop_.RunAllPending();
}
+ void InitializeWithConfig(const VideoDecoderConfig& config) {
+ InitializeWithConfigAndStatus(config, PIPELINE_OK);
+ }
+
void Pause() {
decoder_->Pause(NewExpectedClosure());
message_loop_.RunAllPending();
@@ -242,38 +248,35 @@ TEST_F(FFmpegVideoDecoderTest, Initialize_Normal) {
TEST_F(FFmpegVideoDecoderTest, Initialize_UnsupportedDecoder) {
// Test avcodec_find_decoder() returning NULL.
- VideoDecoderConfig config(kUnknownVideoCodec, kVideoFormat,
+ VideoDecoderConfig config(kUnknownVideoCodec, VIDEO_CODEC_PROFILE_UNKNOWN,
+ kVideoFormat,
kCodedSize, kVisibleRect,
kFrameRate.num, kFrameRate.den,
kAspectRatio.num, kAspectRatio.den,
NULL, 0);
-
- EXPECT_CALL(host_, SetError(PIPELINE_ERROR_DECODE));
- InitializeWithConfig(config);
+ InitializeWithConfigAndStatus(config, PIPELINE_ERROR_DECODE);
}
TEST_F(FFmpegVideoDecoderTest, Initialize_UnsupportedPixelFormat) {
// Ensure decoder handles unsupported pixel formats without crashing.
- VideoDecoderConfig config(kCodecVP8, VideoFrame::INVALID,
+ VideoDecoderConfig config(kCodecVP8, VIDEO_CODEC_PROFILE_UNKNOWN,
+ VideoFrame::INVALID,
kCodedSize, kVisibleRect,
kFrameRate.num, kFrameRate.den,
kAspectRatio.num, kAspectRatio.den,
NULL, 0);
-
- EXPECT_CALL(host_, SetError(PIPELINE_ERROR_DECODE));
- InitializeWithConfig(config);
+ InitializeWithConfigAndStatus(config, PIPELINE_ERROR_DECODE);
}
TEST_F(FFmpegVideoDecoderTest, Initialize_OpenDecoderFails) {
// Specify Theora w/o extra data so that avcodec_open() fails.
- VideoDecoderConfig config(kCodecTheora, kVideoFormat,
+ VideoDecoderConfig config(kCodecTheora, VIDEO_CODEC_PROFILE_UNKNOWN,
+ kVideoFormat,
kCodedSize, kVisibleRect,
kFrameRate.num, kFrameRate.den,
kAspectRatio.num, kAspectRatio.den,
NULL, 0);
-
- EXPECT_CALL(host_, SetError(PIPELINE_ERROR_DECODE));
- InitializeWithConfig(config);
+ InitializeWithConfigAndStatus(config, PIPELINE_ERROR_DECODE);
}
TEST_F(FFmpegVideoDecoderTest, DecodeFrame_Normal) {
diff --git a/media/filters/gpu_video_decoder.cc b/media/filters/gpu_video_decoder.cc
new file mode 100644
index 0000000..11580a3
--- /dev/null
+++ b/media/filters/gpu_video_decoder.cc
@@ -0,0 +1,443 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/filters/gpu_video_decoder.h"
+
+#include "base/bind.h"
+#include "base/message_loop.h"
+#include "base/stl_util.h"
+#include "media/base/demuxer_stream.h"
+#include "media/base/filter_host.h"
+#include "media/base/video_decoder_config.h"
+#include "media/ffmpeg/ffmpeg_common.h"
+
+namespace media {
+
+GpuVideoDecoder::Factories::~Factories() {}
+
+// Size of shared-memory segments we allocate. Since we reuse them we let them
+// be on the beefy side.
+static const size_t kSharedMemorySegmentBytes = 100 << 10;
+
+GpuVideoDecoder::SHMBuffer::SHMBuffer(base::SharedMemory* m, size_t s)
+ : shm(m), size(s) {
+}
+
+GpuVideoDecoder::SHMBuffer::~SHMBuffer() {}
+
+GpuVideoDecoder::BufferPair::BufferPair(
+ SHMBuffer* s, const scoped_refptr<Buffer>& b) : shm_buffer(s), buffer(b) {
+}
+
+GpuVideoDecoder::BufferPair::~BufferPair() {}
+
+GpuVideoDecoder::GpuVideoDecoder(
+ MessageLoop* message_loop,
+ Factories* factories)
+ : message_loop_(message_loop),
+ factories_(factories),
+ flush_in_progress_(false),
+ demuxer_read_in_progress_(false),
+ next_picture_buffer_id_(0),
+ next_bitstream_buffer_id_(0) {
+ DCHECK(message_loop_ && factories_.get());
+}
+
+GpuVideoDecoder::~GpuVideoDecoder() {
+ DCHECK(!vda_); // Stop should have been already called.
+ STLDeleteElements(&available_shm_segments_);
+ for (std::map<int32, BufferPair>::iterator it =
+ bitstream_buffers_in_decoder_.begin();
+ it != bitstream_buffers_in_decoder_.end(); ++it) {
+ it->second.shm_buffer->shm->Close();
+ }
+ bitstream_buffers_in_decoder_.clear();
+}
+
+void GpuVideoDecoder::Stop(const base::Closure& callback) {
+ if (MessageLoop::current() != message_loop_) {
+ message_loop_->PostTask(FROM_HERE, base::Bind(
+ &GpuVideoDecoder::Stop, this, callback));
+ return;
+ }
+ if (!vda_) {
+ callback.Run();
+ return;
+ }
+ vda_->Destroy();
+ vda_ = NULL;
+ callback.Run();
+}
+
+void GpuVideoDecoder::Seek(base::TimeDelta time, const FilterStatusCB& cb) {
+ if (MessageLoop::current() != message_loop_) {
+ message_loop_->PostTask(FROM_HERE, base::Bind(
+ &GpuVideoDecoder::Seek, this, time, cb));
+ return;
+ }
+ pts_stream_.Seek(time);
+ cb.Run(PIPELINE_OK);
+}
+
+void GpuVideoDecoder::Pause(const base::Closure& callback) {
+ if (MessageLoop::current() != message_loop_) {
+ message_loop_->PostTask(FROM_HERE, base::Bind(
+ &GpuVideoDecoder::Pause, this, callback));
+ return;
+ }
+ callback.Run();
+}
+
+void GpuVideoDecoder::Flush(const base::Closure& callback) {
+ if (MessageLoop::current() != message_loop_) {
+ message_loop_->PostTask(FROM_HERE, base::Bind(
+ &GpuVideoDecoder::Flush, this, callback));
+ return;
+ }
+ // Pipeline should have quiesced (via Pause() to all filters) before calling
+ // us, so there should be nothing pending.
+ DCHECK(pending_read_cb_.is_null());
+
+ // Throw away any already-decoded frames.
+ ready_video_frames_.clear();
+
+ if (!vda_) {
+ callback.Run();
+ return;
+ }
+ DCHECK(pending_flush_cb_.is_null());
+ pending_flush_cb_ = callback;
+ pts_stream_.Flush();
+ vda_->Reset();
+}
+
+void GpuVideoDecoder::Initialize(DemuxerStream* demuxer_stream,
+ const PipelineStatusCB& callback,
+ const StatisticsCallback& stats_callback) {
+ if (MessageLoop::current() != message_loop_) {
+ message_loop_->PostTask(FROM_HERE, base::Bind(
+ &GpuVideoDecoder::Initialize, this,
+ make_scoped_refptr(demuxer_stream), callback, stats_callback));
+ return;
+ }
+
+ DCHECK(!demuxer_stream_);
+ if (!demuxer_stream) {
+ callback.Run(PIPELINE_ERROR_DECODE);
+ return;
+ }
+
+ const VideoDecoderConfig& config = demuxer_stream->video_decoder_config();
+ // TODO(scherkus): this check should go in PipelineImpl prior to creating
+ // decoder objects.
+ if (!config.IsValidConfig()) {
+ DLOG(ERROR) << "Invalid video stream - " << config.AsHumanReadableString();
+ callback.Run(PIPELINE_ERROR_DECODE);
+ return;
+ }
+
+ vda_ = factories_->CreateVideoDecodeAccelerator(config.profile(), this);
+ if (!vda_) {
+ callback.Run(DECODER_ERROR_NOT_SUPPORTED);
+ return;
+ }
+
+ demuxer_stream_ = demuxer_stream;
+ statistics_callback_ = stats_callback;
+
+ demuxer_stream_->EnableBitstreamConverter();
+
+ pts_stream_.Initialize(GetFrameDuration(config));
+ natural_size_ = config.natural_size();
+
+ callback.Run(PIPELINE_OK);
+}
+
+void GpuVideoDecoder::Read(const ReadCB& callback) {
+ if (MessageLoop::current() != message_loop_) {
+ message_loop_->PostTask(FROM_HERE, base::Bind(
+ &GpuVideoDecoder::Read, this, callback));
+ return;
+ }
+
+ if (!vda_) {
+ callback.Run(VideoFrame::CreateEmptyFrame());
+ return;
+ }
+
+ DCHECK(pending_read_cb_.is_null());
+ pending_read_cb_ = callback;
+
+ if (!ready_video_frames_.empty()) {
+ DeliverFrame(ready_video_frames_.front());
+ ready_video_frames_.pop_front();
+ return;
+ }
+ EnsureDemuxOrDecode();
+}
+
+void GpuVideoDecoder::RequestBufferDecode(const scoped_refptr<Buffer>& buffer) {
+ if (MessageLoop::current() != message_loop_) {
+ message_loop_->PostTask(FROM_HERE, base::Bind(
+ &GpuVideoDecoder::RequestBufferDecode, this, buffer));
+ return;
+ }
+ demuxer_read_in_progress_ = false;
+
+ if (!vda_) {
+ DeliverFrame(VideoFrame::CreateEmptyFrame());
+ return;
+ }
+
+ if (buffer->IsEndOfStream()) {
+ if (!flush_in_progress_) {
+ flush_in_progress_ = true;
+ vda_->Flush();
+ }
+ return;
+ }
+
+ size_t size = buffer->GetDataSize();
+ SHMBuffer* shm_buffer = GetSHM(size);
+ memcpy(shm_buffer->shm->memory(), buffer->GetData(), size);
+ BitstreamBuffer bitstream_buffer(
+ next_bitstream_buffer_id_++, shm_buffer->shm->handle(), size);
+ bool inserted = bitstream_buffers_in_decoder_.insert(std::make_pair(
+ bitstream_buffer.id(), BufferPair(shm_buffer, buffer))).second;
+ DCHECK(inserted);
+ pts_stream_.EnqueuePts(buffer.get());
+
+ vda_->Decode(bitstream_buffer);
+}
+
+const gfx::Size& GpuVideoDecoder::natural_size() {
+ return natural_size_;
+}
+
+void GpuVideoDecoder::NotifyInitializeDone() {
+ NOTREACHED() << "GpuVideoDecodeAcceleratorHost::Initialize is synchronous!";
+}
+
+void GpuVideoDecoder::ProvidePictureBuffers(uint32 count,
+ const gfx::Size& size) {
+ if (MessageLoop::current() != message_loop_) {
+ message_loop_->PostTask(FROM_HERE, base::Bind(
+ &GpuVideoDecoder::ProvidePictureBuffers, this, count, size));
+ return;
+ }
+
+ std::vector<uint32> texture_ids;
+ if (!factories_->CreateTextures(count, size, &texture_ids)) {
+ NotifyError(VideoDecodeAccelerator::PLATFORM_FAILURE);
+ return;
+ }
+
+ if (!vda_)
+ return;
+
+ std::vector<PictureBuffer> picture_buffers;
+ for (size_t i = 0; i < texture_ids.size(); ++i) {
+ picture_buffers.push_back(PictureBuffer(
+ next_picture_buffer_id_++, size, texture_ids[i]));
+ bool inserted = picture_buffers_in_decoder_.insert(std::make_pair(
+ picture_buffers.back().id(), picture_buffers.back())).second;
+ DCHECK(inserted);
+ }
+ vda_->AssignPictureBuffers(picture_buffers);
+}
+
+void GpuVideoDecoder::DismissPictureBuffer(int32 id) {
+ if (MessageLoop::current() != message_loop_) {
+ message_loop_->PostTask(FROM_HERE, base::Bind(
+ &GpuVideoDecoder::DismissPictureBuffer, this, id));
+ return;
+ }
+ std::map<int32, PictureBuffer>::iterator it =
+ picture_buffers_in_decoder_.find(id);
+ if (it == picture_buffers_in_decoder_.end()) {
+ NOTREACHED() << "Missing picture buffer: " << id;
+ return;
+ }
+ if (!factories_->DeleteTexture(it->second.texture_id())) {
+ NotifyError(VideoDecodeAccelerator::PLATFORM_FAILURE);
+ return;
+ }
+ picture_buffers_in_decoder_.erase(it);
+}
+
+static void ResetAndRunCB(VideoDecoder::ReadCB* cb,
+ scoped_refptr<VideoFrame> frame) {
+ DCHECK(!cb->is_null());
+ VideoDecoder::ReadCB tmp_cb(*cb);
+ cb->Reset();
+ tmp_cb.Run(frame);
+}
+
+void GpuVideoDecoder::PictureReady(const media::Picture& picture) {
+ if (MessageLoop::current() != message_loop_) {
+ message_loop_->PostTask(FROM_HERE, base::Bind(
+ &GpuVideoDecoder::PictureReady, this, picture));
+ return;
+ }
+ std::map<int32, PictureBuffer>::iterator it =
+ picture_buffers_in_decoder_.find(picture.picture_buffer_id());
+ if (it == picture_buffers_in_decoder_.end()) {
+ NOTREACHED() << "Missing picture buffer: " << picture.picture_buffer_id();
+ NotifyError(VideoDecodeAccelerator::PLATFORM_FAILURE);
+ return;
+ }
+ const PictureBuffer& pb = it->second;
+
+ // Update frame's timestamp.
+ base::TimeDelta timestamp;
+ base::TimeDelta duration;
+ std::map<int32, BufferPair>::const_iterator buf_it =
+ bitstream_buffers_in_decoder_.find(picture.bitstream_buffer_id());
+ if (buf_it != bitstream_buffers_in_decoder_.end()) {
+ // Sufficiently out-of-order decoding could have already called
+ // NotifyEndOfBitstreamBuffer on this buffer, but that's ok since we only
+ // need the buffer's time info for best-effort PTS updating.
+ timestamp = buf_it->second.buffer->GetTimestamp();
+ duration = buf_it->second.buffer->GetDuration();
+ }
+
+ scoped_refptr<VideoFrame> frame(VideoFrame::WrapNativeTexture(
+ pb.texture_id(), pb.size().width(),
+ pb.size().height(), timestamp, duration,
+ base::Bind(&GpuVideoDecoder::ReusePictureBuffer, this,
+ picture.picture_buffer_id())));
+ pts_stream_.UpdatePtsAndDuration(frame.get());
+ frame->SetTimestamp(pts_stream_.current_pts());
+ frame->SetDuration(pts_stream_.current_duration());
+
+ // Deliver the frame.
+ DeliverFrame(frame);
+}
+
+void GpuVideoDecoder::DeliverFrame(const scoped_refptr<VideoFrame>& frame) {
+ message_loop_->PostTask(FROM_HERE, base::Bind(
+ &GpuVideoDecoder::DeliverFrameOutOfLine, this, frame));
+}
+
+void GpuVideoDecoder::DeliverFrameOutOfLine(
+ const scoped_refptr<VideoFrame>& frame) {
+ if (pending_read_cb_.is_null()) {
+ ready_video_frames_.push_back(frame);
+ return;
+ }
+ ResetAndRunCB(&pending_read_cb_, frame);
+}
+
+void GpuVideoDecoder::ReusePictureBuffer(int64 picture_buffer_id) {
+ if (MessageLoop::current() != message_loop_) {
+ message_loop_->PostTask(FROM_HERE, base::Bind(
+ &GpuVideoDecoder::ReusePictureBuffer, this, picture_buffer_id));
+ return;
+ }
+ if (!vda_)
+ return;
+ vda_->ReusePictureBuffer(picture_buffer_id);
+}
+
+GpuVideoDecoder::SHMBuffer* GpuVideoDecoder::GetSHM(size_t min_size) {
+ DCHECK(MessageLoop::current() == message_loop_);
+ if (available_shm_segments_.empty() ||
+ available_shm_segments_.back()->size < min_size) {
+ size_t size_to_allocate = std::max(min_size, kSharedMemorySegmentBytes);
+ base::SharedMemory* shm = factories_->CreateSharedMemory(size_to_allocate);
+ DCHECK(shm);
+ return new SHMBuffer(shm, size_to_allocate);
+ }
+ SHMBuffer* ret = available_shm_segments_.back();
+ available_shm_segments_.pop_back();
+ return ret;
+}
+
+void GpuVideoDecoder::PutSHM(SHMBuffer* shm_buffer) {
+ DCHECK(MessageLoop::current() == message_loop_);
+ available_shm_segments_.push_back(shm_buffer);
+}
+
+void GpuVideoDecoder::NotifyEndOfStream() {
+ if (MessageLoop::current() != message_loop_) {
+ message_loop_->PostTask(FROM_HERE, base::Bind(
+ &GpuVideoDecoder::NotifyEndOfStream, this));
+ return;
+ }
+ DeliverFrame(VideoFrame::CreateEmptyFrame());
+}
+
+void GpuVideoDecoder::NotifyEndOfBitstreamBuffer(int32 id) {
+ if (MessageLoop::current() != message_loop_) {
+ message_loop_->PostTask(FROM_HERE, base::Bind(
+ &GpuVideoDecoder::NotifyEndOfBitstreamBuffer, this, id));
+ return;
+ }
+
+ std::map<int32, BufferPair>::iterator it =
+ bitstream_buffers_in_decoder_.find(id);
+ if (it == bitstream_buffers_in_decoder_.end()) {
+ NotifyError(VideoDecodeAccelerator::PLATFORM_FAILURE);
+ NOTREACHED() << "Missing bitstream buffer: " << id;
+ return;
+ }
+ PutSHM(it->second.shm_buffer);
+ const scoped_refptr<Buffer>& buffer = it->second.buffer;
+ if (buffer->GetDataSize()) {
+ PipelineStatistics statistics;
+ statistics.video_bytes_decoded = buffer->GetDataSize();
+ statistics_callback_.Run(statistics);
+ }
+ bitstream_buffers_in_decoder_.erase(it);
+
+ if (!pending_read_cb_.is_null()) {
+ DCHECK(ready_video_frames_.empty());
+ EnsureDemuxOrDecode();
+ }
+}
+
+void GpuVideoDecoder::EnsureDemuxOrDecode() {
+ DCHECK(MessageLoop::current() == message_loop_);
+ if (demuxer_read_in_progress_ || !bitstream_buffers_in_decoder_.empty())
+ return;
+ demuxer_read_in_progress_ = true;
+ demuxer_stream_->Read(base::Bind(
+ &GpuVideoDecoder::RequestBufferDecode, this));
+}
+
+void GpuVideoDecoder::NotifyFlushDone() {
+ if (MessageLoop::current() != message_loop_) {
+ message_loop_->PostTask(FROM_HERE, base::Bind(
+ &GpuVideoDecoder::NotifyFlushDone, this));
+ return;
+ }
+ DCHECK(flush_in_progress_);
+ flush_in_progress_ = false;
+ DeliverFrame(VideoFrame::CreateEmptyFrame());
+}
+
+void GpuVideoDecoder::NotifyResetDone() {
+ if (MessageLoop::current() != message_loop_) {
+ message_loop_->PostTask(FROM_HERE, base::Bind(
+ &GpuVideoDecoder::NotifyResetDone, this));
+ return;
+ }
+ // Throw away any already-decoded frames that have come in during the reset.
+ ready_video_frames_.clear();
+ ResetAndRunCB(&pending_flush_cb_);
+}
+
+void GpuVideoDecoder::NotifyError(media::VideoDecodeAccelerator::Error error) {
+ if (MessageLoop::current() != message_loop_) {
+ message_loop_->PostTask(FROM_HERE, base::Bind(
+ &GpuVideoDecoder::NotifyError, this, error));
+ return;
+ }
+ vda_ = NULL;
+ DLOG(ERROR) << "VDA Error: " << error;
+ if (host())
+ host()->SetError(PIPELINE_ERROR_DECODE);
+}
+
+} // namespace media
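Note: the DeliverFrame()/DeliverFrameOutOfLine() pair exists to honor
VideoDecoder::Read()'s promise never to run its callback before returning. A
sketch of the reentrancy the PostTask hop prevents (Caller is hypothetical):

  void Caller::OnFrame(scoped_refptr<VideoFrame> frame) {
    // ... consume |frame| ...
    decoder_->Read(base::Bind(&Caller::OnFrame, base::Unretained(this)));
  }
  // If Read() ran its callback synchronously whenever ready_video_frames_
  // was non-empty, OnFrame() would recurse unboundedly. Posting through
  // |message_loop_| guarantees each OnFrame() returns before the next runs.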
diff --git a/media/filters/gpu_video_decoder.h b/media/filters/gpu_video_decoder.h
new file mode 100644
index 0000000..bb841ea
--- /dev/null
+++ b/media/filters/gpu_video_decoder.h
@@ -0,0 +1,169 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_FILTERS_GPU_VIDEO_DECODER_H_
+#define MEDIA_FILTERS_GPU_VIDEO_DECODER_H_
+
+#include <deque>
+#include <list>
+#include <map>
+#include <vector>
+
+#include "base/memory/scoped_ptr.h"
+#include "media/base/filters.h"
+#include "media/base/pipeline_status.h"
+#include "media/base/pts_stream.h"
+#include "media/video/video_decode_accelerator.h"
+#include "ui/gfx/size.h"
+
+class MessageLoop;
+namespace base {
+class SharedMemory;
+}
+
+namespace media {
+
+// GPU-accelerated video decoder implementation. Relies on
+// AcceleratedVideoDecoderMsg_Decode and friends.
+// All methods internally trampoline to the message_loop passed to the ctor.
+class MEDIA_EXPORT GpuVideoDecoder
+ : public VideoDecoder,
+ public VideoDecodeAccelerator::Client {
+ public:
+ // Helper interface for specifying factories needed to instantiate a
+ // GpuVideoDecoder.
+ class MEDIA_EXPORT Factories {
+ public:
+ virtual ~Factories();
+
+ // Caller owns returned pointer.
+ virtual VideoDecodeAccelerator* CreateVideoDecodeAccelerator(
+ VideoDecodeAccelerator::Profile, VideoDecodeAccelerator::Client*) = 0;
+
+ // Allocate & delete native textures.
+ virtual bool CreateTextures(int32 count, const gfx::Size& size,
+ std::vector<uint32>* texture_ids) = 0;
+ virtual bool DeleteTexture(uint32 texture_id) = 0;
+
+ // Allocate & return a shared memory segment. Caller is responsible for
+ // Close()ing the returned pointer.
+ virtual base::SharedMemory* CreateSharedMemory(size_t size) = 0;
+ };
+
+ // Takes ownership of |factories| but not |message_loop|.
+ GpuVideoDecoder(MessageLoop* message_loop, Factories* factories);
+ virtual ~GpuVideoDecoder();
+
+ // Filter implementation.
+ virtual void Stop(const base::Closure& callback) OVERRIDE;
+ virtual void Seek(base::TimeDelta time, const FilterStatusCB& cb) OVERRIDE;
+ virtual void Pause(const base::Closure& callback) OVERRIDE;
+ virtual void Flush(const base::Closure& callback) OVERRIDE;
+
+ // VideoDecoder implementation.
+ virtual void Initialize(DemuxerStream* demuxer_stream,
+ const PipelineStatusCB& callback,
+ const StatisticsCallback& stats_callback) OVERRIDE;
+ virtual void Read(const ReadCB& callback) OVERRIDE;
+ virtual const gfx::Size& natural_size() OVERRIDE;
+
+ // VideoDecodeAccelerator::Client implementation.
+ virtual void NotifyInitializeDone() OVERRIDE;
+ virtual void ProvidePictureBuffers(uint32 count,
+ const gfx::Size& size) OVERRIDE;
+ virtual void DismissPictureBuffer(int32 id) OVERRIDE;
+ virtual void PictureReady(const media::Picture& picture) OVERRIDE;
+ virtual void NotifyEndOfStream() OVERRIDE;
+ virtual void NotifyEndOfBitstreamBuffer(int32 id) OVERRIDE;
+ virtual void NotifyFlushDone() OVERRIDE;
+ virtual void NotifyResetDone() OVERRIDE;
+ virtual void NotifyError(media::VideoDecodeAccelerator::Error error) OVERRIDE;
+
+ private:
+ // If no demuxer read is in flight and no bitstream buffers are in the
+ // decoder, kick off some demuxing/decoding.
+ void EnsureDemuxOrDecode();
+
+ // Callback to pass to demuxer_stream_->Read() for receiving encoded bits.
+ void RequestBufferDecode(const scoped_refptr<Buffer>& buffer);
+
+ // Deliver a frame to the client. Because VideoDecoder::Read() promises not
+ // to run its callback before returning, we need an out-of-line helper here.
+ void DeliverFrame(const scoped_refptr<VideoFrame>& frame);
+ void DeliverFrameOutOfLine(const scoped_refptr<VideoFrame>& frame);
+
+ // Indicate that the picture buffer can be reused by the decoder.
+ void ReusePictureBuffer(int64 picture_buffer_id);
+
+ // A shared memory segment and its allocated size.
+ struct SHMBuffer {
+ SHMBuffer(base::SharedMemory* m, size_t s);
+ ~SHMBuffer();
+ base::SharedMemory* shm;
+ size_t size;
+ };
+
+ // Request a shared-memory segment of at least |min_size| bytes. Will
+ // allocate as necessary. Caller does not own returned pointer.
+ SHMBuffer* GetSHM(size_t min_size);
+
+ // Return a shared-memory segment to the available pool.
+ void PutSHM(SHMBuffer* shm_buffer);
+
+ PtsStream pts_stream_;
+ StatisticsCallback statistics_callback_;
+
+ // TODO(scherkus): I think this should be calculated by VideoRenderers based
+ // on information provided by VideoDecoders (i.e., aspect ratio).
+ gfx::Size natural_size_;
+
+ // Pointer to the demuxer stream that will feed us compressed buffers.
+ scoped_refptr<DemuxerStream> demuxer_stream_;
+
+ // MessageLoop on which to fire callbacks, and to which calls to this class
+ // are trampolined if they arrive on other loops.
+ MessageLoop* message_loop_;
+
+ scoped_ptr<Factories> factories_;
+
+ // Populated during Initialize() (on success) and unchanged thereafter.
+ scoped_refptr<VideoDecodeAccelerator> vda_;
+
+ // Callbacks that are non-null only while their respective operations are
+ // being executed asynchronously.
+ ReadCB pending_read_cb_;
+ base::Closure pending_flush_cb_;
+
+ // Is a flush (VDA::Flush()) in progress?
+ bool flush_in_progress_;
+
+ // Is a demuxer read in flight?
+ bool demuxer_read_in_progress_;
+
+ // Shared-memory buffer pool. Since allocating SHM segments requires a
+ // round-trip to the browser process, we keep allocation out of the
+ // steady-state of the decoder.
+ std::vector<SHMBuffer*> available_shm_segments_;
+
+ // Book-keeping variables.
+ struct BufferPair {
+ BufferPair(SHMBuffer* s, const scoped_refptr<Buffer>& b);
+ ~BufferPair();
+ SHMBuffer* shm_buffer;
+ scoped_refptr<Buffer> buffer;
+ };
+ std::map<int32, BufferPair> bitstream_buffers_in_decoder_;
+ std::map<int32, PictureBuffer> picture_buffers_in_decoder_;
+
+ // Frames that have been decoded but haven't been requested by a Read() yet.
+ std::list<scoped_refptr<VideoFrame> > ready_video_frames_;
+ int64 next_picture_buffer_id_;
+ int64 next_bitstream_buffer_id_;
+
+ DISALLOW_COPY_AND_ASSIGN(GpuVideoDecoder);
+};
+
+} // namespace media
+
+#endif // MEDIA_FILTERS_GPU_VIDEO_DECODER_H_
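Note: everything platform-specific sits behind Factories. A hedged stub of the
shape an embedder has to provide (names hypothetical; the real implementation
lives outside media/ and talks to the GPU process):

  class StubGpuFactories : public GpuVideoDecoder::Factories {
   public:
    virtual ~StubGpuFactories() {}

    virtual VideoDecodeAccelerator* CreateVideoDecodeAccelerator(
        VideoDecodeAccelerator::Profile profile,
        VideoDecodeAccelerator::Client* client) {
      return NULL;  // NULL => Initialize() yields DECODER_ERROR_NOT_SUPPORTED.
    }
    virtual bool CreateTextures(int32 count, const gfx::Size& size,
                                std::vector<uint32>* texture_ids) {
      return false;  // A real embedder allocates GL textures here.
    }
    virtual bool DeleteTexture(uint32 texture_id) { return false; }
    virtual base::SharedMemory* CreateSharedMemory(size_t size) {
      // Assumes base::SharedMemory::CreateAndMapAnonymous(); per the contract
      // above, the caller Close()s and deletes the returned segment.
      scoped_ptr<base::SharedMemory> shm(new base::SharedMemory());
      return shm->CreateAndMapAnonymous(size) ? shm.release() : NULL;
    }
  };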
diff --git a/media/filters/video_renderer_base.cc b/media/filters/video_renderer_base.cc
index 4cd7775..7b40fbc 100644
--- a/media/filters/video_renderer_base.cc
+++ b/media/filters/video_renderer_base.cc
@@ -97,17 +97,15 @@ void VideoRendererBase::SetPlaybackRate(float playback_rate) {
}
void VideoRendererBase::Seek(base::TimeDelta time, const FilterStatusCB& cb) {
- {
- base::AutoLock auto_lock(lock_);
- DCHECK_EQ(state_, kFlushed) << "Must flush prior to seeking.";
- DCHECK(!cb.is_null());
- DCHECK(seek_cb_.is_null());
-
- state_ = kSeeking;
- seek_cb_ = cb;
- seek_timestamp_ = time;
- AttemptRead_Locked();
- }
+ base::AutoLock auto_lock(lock_);
+ DCHECK_EQ(state_, kFlushed) << "Must flush prior to seeking.";
+ DCHECK(!cb.is_null());
+ DCHECK(seek_cb_.is_null());
+
+ state_ = kSeeking;
+ seek_cb_ = cb;
+ seek_timestamp_ = time;
+ AttemptRead_Locked();
}
void VideoRendererBase::Initialize(VideoDecoder* decoder,
@@ -276,7 +274,6 @@ void VideoRendererBase::ThreadMain() {
frames_queue_ready_.pop_front();
AttemptRead_Locked();
}
-
// Continue waiting for the current paint to finish.
continue;
}
@@ -479,7 +476,6 @@ void VideoRendererBase::DoStopOrError_Locked() {
lock_.AssertAcquired();
current_frame_ = NULL;
last_available_frame_ = NULL;
- DCHECK(!pending_read_);
}
} // namespace media
diff --git a/media/media.gyp b/media/media.gyp
index abe8310..265ca69 100644
--- a/media/media.gyp
+++ b/media/media.gyp
@@ -205,6 +205,8 @@
'filters/file_data_source.h',
'filters/file_data_source_factory.cc',
'filters/file_data_source_factory.h',
+ 'filters/gpu_video_decoder.cc',
+ 'filters/gpu_video_decoder.h',
'filters/in_memory_url_protocol.cc',
'filters/in_memory_url_protocol.h',
'filters/null_audio_renderer.cc',
diff --git a/media/video/video_decode_accelerator.h b/media/video/video_decode_accelerator.h
index 9a31bfd..ab5d1fb 100644
--- a/media/video/video_decode_accelerator.h
+++ b/media/video/video_decode_accelerator.h
@@ -10,6 +10,7 @@
#include "base/basictypes.h"
#include "base/callback_old.h"
#include "media/base/bitstream_buffer.h"
+#include "media/base/video_decoder_config.h"
#include "media/video/picture.h"
#include "ui/gfx/size.h"
@@ -24,25 +25,8 @@ namespace media {
class MEDIA_EXPORT VideoDecodeAccelerator
: public base::RefCountedThreadSafe<VideoDecodeAccelerator> {
public:
- // Video stream profile. This *must* match PP_VideoDecoder_Profile.
- enum Profile {
- // Keep the values in this enum unique, as they imply format (h.264 vs. VP8,
- // for example), and keep the values for a particular format grouped
- // together for clarity.
- H264PROFILE_MIN = 0,
- H264PROFILE_BASELINE = H264PROFILE_MIN,
- H264PROFILE_MAIN,
- H264PROFILE_EXTENDED,
- H264PROFILE_HIGH,
- H264PROFILE_HIGH10PROFILE,
- H264PROFILE_HIGH422PROFILE,
- H264PROFILE_HIGH444PREDICTIVEPROFILE,
- H264PROFILE_SCALABLEBASELINE,
- H264PROFILE_SCALABLEHIGH,
- H264PROFILE_STEREOHIGH,
- H264PROFILE_MULTIVIEWHIGH,
- H264PROFILE_MAX = H264PROFILE_MULTIVIEWHIGH,
- };
+ // TODO(fischman): fix foreign references to this and get rid of this typedef.
+ typedef VideoCodecProfile Profile;
// Enumeration of potential errors generated by the API.
// Note: Keep these in sync with PP_VideoDecodeError_Dev.
@@ -64,7 +48,7 @@ class MEDIA_EXPORT VideoDecodeAccelerator
// This interface is extended by the various layers that relay messages back
// to the plugin, through the PPP_VideoDecode_Dev interface the plugin
// implements.
- class Client {
+ class MEDIA_EXPORT Client {
public:
virtual ~Client() {}