author    acolwell@chromium.org <acolwell@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2012-08-02 22:42:52 +0000
committer acolwell@chromium.org <acolwell@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2012-08-02 22:42:52 +0000
commit    e4ee4911dfbfe88088e68275bb6799b1078319a3 (patch)
tree      b09d499ef3cc1a2a283f2218958d1e87778e9c2f
parent    79f59939f69556273b5f65fa012402d2c6431e7e (diff)
Remove VideoDecoder::natural_size() & add VideoFrame::natural_size().
BUG=122913
Review URL: https://chromiumcodereview.appspot.com/10824141
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@149728 0039d316-1c4b-4281-b951-d872f2087c98
34 files changed, 338 insertions, 310 deletions
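The heart of the change is the split between a frame's data size (the coded width and height in pixels) and its natural size (the data size with the stream's pixel aspect ratio applied). The FFmpegVideoDecoder hunk below derives the natural size from codec_context->sample_aspect_ratio via GetNaturalSize(). A minimal standalone sketch of that computation, assuming the conventional definition — the NaturalSize() helper here is hypothetical, not Chromium's media::GetNaturalSize():

#include <cstdio>

struct Size { int width; int height; };

// Scale the coded width by the sample (pixel) aspect ratio num/den and round
// to the nearest pixel; the height is left unchanged. Illustrative sketch of
// what the FFmpegVideoDecoder hunk below asks its helper to do.
Size NaturalSize(Size coded, int sar_num, int sar_den) {
  int natural_width = static_cast<int>(
      coded.width * static_cast<double>(sar_num) / sar_den + 0.5);
  return Size{natural_width, coded.height};
}

int main() {
  // Anamorphic 720x480 content with a 32:27 pixel aspect ratio displays at
  // roughly 853x480, i.e. a 16:9 picture.
  Size n = NaturalSize(Size{720, 480}, 32, 27);
  std::printf("%dx%d\n", n.width, n.height);  // prints "853x480"
}

With the natural size carried on every frame, renderers can track aspect-ratio changes mid-stream instead of caching one value at decoder initialization.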
diff --git a/content/renderer/media/capture_video_decoder.cc b/content/renderer/media/capture_video_decoder.cc index b55a6c7..6980c3d 100644 --- a/content/renderer/media/capture_video_decoder.cc +++ b/content/renderer/media/capture_video_decoder.cc @@ -61,10 +61,6 @@ void CaptureVideoDecoder::Stop(const base::Closure& closure) { base::Bind(&CaptureVideoDecoder::StopOnDecoderThread, this, closure)); } -const gfx::Size& CaptureVideoDecoder::natural_size() { - return natural_size_; -} - void CaptureVideoDecoder::PrepareForShutdownHack() { message_loop_proxy_->PostTask( FROM_HERE, @@ -139,6 +135,7 @@ void CaptureVideoDecoder::InitializeOnDecoderThread( } void CaptureVideoDecoder::ReadOnDecoderThread(const ReadCB& read_cb) { + DCHECK_NE(state_, kUnInitialized); DCHECK(message_loop_proxy_->BelongsToCurrentThread()); CHECK(read_cb_.is_null()); read_cb_ = read_cb; @@ -152,8 +149,7 @@ void CaptureVideoDecoder::ResetOnDecoderThread(const base::Closure& closure) { DCHECK(message_loop_proxy_->BelongsToCurrentThread()); if (!read_cb_.is_null()) { scoped_refptr<media::VideoFrame> video_frame = - media::VideoFrame::CreateBlackFrame(natural_size_.width(), - natural_size_.height()); + media::VideoFrame::CreateBlackFrame(natural_size_); DeliverFrame(video_frame); } closure.Run(); @@ -240,8 +236,7 @@ void CaptureVideoDecoder::OnBufferReadyOnDecoderThread( // TODO(scherkus): migrate this to proper buffer recycling. scoped_refptr<media::VideoFrame> video_frame = media::VideoFrame::CreateFrame(media::VideoFrame::YV12, - natural_size_.width(), - natural_size_.height(), + natural_size_, natural_size_, buf->timestamp - start_time_); last_frame_timestamp_ = buf->timestamp; diff --git a/content/renderer/media/capture_video_decoder.h b/content/renderer/media/capture_video_decoder.h index 85384aa..75bf153 100644 --- a/content/renderer/media/capture_video_decoder.h +++ b/content/renderer/media/capture_video_decoder.h @@ -39,7 +39,6 @@ class CONTENT_EXPORT CaptureVideoDecoder virtual void Read(const ReadCB& read_cb) OVERRIDE; virtual void Reset(const base::Closure& closure) OVERRIDE; virtual void Stop(const base::Closure& closure) OVERRIDE; - virtual const gfx::Size& natural_size() OVERRIDE; virtual void PrepareForShutdownHack() OVERRIDE; // VideoCapture::EventHandler implementation. 
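The width * height * 3 / 2 arithmetic used when sizing capture buffers (see SendBufferToDecoder in the test diff below) is just the YV12 plane layout. A self-contained sketch of that math:

#include <cstdio>

// YV12 is planar 4:2:0: one full-resolution Y plane plus U and V planes at
// half resolution in both dimensions, so a frame occupies width*height*3/2
// bytes for even dimensions. Same arithmetic as the capture decoder tests'
// VideoFrameBuffer sizing below; illustrative only.
int YV12BufferSize(int width, int height) {
  int y_bytes = width * height;                   // Y plane
  int uv_bytes = 2 * (width / 2) * (height / 2);  // U + V planes
  return y_bytes + uv_bytes;                      // == width * height * 3 / 2
}

int main() {
  std::printf("%d\n", YV12BufferSize(176, 144));  // QCIF: 38016 bytes
}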
diff --git a/content/renderer/media/capture_video_decoder_unittest.cc b/content/renderer/media/capture_video_decoder_unittest.cc index 643fc50..57aab50 100644 --- a/content/renderer/media/capture_video_decoder_unittest.cc +++ b/content/renderer/media/capture_video_decoder_unittest.cc @@ -24,19 +24,6 @@ static const int kHeight = 144; static const int kFPS = 30; static const media::VideoCaptureSessionId kVideoStreamId = 1; -ACTION_P3(CreateDataBufferFromCapture, decoder, vc_impl, data_buffer_number) { - for (int i = 0; i < data_buffer_number; i++) { - media::VideoCapture::VideoFrameBuffer* buffer; - buffer = new media::VideoCapture::VideoFrameBuffer(); - buffer->width = arg1.width; - buffer->height = arg1.height; - int length = buffer->width * buffer->height * 3 / 2; - buffer->memory_pointer = new uint8[length]; - buffer->buffer_size = length; - decoder->OnBufferReady(vc_impl, buffer); - } -} - ACTION(DeleteDataBuffer) { delete[] arg0->memory_pointer; } @@ -45,6 +32,17 @@ ACTION_P2(CaptureStopped, decoder, vc_impl) { decoder->OnStopped(vc_impl); } +MATCHER_P2(HasSize, width, height, "") { + EXPECT_EQ(arg->data_size().width(), width); + EXPECT_EQ(arg->data_size().height(), height); + EXPECT_EQ(arg->natural_size().width(), width); + EXPECT_EQ(arg->natural_size().height(), height); + return (arg->data_size().width() == width) && + (arg->data_size().height() == height) && + (arg->natural_size().width() == width) && + (arg->natural_size().height() == height); +} + class MockVideoCaptureImpl : public VideoCaptureImpl { public: MockVideoCaptureImpl(const media::VideoCaptureSessionId id, @@ -119,25 +117,17 @@ class CaptureVideoDecoderTest : public ::testing::Test { } void Initialize() { - // Issue a read. - EXPECT_CALL(*this, FrameReady(media::VideoDecoder::kOk, _)); - decoder_->Read(read_cb_); - EXPECT_CALL(*vc_manager_, AddDevice(_, _)) .WillOnce(Return(vc_impl_.get())); - int buffer_count = 1; - EXPECT_CALL(*vc_impl_, StartCapture(capture_client(), _)) - .Times(1) - .WillOnce(CreateDataBufferFromCapture(capture_client(), - vc_impl_.get(), - buffer_count)); - EXPECT_CALL(*vc_impl_, FeedBuffer(_)) - .Times(buffer_count) - .WillRepeatedly(DeleteDataBuffer()); - + EXPECT_CALL(*vc_impl_, StartCapture(capture_client(), _)); decoder_->Initialize(NULL, media::NewExpectedStatusCB(media::PIPELINE_OK), NewStatisticsCB()); + + EXPECT_CALL(*this, FrameReady(media::VideoDecoder::kOk, + HasSize(kWidth, kHeight))); + decoder_->Read(read_cb_); + SendBufferToDecoder(gfx::Size(kWidth, kHeight)); message_loop_->RunAllPending(); } @@ -155,6 +145,20 @@ class CaptureVideoDecoderTest : public ::testing::Test { return static_cast<media::VideoCapture::EventHandler*>(decoder_); } + void SendBufferToDecoder(const gfx::Size& size) { + scoped_refptr<media::VideoCapture::VideoFrameBuffer> buffer = + new media::VideoCapture::VideoFrameBuffer(); + buffer->width = size.width(); + buffer->height = size.height(); + int length = buffer->width * buffer->height * 3 / 2; + buffer->memory_pointer = new uint8[length]; + buffer->buffer_size = length; + + EXPECT_CALL(*vc_impl_, FeedBuffer(_)) + .WillOnce(DeleteDataBuffer()); + decoder_->OnBufferReady(vc_impl_.get(), buffer); + } + MOCK_METHOD2(FrameReady, void(media::VideoDecoder::DecoderStatus status, const scoped_refptr<media::VideoFrame>&)); @@ -175,11 +179,8 @@ class CaptureVideoDecoderTest : public ::testing::Test { TEST_F(CaptureVideoDecoderTest, ReadAndReset) { // Test basic initialize and teardown sequence. 
Initialize(); - // Natural size should be initialized to default capability. - EXPECT_EQ(kWidth, decoder_->natural_size().width()); - EXPECT_EQ(kHeight, decoder_->natural_size().height()); - - EXPECT_CALL(*this, FrameReady(media::VideoDecoder::kOk, _)); + EXPECT_CALL(*this, FrameReady(media::VideoDecoder::kOk, + HasSize(kWidth, kHeight))); decoder_->Read(read_cb_); decoder_->Reset(media::NewExpectedClosure()); message_loop_->RunAllPending(); @@ -200,10 +201,13 @@ TEST_F(CaptureVideoDecoderTest, OnDeviceInfoReceived) { params.session_id = kVideoStreamId; decoder_->OnDeviceInfoReceived(vc_impl_.get(), params); - message_loop_->RunAllPending(); - EXPECT_EQ(expected_size.width(), decoder_->natural_size().width()); - EXPECT_EQ(expected_size.height(), decoder_->natural_size().height()); + EXPECT_CALL(*this, FrameReady(media::VideoDecoder::kOk, + HasSize(expected_size.width(), + expected_size.height()))); + decoder_->Read(read_cb_); + SendBufferToDecoder(expected_size); + message_loop_->RunAllPending(); Stop(); } @@ -213,7 +217,8 @@ TEST_F(CaptureVideoDecoderTest, ReadAndShutdown) { // teardown the pipeline) even when there's no input frame. Initialize(); - EXPECT_CALL(*this, FrameReady(media::VideoDecoder::kOk, _)).Times(2); + EXPECT_CALL(*this, FrameReady(media::VideoDecoder::kOk, + HasSize(0, 0))).Times(2); decoder_->Read(read_cb_); decoder_->PrepareForShutdownHack(); decoder_->Read(read_cb_); @@ -221,4 +226,3 @@ TEST_F(CaptureVideoDecoderTest, ReadAndShutdown) { Stop(); } - diff --git a/content/renderer/media/rtc_video_decoder.cc b/content/renderer/media/rtc_video_decoder.cc index f4a0e73..8108567 100644 --- a/content/renderer/media/rtc_video_decoder.cc +++ b/content/renderer/media/rtc_video_decoder.cc @@ -102,11 +102,6 @@ void RTCVideoDecoder::Stop(const base::Closure& closure) { closure.Run(); } -const gfx::Size& RTCVideoDecoder::natural_size() { - // TODO(vrk): Return natural size when aspect ratio support is implemented. - return visible_size_; -} - void RTCVideoDecoder::PrepareForShutdownHack() { if (!video_decoder_thread_->RunsTasksOnCurrentThread()) { video_decoder_thread_->PostTask( @@ -159,8 +154,8 @@ void RTCVideoDecoder::RenderFrame(const cricket::VideoFrame* frame) { // TODO(scherkus): migrate this to proper buffer recycling. 
scoped_refptr<media::VideoFrame> video_frame = media::VideoFrame::CreateFrame(media::VideoFrame::YV12, - visible_size_.width(), - visible_size_.height(), + visible_size_, + visible_size_, timestamp - start_time_); last_frame_timestamp_ = timestamp; diff --git a/content/renderer/media/rtc_video_decoder.h b/content/renderer/media/rtc_video_decoder.h index eb7f38a..857cc99 100644 --- a/content/renderer/media/rtc_video_decoder.h +++ b/content/renderer/media/rtc_video_decoder.h @@ -42,9 +42,8 @@ class CONTENT_EXPORT RTCVideoDecoder const media::PipelineStatusCB& status_cb, const media::StatisticsCB& statistics_cb) OVERRIDE; virtual void Read(const ReadCB& read_cb) OVERRIDE; - virtual void Reset(const base::Closure& clusure) OVERRIDE; - virtual void Stop(const base::Closure& clusure) OVERRIDE; - virtual const gfx::Size& natural_size() OVERRIDE; + virtual void Reset(const base::Closure& closure) OVERRIDE; + virtual void Stop(const base::Closure& closure) OVERRIDE; virtual void PrepareForShutdownHack() OVERRIDE; // webrtc::VideoRendererInterface implementation diff --git a/content/renderer/media/rtc_video_decoder_unittest.cc b/content/renderer/media/rtc_video_decoder_unittest.cc index 441d9c8..6bfe588 100644 --- a/content/renderer/media/rtc_video_decoder_unittest.cc +++ b/content/renderer/media/rtc_video_decoder_unittest.cc @@ -222,6 +222,11 @@ class RTCVideoDecoderTest : public testing::Test { base::Unretained(&statistics_cb_)); } + void RenderFrame() { + NullVideoFrame video_frame; + decoder_->RenderFrame(&video_frame); + } + MOCK_METHOD2(FrameReady, void(media::VideoDecoder::DecoderStatus status, const scoped_refptr<media::VideoFrame>&)); @@ -241,19 +246,31 @@ const int RTCVideoDecoderTest::kWidth = 640; const int RTCVideoDecoderTest::kHeight = 480; const PipelineStatistics RTCVideoDecoderTest::kStatistics; +MATCHER_P2(HasSize, width, height, "") { + EXPECT_EQ(arg->data_size().width(), width); + EXPECT_EQ(arg->data_size().height(), height); + EXPECT_EQ(arg->natural_size().width(), width); + EXPECT_EQ(arg->natural_size().height(), height); + return (arg->data_size().width() == width) && + (arg->data_size().height() == height) && + (arg->natural_size().width() == width) && + (arg->natural_size().height() == height); +} + TEST_F(RTCVideoDecoderTest, Initialize_Successful) { InitializeDecoderSuccessfully(); - // Test that the output media format is an uncompressed video surface that - // matches the dimensions specified by RTC. 
- EXPECT_EQ(kWidth, decoder_->natural_size().width()); - EXPECT_EQ(kHeight, decoder_->natural_size().height()); + EXPECT_CALL(*this, FrameReady(media::VideoDecoder::kOk, + HasSize(kWidth, kHeight))); + decoder_->Read(read_cb_); + RenderFrame(); } TEST_F(RTCVideoDecoderTest, DoReset) { InitializeDecoderSuccessfully(); - EXPECT_CALL(*this, FrameReady(media::VideoDecoder::kOk, _)); + EXPECT_CALL(*this, FrameReady(media::VideoDecoder::kOk, + scoped_refptr<media::VideoFrame>())); decoder_->Read(read_cb_); decoder_->Reset(media::NewExpectedClosure()); @@ -264,11 +281,8 @@ TEST_F(RTCVideoDecoderTest, DoReset) { TEST_F(RTCVideoDecoderTest, DoRenderFrame) { InitializeDecoderSuccessfully(); - NullVideoFrame video_frame; - - for (size_t i = 0; i < media::limits::kMaxVideoFrames; ++i) { - decoder_->RenderFrame(&video_frame); - } + for (size_t i = 0; i < media::limits::kMaxVideoFrames; ++i) + RenderFrame(); message_loop_.RunAllPending(); EXPECT_EQ(RTCVideoDecoder::kNormal, decoder_->state_); @@ -277,15 +291,20 @@ TEST_F(RTCVideoDecoderTest, DoRenderFrame) { TEST_F(RTCVideoDecoderTest, DoSetSize) { InitializeDecoderSuccessfully(); + EXPECT_CALL(*this, FrameReady(media::VideoDecoder::kOk, + HasSize(kWidth, kHeight))); + decoder_->Read(read_cb_); + RenderFrame(); + message_loop_.RunAllPending(); + int new_width = kWidth * 2; int new_height = kHeight * 2; - gfx::Size new_natural_size(new_width, new_height); - decoder_->SetSize(new_width, new_height); - EXPECT_EQ(new_width, decoder_->natural_size().width()); - EXPECT_EQ(new_height, decoder_->natural_size().height()); - + EXPECT_CALL(*this, FrameReady(media::VideoDecoder::kOk, + HasSize(new_width, new_height))); + decoder_->Read(read_cb_); + RenderFrame(); message_loop_.RunAllPending(); } @@ -294,7 +313,8 @@ TEST_F(RTCVideoDecoderTest, ReadAndShutdown) { // teardown the pipeline) even when there's no input frame. InitializeDecoderSuccessfully(); - EXPECT_CALL(*this, FrameReady(media::VideoDecoder::kOk, _)).Times(2); + EXPECT_CALL(*this, FrameReady(media::VideoDecoder::kOk, + scoped_refptr<media::VideoFrame>())).Times(2); decoder_->Read(read_cb_); EXPECT_FALSE(decoder_->shutting_down_); decoder_->PrepareForShutdownHack(); diff --git a/media/base/video_decoder.h b/media/base/video_decoder.h index c550033..c8c19b7 100644 --- a/media/base/video_decoder.h +++ b/media/base/video_decoder.h @@ -60,15 +60,6 @@ class MEDIA_EXPORT VideoDecoder // should/could not be re-initialized after it has been stopped. virtual void Stop(const base::Closure& closure) = 0; - // Returns the natural width and height of decoded video in pixels. - // - // Clients should NOT rely on these values to remain constant. Instead, use - // the width/height from decoded video frames themselves. - // - // TODO(scherkus): why not rely on prerolling and decoding a single frame to - // get dimensions? - virtual const gfx::Size& natural_size() = 0; - // Returns true if the output format has an alpha channel. Most formats do not // have alpha so the default is false. Override and return true for decoders // that return formats with an alpha channel. 
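With natural_size() removed from the VideoDecoder interface, consumers follow the removed comment's advice and take sizes from decoded frames themselves. The VideoRendererBase hunk later in this diff implements that by comparing each frame's natural_size() to the last size it reported. A standalone sketch of the pattern, using hypothetical stand-in types rather than the Chromium classes:

#include <cstdio>
#include <functional>
#include <utility>

// Size and SizeChangeNotifier are stand-ins; the real code is
// VideoRendererBase::SetCurrentFrameToNextReadyFrame() below.
struct Size {
  int width;
  int height;
  bool operator!=(const Size& other) const {
    return width != other.width || height != other.height;
  }
};

class SizeChangeNotifier {
 public:
  explicit SizeChangeNotifier(std::function<void(Size)> size_changed_cb)
      : size_changed_cb_(std::move(size_changed_cb)) {}

  // Called once per delivered frame; fires the callback only when the
  // frame's natural size differs from the last one reported.
  void OnFrameDelivered(const Size& natural_size) {
    if (natural_size != last_natural_size_) {
      size_changed_cb_(natural_size);
      last_natural_size_ = natural_size;
    }
  }

 private:
  std::function<void(Size)> size_changed_cb_;
  Size last_natural_size_ = {0, 0};
};

int main() {
  SizeChangeNotifier notifier(
      [](Size s) { std::printf("natural size -> %dx%d\n", s.width, s.height); });
  notifier.OnFrameDelivered({640, 480});   // fires
  notifier.OnFrameDelivered({640, 480});   // suppressed, no change
  notifier.OnFrameDelivered({1280, 960});  // fires
}

This keeps the size-changed notification on the same path as frame delivery, so a mid-stream aspect-ratio change is reported exactly when the first differently-sized frame becomes current.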
diff --git a/media/base/video_decoder_config.cc b/media/base/video_decoder_config.cc index cb026f8..1cbf5f8 100644 --- a/media/base/video_decoder_config.cc +++ b/media/base/video_decoder_config.cc @@ -108,8 +108,7 @@ bool VideoDecoderConfig::IsValidConfig() const { return codec_ != kUnknownVideoCodec && natural_size_.width() > 0 && natural_size_.height() > 0 && - VideoFrame::IsValidConfig( - format_, natural_size_.width(), natural_size_.height()); + VideoFrame::IsValidConfig(format_, visible_rect().size(), natural_size_); } bool VideoDecoderConfig::Matches(const VideoDecoderConfig& config) const { diff --git a/media/base/video_frame.cc b/media/base/video_frame.cc index 9e5ff7a..5409e84 100644 --- a/media/base/video_frame.cc +++ b/media/base/video_frame.cc @@ -19,12 +19,12 @@ namespace media { // static scoped_refptr<VideoFrame> VideoFrame::CreateFrame( VideoFrame::Format format, - size_t width, - size_t height, + const gfx::Size& data_size, + const gfx::Size& natural_size, base::TimeDelta timestamp) { - DCHECK(IsValidConfig(format, width, height)); + DCHECK(IsValidConfig(format, data_size, natural_size)); scoped_refptr<VideoFrame> frame(new VideoFrame( - format, width, height, timestamp)); + format, data_size, natural_size, timestamp)); switch (format) { case VideoFrame::RGB32: frame->AllocateRGB(4u); @@ -40,27 +40,30 @@ scoped_refptr<VideoFrame> VideoFrame::CreateFrame( } // static -bool VideoFrame::IsValidConfig( - VideoFrame::Format format, - size_t width, - size_t height) { - +bool VideoFrame::IsValidConfig(VideoFrame::Format format, + const gfx::Size& data_size, + const gfx::Size& natural_size) { return (format != VideoFrame::INVALID && - width > 0 && height > 0 && - width <= limits::kMaxDimension && height <= limits::kMaxDimension && - width * height <= limits::kMaxCanvas); + data_size.width() > 0 && data_size.height() > 0 && + data_size.width() <= limits::kMaxDimension && + data_size.height() <= limits::kMaxDimension && + data_size.width() * data_size.height() <= limits::kMaxCanvas && + natural_size.width() > 0 && natural_size.height() > 0 && + natural_size.width() <= limits::kMaxDimension && + natural_size.height() <= limits::kMaxDimension && + natural_size.width() * natural_size.height() <= limits::kMaxCanvas); } // static scoped_refptr<VideoFrame> VideoFrame::WrapNativeTexture( uint32 texture_id, uint32 texture_target, - size_t width, - size_t height, + const gfx::Size& data_size, + const gfx::Size& natural_size, base::TimeDelta timestamp, const base::Closure& no_longer_needed) { scoped_refptr<VideoFrame> frame( - new VideoFrame(NATIVE_TEXTURE, width, height, timestamp)); + new VideoFrame(NATIVE_TEXTURE, data_size, natural_size, timestamp)); frame->texture_id_ = texture_id; frame->texture_target_ = texture_target; frame->texture_no_longer_needed_ = no_longer_needed; @@ -70,18 +73,18 @@ scoped_refptr<VideoFrame> VideoFrame::WrapNativeTexture( // static scoped_refptr<VideoFrame> VideoFrame::CreateEmptyFrame() { return new VideoFrame( - VideoFrame::EMPTY, 0, 0, base::TimeDelta()); + VideoFrame::EMPTY, gfx::Size(), gfx::Size(), base::TimeDelta()); } // static -scoped_refptr<VideoFrame> VideoFrame::CreateBlackFrame(int width, int height) { - DCHECK_GT(width, 0); - DCHECK_GT(height, 0); +scoped_refptr<VideoFrame> VideoFrame::CreateBlackFrame( + const gfx::Size& data_size) { + DCHECK(IsValidConfig(VideoFrame::YV12, data_size, data_size)); // Create our frame. 
const base::TimeDelta kZero; scoped_refptr<VideoFrame> frame = - VideoFrame::CreateFrame(VideoFrame::YV12, width, height, kZero); + VideoFrame::CreateFrame(VideoFrame::YV12, data_size, data_size, kZero); // Now set the data to YUV(0,128,128). const uint8 kBlackY = 0x00; @@ -103,8 +106,9 @@ static const int kFramePadBytes = 15; void VideoFrame::AllocateRGB(size_t bytes_per_pixel) { // Round up to align at least at a 16-byte boundary for each row. // This is sufficient for MMX and SSE2 reads (movq/movdqa). - size_t bytes_per_row = RoundUp(width_, kFrameSizeAlignment) * bytes_per_pixel; - size_t aligned_height = RoundUp(height_, kFrameSizeAlignment); + size_t bytes_per_row = RoundUp(data_size_.width(), + kFrameSizeAlignment) * bytes_per_pixel; + size_t aligned_height = RoundUp(data_size_.height(), kFrameSizeAlignment); strides_[VideoFrame::kRGBPlane] = bytes_per_row; #if !defined(OS_ANDROID) // TODO(dalecurtis): use DataAligned or so, so this #ifdef hackery @@ -136,7 +140,7 @@ void VideoFrame::AllocateYUV() { // The *2 here is because some formats (e.g. h264) allow interlaced coding, // and then the size needs to be a multiple of two macroblocks (vertically). // See libavcodec/utils.c:avcodec_align_dimensions2(). - size_t y_height = RoundUp(height_, kFrameSizeAlignment * 2); + size_t y_height = RoundUp(data_size_.height(), kFrameSizeAlignment * 2); size_t uv_height = format_ == VideoFrame::YV12 ? y_height / 2 : y_height; size_t y_bytes = y_height * y_stride; size_t uv_bytes = uv_height * uv_stride; @@ -163,12 +167,12 @@ void VideoFrame::AllocateYUV() { } VideoFrame::VideoFrame(VideoFrame::Format format, - size_t width, - size_t height, + const gfx::Size& data_size, + const gfx::Size& natural_size, base::TimeDelta timestamp) : format_(format), - width_(width), - height_(height), + data_size_(data_size), + natural_size_(natural_size), texture_id_(0), texture_target_(0), timestamp_(timestamp) { @@ -223,17 +227,18 @@ int VideoFrame::stride(size_t plane) const { int VideoFrame::row_bytes(size_t plane) const { DCHECK(IsValidPlane(plane)); + int width = data_size_.width(); switch (format_) { // 32bpp. case RGB32: - return width_ * 4; + return width * 4; // Planar, 8bpp. case YV12: case YV16: if (plane == kYPlane) - return width_; - return RoundUp(width_, 2) / 2; + return width; + return RoundUp(width, 2) / 2; default: break; @@ -246,15 +251,16 @@ int VideoFrame::row_bytes(size_t plane) const { int VideoFrame::rows(size_t plane) const { DCHECK(IsValidPlane(plane)); + int height = data_size_.height(); switch (format_) { case RGB32: case YV16: - return height_; + return height; case YV12: if (plane == kYPlane) - return height_; - return RoundUp(height_, 2) / 2; + return height; + return RoundUp(height, 2) / 2; default: break; diff --git a/media/base/video_frame.h b/media/base/video_frame.h index 26a9dea..c40cb23 100644 --- a/media/base/video_frame.h +++ b/media/base/video_frame.h @@ -8,6 +8,7 @@ #include "base/callback.h" #include "base/md5.h" #include "media/base/buffers.h" +#include "ui/gfx/size.h" namespace media { @@ -39,27 +40,31 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> { // Creates a new frame in system memory with given parameters. Buffers for // the frame are allocated but not initialized. + // |data_size| is the width and height of the frame data in pixels. + // |natural_size| is the width and height of the frame when the frame's aspect + // ratio is applied to |data_size|. 
static scoped_refptr<VideoFrame> CreateFrame( Format format, - size_t width, - size_t height, + const gfx::Size& data_size, + const gfx::Size& natural_size, base::TimeDelta timestamp); // Call prior to CreateFrame to ensure validity of frame configuration. Called // automatically by VideoDecoderConfig::IsValidConfig(). // TODO(scherkus): VideoDecoderConfig shouldn't call this method - static bool IsValidConfig( - Format format, - size_t width, - size_t height); + static bool IsValidConfig(Format format, const gfx::Size& data_size, + const gfx::Size& natural_size); // Wraps a native texture of the given parameters with a VideoFrame. When the // frame is destroyed |no_longer_needed.Run()| will be called. + // |data_size| is the width and height of the frame data in pixels. + // |natural_size| is the width and height of the frame when the frame's aspect + // ratio is applied to |size|. static scoped_refptr<VideoFrame> WrapNativeTexture( uint32 texture_id, uint32 texture_target, - size_t width, - size_t height, + const gfx::Size& data_size, + const gfx::Size& natural_size, base::TimeDelta timestamp, const base::Closure& no_longer_needed); @@ -69,13 +74,12 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> { // Allocates YV12 frame based on |width| and |height|, and sets its data to // the YUV equivalent of RGB(0,0,0). - static scoped_refptr<VideoFrame> CreateBlackFrame(int width, int height); + static scoped_refptr<VideoFrame> CreateBlackFrame(const gfx::Size& size); Format format() const { return format_; } - size_t width() const { return width_; } - - size_t height() const { return height_; } + const gfx::Size& data_size() const { return data_size_; } + const gfx::Size& natural_size() const { return natural_size_; } int stride(size_t plane) const; @@ -115,8 +119,8 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> { friend class base::RefCountedThreadSafe<VideoFrame>; // Clients must use the static CreateFrame() method to create a new frame. VideoFrame(Format format, - size_t video_width, - size_t video_height, + const gfx::Size& size, + const gfx::Size& natural_size, base::TimeDelta timestamp); virtual ~VideoFrame(); @@ -130,9 +134,12 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> { // Frame format. Format format_; - // Width and height of surface. - size_t width_; - size_t height_; + // Width and height of the video frame. + gfx::Size data_size_; + + // Width and height of the video frame with aspect ratio taken + // into account. + gfx::Size natural_size_; // Array of strides for each plane, typically greater or equal to the width // of the surface divided by the horizontal sampling period. Note that diff --git a/media/base/video_frame_unittest.cc b/media/base/video_frame_unittest.cc index e1dc1ff..ebbf92b 100644 --- a/media/base/video_frame_unittest.cc +++ b/media/base/video_frame_unittest.cc @@ -20,19 +20,19 @@ using base::MD5DigestToBase16; // frame will be black, if 1 then the entire frame will be white. void InitializeYV12Frame(VideoFrame* frame, double white_to_black) { EXPECT_EQ(VideoFrame::YV12, frame->format()); - size_t first_black_row = static_cast<size_t>(frame->height() * - white_to_black); + int first_black_row = static_cast<int>(frame->data_size().height() * + white_to_black); uint8* y_plane = frame->data(VideoFrame::kYPlane); - for (size_t row = 0; row < frame->height(); ++row) { + for (int row = 0; row < frame->data_size().height(); ++row) { int color = (row < first_black_row) ? 
0xFF : 0x00; - memset(y_plane, color, frame->width()); + memset(y_plane, color, frame->data_size().width()); y_plane += frame->stride(VideoFrame::kYPlane); } uint8* u_plane = frame->data(VideoFrame::kUPlane); uint8* v_plane = frame->data(VideoFrame::kVPlane); - for (size_t row = 0; row < frame->height(); row += 2) { - memset(u_plane, 0x80, frame->width() / 2); - memset(v_plane, 0x80, frame->width() / 2); + for (int row = 0; row < frame->data_size().height(); row += 2) { + memset(u_plane, 0x80, frame->data_size().width() / 2); + memset(v_plane, 0x80, frame->data_size().width() / 2); u_plane += frame->stride(VideoFrame::kUPlane); v_plane += frame->stride(VideoFrame::kVPlane); } @@ -47,31 +47,31 @@ void ExpectFrameColor(media::VideoFrame* yv12_frame, uint32 expect_rgb_color) { scoped_refptr<media::VideoFrame> rgb_frame; rgb_frame = media::VideoFrame::CreateFrame(VideoFrame::RGB32, - yv12_frame->width(), - yv12_frame->height(), + yv12_frame->data_size(), + yv12_frame->natural_size(), yv12_frame->GetTimestamp()); - ASSERT_EQ(yv12_frame->width(), rgb_frame->width()); - ASSERT_EQ(yv12_frame->height(), rgb_frame->height()); + ASSERT_EQ(yv12_frame->data_size().width(), rgb_frame->data_size().width()); + ASSERT_EQ(yv12_frame->data_size().height(), rgb_frame->data_size().height()); media::ConvertYUVToRGB32(yv12_frame->data(VideoFrame::kYPlane), yv12_frame->data(VideoFrame::kUPlane), yv12_frame->data(VideoFrame::kVPlane), rgb_frame->data(VideoFrame::kRGBPlane), - rgb_frame->width(), - rgb_frame->height(), + rgb_frame->data_size().width(), + rgb_frame->data_size().height(), yv12_frame->stride(VideoFrame::kYPlane), yv12_frame->stride(VideoFrame::kUPlane), rgb_frame->stride(VideoFrame::kRGBPlane), media::YV12); - for (size_t row = 0; row < rgb_frame->height(); ++row) { + for (int row = 0; row < rgb_frame->data_size().height(); ++row) { uint32* rgb_row_data = reinterpret_cast<uint32*>( rgb_frame->data(VideoFrame::kRGBPlane) + (rgb_frame->stride(VideoFrame::kRGBPlane) * row)); - for (size_t col = 0; col < rgb_frame->width(); ++col) { + for (int col = 0; col < rgb_frame->data_size().width(); ++col) { SCOPED_TRACE( - base::StringPrintf("Checking (%" PRIuS ", %" PRIuS ")", row, col)); + base::StringPrintf("Checking (%d, %d)", row, col)); EXPECT_EQ(expect_rgb_color, rgb_row_data[col]); } } @@ -83,12 +83,13 @@ void ExpectFrameColor(media::VideoFrame* yv12_frame, uint32 expect_rgb_color) { void ExpectFrameExtents(VideoFrame::Format format, int planes, int bytes_per_pixel, const char* expected_hash) { const unsigned char kFillByte = 0x80; - const size_t kWidth = 61; - const size_t kHeight = 31; + const int kWidth = 61; + const int kHeight = 31; const base::TimeDelta kTimestamp = base::TimeDelta::FromMicroseconds(1337); + gfx::Size size(kWidth, kHeight); scoped_refptr<VideoFrame> frame = VideoFrame::CreateFrame( - format, kWidth, kHeight, kTimestamp); + format, size, size, kTimestamp); ASSERT_TRUE(frame); for(int plane = 0; plane < planes; plane++) { @@ -99,8 +100,8 @@ void ExpectFrameExtents(VideoFrame::Format format, int planes, EXPECT_TRUE(frame->row_bytes(plane)); if (plane == 0) { - EXPECT_EQ((size_t)frame->rows(plane), kHeight); - EXPECT_EQ((size_t)frame->row_bytes(plane), kWidth * bytes_per_pixel); + EXPECT_EQ(frame->rows(plane), kHeight); + EXPECT_EQ(frame->row_bytes(plane), kWidth * bytes_per_pixel); } memset(frame->data(plane), kFillByte, @@ -116,14 +117,14 @@ void ExpectFrameExtents(VideoFrame::Format format, int planes, } TEST(VideoFrame, CreateFrame) { - const size_t kWidth = 64; - const size_t 
kHeight = 48; + const int kWidth = 64; + const int kHeight = 48; const base::TimeDelta kTimestamp = base::TimeDelta::FromMicroseconds(1337); // Create a YV12 Video Frame. + gfx::Size size(kWidth, kHeight); scoped_refptr<media::VideoFrame> frame = - VideoFrame::CreateFrame(media::VideoFrame::YV12, kWidth, kHeight, - kTimestamp); + VideoFrame::CreateFrame(media::VideoFrame::YV12, size, size, kTimestamp); ASSERT_TRUE(frame); // Test VideoFrame implementation. @@ -155,13 +156,13 @@ TEST(VideoFrame, CreateFrame) { } TEST(VideoFrame, CreateBlackFrame) { - const size_t kWidth = 2; - const size_t kHeight = 2; + const int kWidth = 2; + const int kHeight = 2; const uint8 kExpectedYRow[] = { 0, 0 }; const uint8 kExpectedUVRow[] = { 128 }; scoped_refptr<media::VideoFrame> frame = - VideoFrame::CreateBlackFrame(kWidth, kHeight); + VideoFrame::CreateBlackFrame(gfx::Size(kWidth, kHeight)); ASSERT_TRUE(frame); // Test basic properties. @@ -170,19 +171,19 @@ TEST(VideoFrame, CreateBlackFrame) { // Test |frame| properties. EXPECT_EQ(VideoFrame::YV12, frame->format()); - EXPECT_EQ(kWidth, frame->width()); - EXPECT_EQ(kHeight, frame->height()); + EXPECT_EQ(kWidth, frame->data_size().width()); + EXPECT_EQ(kHeight, frame->data_size().height()); // Test frames themselves. uint8* y_plane = frame->data(VideoFrame::kYPlane); - for (size_t y = 0; y < frame->height(); ++y) { + for (int y = 0; y < frame->data_size().height(); ++y) { EXPECT_EQ(0, memcmp(kExpectedYRow, y_plane, arraysize(kExpectedYRow))); y_plane += frame->stride(VideoFrame::kYPlane); } uint8* u_plane = frame->data(VideoFrame::kUPlane); uint8* v_plane = frame->data(VideoFrame::kVPlane); - for (size_t y = 0; y < frame->height() / 2; ++y) { + for (int y = 0; y < frame->data_size().height() / 2; ++y) { EXPECT_EQ(0, memcmp(kExpectedUVRow, u_plane, arraysize(kExpectedUVRow))); EXPECT_EQ(0, memcmp(kExpectedUVRow, v_plane, arraysize(kExpectedUVRow))); u_plane += frame->stride(VideoFrame::kUPlane); diff --git a/media/base/video_util_unittest.cc b/media/base/video_util_unittest.cc index 301566c..326907d 100644 --- a/media/base/video_util_unittest.cc +++ b/media/base/video_util_unittest.cc @@ -37,8 +37,9 @@ class VideoUtilTest : public testing::Test { } void CreateDestinationFrame(int width, int height) { + gfx::Size size(width, height); destination_frame_ = - VideoFrame::CreateFrame(VideoFrame::YV12, width, height, + VideoFrame::CreateFrame(VideoFrame::YV12, size, size, base::TimeDelta()); } diff --git a/media/filters/ffmpeg_video_decoder.cc b/media/filters/ffmpeg_video_decoder.cc index c2d34c2..f8931e0 100644 --- a/media/filters/ffmpeg_video_decoder.cc +++ b/media/filters/ffmpeg_video_decoder.cc @@ -75,14 +75,22 @@ int FFmpegVideoDecoder::GetVideoBuffer(AVCodecContext* codec_context, return AVERROR(EINVAL); DCHECK(format == VideoFrame::YV12 || format == VideoFrame::YV16); - int width = codec_context->width; - int height = codec_context->height; + gfx::Size size(codec_context->width, codec_context->height); int ret; - if ((ret = av_image_check_size(width, height, 0, NULL)) < 0) + if ((ret = av_image_check_size(size.width(), size.height(), 0, NULL)) < 0) return ret; + gfx::Size natural_size; + if (codec_context->sample_aspect_ratio.num > 0) { + natural_size = GetNaturalSize(size, + codec_context->sample_aspect_ratio.num, + codec_context->sample_aspect_ratio.den); + } else { + natural_size = demuxer_stream_->video_decoder_config().natural_size(); + } + scoped_refptr<VideoFrame> video_frame = - VideoFrame::CreateFrame(format, width, height, kNoTimestamp()); + 
VideoFrame::CreateFrame(format, size, natural_size, kNoTimestamp()); for (int i = 0; i < 3; i++) { frame->base[i] = video_frame->data(i); @@ -185,7 +193,6 @@ void FFmpegVideoDecoder::Initialize(const scoped_refptr<DemuxerStream>& stream, // Success! state_ = kNormal; av_frame_ = avcodec_alloc_frame(); - natural_size_ = config.natural_size(); status_cb.Run(PIPELINE_OK); } @@ -243,10 +250,6 @@ void FFmpegVideoDecoder::DoStop() { base::ResetAndReturn(&stop_cb_).Run(); } -const gfx::Size& FFmpegVideoDecoder::natural_size() { - return natural_size_; -} - void FFmpegVideoDecoder::set_decryptor(Decryptor* decryptor) { DCHECK_EQ(state_, kUninitialized); decryptor_ = decryptor; diff --git a/media/filters/ffmpeg_video_decoder.h b/media/filters/ffmpeg_video_decoder.h index 4d26380..56402d4 100644 --- a/media/filters/ffmpeg_video_decoder.h +++ b/media/filters/ffmpeg_video_decoder.h @@ -31,7 +31,6 @@ class MEDIA_EXPORT FFmpegVideoDecoder : public VideoDecoder { virtual void Read(const ReadCB& read_cb) OVERRIDE; virtual void Reset(const base::Closure& closure) OVERRIDE; virtual void Stop(const base::Closure& closure) OVERRIDE; - virtual const gfx::Size& natural_size() OVERRIDE; // Must be called prior to initialization if decrypted buffers will be // encountered. @@ -107,14 +106,6 @@ class MEDIA_EXPORT FFmpegVideoDecoder : public VideoDecoder { AVCodecContext* codec_context_; AVFrame* av_frame_; - // Frame rate of the video. - int frame_rate_numerator_; - int frame_rate_denominator_; - - // TODO(scherkus): I think this should be calculated by VideoRenderers based - // on information provided by VideoDecoders (i.e., aspect ratio). - gfx::Size natural_size_; - // Pointer to the demuxer stream that will feed us compressed buffers. scoped_refptr<DemuxerStream> demuxer_stream_; diff --git a/media/filters/ffmpeg_video_decoder_unittest.cc b/media/filters/ffmpeg_video_decoder_unittest.cc index 1e5f468..9d97c82 100644 --- a/media/filters/ffmpeg_video_decoder_unittest.cc +++ b/media/filters/ffmpeg_video_decoder_unittest.cc @@ -101,7 +101,7 @@ class FFmpegVideoDecoderTest : public testing::Test { void InitializeWithConfigAndStatus(const VideoDecoderConfig& config, PipelineStatus status) { EXPECT_CALL(*demuxer_, video_decoder_config()) - .WillOnce(ReturnRef(config)); + .WillRepeatedly(ReturnRef(config)); decoder_->Initialize(demuxer_, NewExpectedStatusCB(status), base::Bind(&MockStatisticsCB::OnStatistics, @@ -167,8 +167,8 @@ class FFmpegVideoDecoderTest : public testing::Test { // the file named |test_file_name|. This function expects both buffers // to decode to frames that are the same size. 
void DecodeIFrameThenTestFile(const std::string& test_file_name, - size_t expected_width, - size_t expected_height) { + int expected_width, + int expected_height) { Initialize(); VideoDecoder::DecoderStatus status_a; @@ -189,17 +189,15 @@ class FFmpegVideoDecoderTest : public testing::Test { Read(&status_a, &video_frame_a); Read(&status_b, &video_frame_b); - size_t original_width = static_cast<size_t>(kVisibleRect.width()); - size_t original_height = static_cast<size_t>(kVisibleRect.height()); - + gfx::Size original_size = kVisibleRect.size(); EXPECT_EQ(status_a, VideoDecoder::kOk); EXPECT_EQ(status_b, VideoDecoder::kOk); ASSERT_TRUE(video_frame_a); ASSERT_TRUE(video_frame_b); - EXPECT_EQ(original_width, video_frame_a->width()); - EXPECT_EQ(original_height, video_frame_a->height()); - EXPECT_EQ(expected_width, video_frame_b->width()); - EXPECT_EQ(expected_height, video_frame_b->height()); + EXPECT_EQ(original_size.width(), video_frame_a->data_size().width()); + EXPECT_EQ(original_size.height(), video_frame_a->data_size().height()); + EXPECT_EQ(expected_width, video_frame_b->data_size().width()); + EXPECT_EQ(expected_height, video_frame_b->data_size().height()); } void Read(VideoDecoder::DecoderStatus* status, diff --git a/media/filters/gpu_video_decoder.cc b/media/filters/gpu_video_decoder.cc index 848ae66..fc1afeb 100644 --- a/media/filters/gpu_video_decoder.cc +++ b/media/filters/gpu_video_decoder.cc @@ -36,6 +36,13 @@ GpuVideoDecoder::BufferPair::BufferPair( GpuVideoDecoder::BufferPair::~BufferPair() {} +GpuVideoDecoder::BufferData::BufferData( + int32 bbid, base::TimeDelta ts, const gfx::Size& ns) + : bitstream_buffer_id(bbid), timestamp(ts), natural_size(ns) { +} + +GpuVideoDecoder::BufferData::~BufferData() {} + GpuVideoDecoder::GpuVideoDecoder( MessageLoop* message_loop, MessageLoop* vda_loop, @@ -155,8 +162,6 @@ void GpuVideoDecoder::Initialize(const scoped_refptr<DemuxerStream>& stream, if (config.codec() == kCodecH264) demuxer_stream_->EnableBitstreamConverter(); - natural_size_ = config.natural_size(); - DVLOG(1) << "GpuVideoDecoder::Initialize() succeeded."; vda_loop_proxy_->PostTaskAndReply( FROM_HERE, @@ -258,39 +263,39 @@ void GpuVideoDecoder::RequestBufferDecode( bool inserted = bitstream_buffers_in_decoder_.insert(std::make_pair( bitstream_buffer.id(), BufferPair(shm_buffer, buffer))).second; DCHECK(inserted); - RecordBufferTimeData(bitstream_buffer, *buffer); + RecordBufferData(bitstream_buffer, *buffer); vda_loop_proxy_->PostTask(FROM_HERE, base::Bind( &VideoDecodeAccelerator::Decode, weak_vda_, bitstream_buffer)); } -void GpuVideoDecoder::RecordBufferTimeData( +void GpuVideoDecoder::RecordBufferData( const BitstreamBuffer& bitstream_buffer, const Buffer& buffer) { - input_buffer_time_data_.push_front(BufferTimeData( - bitstream_buffer.id(), buffer.GetTimestamp())); + input_buffer_data_.push_front(BufferData( + bitstream_buffer.id(), buffer.GetTimestamp(), + demuxer_stream_->video_decoder_config().natural_size())); // Why this value? Because why not. avformat.h:MAX_REORDER_DELAY is 16, but // that's too small for some pathological B-frame test videos. The cost of // using too-high a value is low (192 bits per extra slot). - static const size_t kMaxInputBufferTimeDataSize = 128; + static const size_t kMaxInputBufferDataSize = 128; // Pop from the back of the list, because that's the oldest and least likely // to be useful in the future data. 
- if (input_buffer_time_data_.size() > kMaxInputBufferTimeDataSize) - input_buffer_time_data_.pop_back(); + if (input_buffer_data_.size() > kMaxInputBufferDataSize) + input_buffer_data_.pop_back(); } -base::TimeDelta GpuVideoDecoder::GetBufferTimestamp(int32 id) { - for (std::list<BufferTimeData>::const_iterator it = - input_buffer_time_data_.begin(); it != input_buffer_time_data_.end(); +void GpuVideoDecoder::GetBufferData(int32 id, base::TimeDelta* timestamp, + gfx::Size* natural_size) { + for (std::list<BufferData>::const_iterator it = + input_buffer_data_.begin(); it != input_buffer_data_.end(); ++it) { - if (it->first == id) - return it->second; + if (it->bitstream_buffer_id != id) + continue; + *timestamp = it->timestamp; + *natural_size = it->natural_size; + return; } NOTREACHED() << "Missing bitstreambuffer id: " << id; - return kNoTimestamp(); -} - -const gfx::Size& GpuVideoDecoder::natural_size() { - return natural_size_; } bool GpuVideoDecoder::HasAlpha() const { @@ -376,11 +381,13 @@ void GpuVideoDecoder::PictureReady(const media::Picture& picture) { const PictureBuffer& pb = it->second; // Update frame's timestamp. - base::TimeDelta timestamp = GetBufferTimestamp(picture.bitstream_buffer_id()); + base::TimeDelta timestamp; + gfx::Size natural_size; + GetBufferData(picture.bitstream_buffer_id(), ×tamp, &natural_size); DCHECK(decoder_texture_target_); scoped_refptr<VideoFrame> frame(VideoFrame::WrapNativeTexture( - pb.texture_id(), decoder_texture_target_, pb.size().width(), - pb.size().height(), timestamp, + pb.texture_id(), decoder_texture_target_, pb.size(), natural_size, + timestamp, base::Bind(&GpuVideoDecoder::ReusePictureBuffer, this, picture.picture_buffer_id()))); @@ -525,7 +532,7 @@ void GpuVideoDecoder::NotifyResetDone() { // This needs to happen after the Reset() on vda_ is done to ensure pictures // delivered during the reset can find their time data. - input_buffer_time_data_.clear(); + input_buffer_data_.clear(); if (!pending_reset_cb_.is_null()) base::ResetAndReturn(&pending_reset_cb_).Run(); diff --git a/media/filters/gpu_video_decoder.h b/media/filters/gpu_video_decoder.h index af6dcf8..898537b 100644 --- a/media/filters/gpu_video_decoder.h +++ b/media/filters/gpu_video_decoder.h @@ -67,7 +67,6 @@ class MEDIA_EXPORT GpuVideoDecoder virtual void Read(const ReadCB& read_cb) OVERRIDE; virtual void Reset(const base::Closure& closure) OVERRIDE; virtual void Stop(const base::Closure& closure) OVERRIDE; - virtual const gfx::Size& natural_size() OVERRIDE; virtual bool HasAlpha() const OVERRIDE; virtual void PrepareForShutdownHack() OVERRIDE; @@ -117,9 +116,10 @@ class MEDIA_EXPORT GpuVideoDecoder // Indicate the picturebuffer can be reused by the decoder. void ReusePictureBuffer(int64 picture_buffer_id); - void RecordBufferTimeData( + void RecordBufferData( const BitstreamBuffer& bitstream_buffer, const Buffer& buffer); - base::TimeDelta GetBufferTimestamp(int32 id); + void GetBufferData(int32 id, base::TimeDelta* timetamp, + gfx::Size* natural_size); // Set |vda_| and |weak_vda_| on the VDA thread (in practice the render // thread). @@ -142,10 +142,6 @@ class MEDIA_EXPORT GpuVideoDecoder StatisticsCB statistics_cb_; - // TODO(scherkus): I think this should be calculated by VideoRenderers based - // on information provided by VideoDecoders (i.e., aspect ratio). - gfx::Size natural_size_; - // Pointer to the demuxer stream that will feed us compressed buffers. 
scoped_refptr<DemuxerStream> demuxer_stream_; @@ -194,9 +190,15 @@ class MEDIA_EXPORT GpuVideoDecoder // The texture target used for decoded pictures. uint32 decoder_texture_target_; - // Maintains bitstream buffer ID to timestamp mappings. - typedef std::pair<int32, base::TimeDelta> BufferTimeData; - std::list<BufferTimeData> input_buffer_time_data_; + struct BufferData { + BufferData(int32 bbid, base::TimeDelta ts, + const gfx::Size& natural_size); + ~BufferData(); + int32 bitstream_buffer_id; + base::TimeDelta timestamp; + gfx::Size natural_size; + }; + std::list<BufferData> input_buffer_data_; // picture_buffer_id and the frame wrapping the corresponding Picture, for // frames that have been decoded but haven't been requested by a Read() yet. diff --git a/media/filters/video_frame_generator.cc b/media/filters/video_frame_generator.cc index b8819d4..8c90f2e 100644 --- a/media/filters/video_frame_generator.cc +++ b/media/filters/video_frame_generator.cc @@ -16,7 +16,7 @@ VideoFrameGenerator::VideoFrameGenerator( const gfx::Size& size, const base::TimeDelta& frame_duration) : message_loop_proxy_(message_loop_proxy), - natural_size_(size), + size_(size), stopped_(true), frame_duration_(frame_duration) { } @@ -49,10 +49,6 @@ void VideoFrameGenerator::Stop(const base::Closure& closure) { base::Bind(&VideoFrameGenerator::StopOnDecoderThread, this, closure)); } -const gfx::Size& VideoFrameGenerator::natural_size() { - return natural_size_; -} - VideoFrameGenerator::~VideoFrameGenerator() {} void VideoFrameGenerator::InitializeOnDecoderThread( @@ -76,10 +72,7 @@ void VideoFrameGenerator::ReadOnDecoderThread(const ReadCB& read_cb) { // // TODO(scherkus): migrate this to proper buffer recycling. scoped_refptr<VideoFrame> video_frame = - VideoFrame::CreateFrame(VideoFrame::YV12, - natural_size_.width(), - natural_size_.height(), - current_time_); + VideoFrame::CreateFrame(VideoFrame::YV12, size_, size_, current_time_); current_time_ += frame_duration_; // TODO(wjia): set pixel data to pre-defined patterns if it's desired to diff --git a/media/filters/video_frame_generator.h b/media/filters/video_frame_generator.h index 5457145..2d32b55 100644 --- a/media/filters/video_frame_generator.h +++ b/media/filters/video_frame_generator.h @@ -35,7 +35,6 @@ class MEDIA_EXPORT VideoFrameGenerator : public VideoDecoder { virtual void Read(const ReadCB& read_cb) OVERRIDE; virtual void Reset(const base::Closure& closure) OVERRIDE; virtual void Stop(const base::Closure& closure) OVERRIDE; - virtual const gfx::Size& natural_size() OVERRIDE; protected: virtual ~VideoFrameGenerator(); @@ -50,7 +49,7 @@ class MEDIA_EXPORT VideoFrameGenerator : public VideoDecoder { void StopOnDecoderThread(const base::Closure& closure); scoped_refptr<base::MessageLoopProxy> message_loop_proxy_; - gfx::Size natural_size_; + gfx::Size size_; bool stopped_; base::TimeDelta current_time_; diff --git a/media/filters/video_renderer_base.cc b/media/filters/video_renderer_base.cc index 481c784..e4d90c7 100644 --- a/media/filters/video_renderer_base.cc +++ b/media/filters/video_renderer_base.cc @@ -137,9 +137,6 @@ void VideoRendererBase::Initialize(const scoped_refptr<VideoDecoder>& decoder, get_time_cb_ = get_time_cb; get_duration_cb_ = get_duration_cb; - // Notify the pipeline of the video dimensions. - size_changed_cb_.Run(decoder_->natural_size()); - // We're all good! Consider ourselves flushed. (ThreadMain() should never // see us in the kUninitialized state). 
// Since we had an initial Preroll(), we consider ourself flushed, because we @@ -300,8 +297,7 @@ void VideoRendererBase::ThreadMain() { // signal to the client that a new frame is available. DCHECK(!pending_paint_); DCHECK(!ready_frames_.empty()); - current_frame_ = ready_frames_.front(); - ready_frames_.pop_front(); + SetCurrentFrameToNextReadyFrame(); AttemptRead_Locked(); base::AutoUnlock auto_unlock(lock_); @@ -309,6 +305,18 @@ void VideoRendererBase::ThreadMain() { } } +void VideoRendererBase::SetCurrentFrameToNextReadyFrame() { + current_frame_ = ready_frames_.front(); + ready_frames_.pop_front(); + + // Notify the pipeline of natural_size() changes. + const gfx::Size& natural_size = current_frame_->natural_size(); + if (natural_size != last_natural_size_) { + size_changed_cb_.Run(natural_size); + last_natural_size_ = natural_size; + } +} + void VideoRendererBase::GetCurrentFrame(scoped_refptr<VideoFrame>* frame_out) { base::AutoLock auto_lock(lock_); DCHECK(!pending_paint_ && !pending_paint_with_last_available_); @@ -457,10 +465,8 @@ void VideoRendererBase::FrameReady(VideoDecoder::DecoderStatus status, // Because we might remain in the prerolled state for an undetermined amount // of time (i.e., we were not playing before we started prerolling), we'll // manually update the current frame and notify the subclass below. - if (!ready_frames_.front()->IsEndOfStream()) { - current_frame_ = ready_frames_.front(); - ready_frames_.pop_front(); - } + if (!ready_frames_.front()->IsEndOfStream()) + SetCurrentFrameToNextReadyFrame(); // ...and we're done prerolling! DCHECK(!preroll_cb_.is_null()); diff --git a/media/filters/video_renderer_base.h b/media/filters/video_renderer_base.h index 6bd2dae..276a24b 100644 --- a/media/filters/video_renderer_base.h +++ b/media/filters/video_renderer_base.h @@ -116,6 +116,10 @@ class MEDIA_EXPORT VideoRendererBase // Return the number of frames currently held by this class. int NumFrames_Locked() const; + // Updates |current_frame_| to the next frame on |ready_frames_| and calls + // |size_changed_cb_| if the natural size changes. + void SetCurrentFrameToNextReadyFrame(); + // Used for accessing data members. base::Lock lock_; @@ -228,6 +232,9 @@ class MEDIA_EXPORT VideoRendererBase // opaque. SetOpaqueCB set_opaque_cb_; + // The last natural size |size_changed_cb_| was called with. + gfx::Size last_natural_size_; + DISALLOW_COPY_AND_ASSIGN(VideoRendererBase); }; diff --git a/media/filters/video_renderer_base_unittest.cc b/media/filters/video_renderer_base_unittest.cc index 9b38dfa..d08670e 100644 --- a/media/filters/video_renderer_base_unittest.cc +++ b/media/filters/video_renderer_base_unittest.cc @@ -94,9 +94,6 @@ class VideoRendererBaseTest : public ::testing::Test { InSequence s; - // We expect the video size to be set. - EXPECT_CALL(*this, OnNaturalSizeChanged(kNaturalSize)); - // Set playback rate before anything else happens. renderer_->SetPlaybackRate(1.0f); @@ -116,6 +113,9 @@ class VideoRendererBaseTest : public ::testing::Test { base::Bind(&VideoRendererBaseTest::GetDuration, base::Unretained(this))); + // We expect the video size to be set. + EXPECT_CALL(*this, OnNaturalSizeChanged(kNaturalSize)); + // Start prerolling. Preroll(0); } @@ -253,8 +253,7 @@ class VideoRendererBaseTest : public ::testing::Test { // Creates a frame with given timestamp. 
scoped_refptr<VideoFrame> CreateFrame(int64 timestamp) { scoped_refptr<VideoFrame> frame = - VideoFrame::CreateFrame(VideoFrame::RGB32, kNaturalSize.width(), - kNaturalSize.height(), + VideoFrame::CreateFrame(VideoFrame::RGB32, kNaturalSize, kNaturalSize, base::TimeDelta::FromMicroseconds(timestamp)); return frame; } diff --git a/media/media.gyp b/media/media.gyp index 96da4ff..f9c362f 100644 --- a/media/media.gyp +++ b/media/media.gyp @@ -847,6 +847,7 @@ 'yuv_convert', '../base/base.gyp:base', '../skia/skia.gyp:skia', + '../ui/ui.gyp:ui', ], 'sources': [ 'tools/scaler_bench/scaler_bench.cc', @@ -920,6 +921,7 @@ 'yuv_convert', '../base/base.gyp:base', '../ui/gl/gl.gyp:gl', + '../ui/ui.gyp:ui', ], 'sources': [ 'tools/shader_bench/cpu_color_painter.cc', @@ -985,6 +987,7 @@ 'yuv_convert', '../base/base.gyp:base', '../ui/gl/gl.gyp:gl', + '../ui/ui.gyp:ui', ], 'link_settings': { 'libraries': [ diff --git a/media/tools/player_wtl/view.h b/media/tools/player_wtl/view.h index 7519b09..b0e4737 100644 --- a/media/tools/player_wtl/view.h +++ b/media/tools/player_wtl/view.h @@ -144,11 +144,11 @@ class WtlVideoWindow : public CScrollWindowImpl<WtlVideoWindow> { uint8 *movie_dib_bits = reinterpret_cast<uint8 *>(bm.bmBits) + bm.bmWidthBytes * (bm.bmHeight - 1); int dibrowbytes = -bm.bmWidthBytes; - int clipped_width = video_frame->width(); + int clipped_width = video_frame->data_size().width(); if (dibwidth < clipped_width) { clipped_width = dibwidth; } - int clipped_height = video_frame->height(); + int clipped_height = video_frame->data_size().height(); if (dibheight < clipped_height) { clipped_height = dibheight; } @@ -243,7 +243,7 @@ class WtlVideoWindow : public CScrollWindowImpl<WtlVideoWindow> { if (frame) { // Size the window the first time we get a frame. if (!last_frame_) - SetSize(frame->width(), frame->height()); + SetSize(frame->data_size().width(), frame->data_size().height()); base::TimeDelta frame_timestamp = frame->GetTimestamp(); if (frame != last_frame_ || frame_timestamp != last_timestamp_) { @@ -435,25 +435,25 @@ class WtlVideoWindow : public CScrollWindowImpl<WtlVideoWindow> { FILE * file_yuv = fopen("raw.yuv", "ab+"); // Open for append binary. 
if (file_yuv != NULL) { fseek(file_yuv, 0, SEEK_END); - const size_t frame_size = - video_frame->width() * video_frame->height(); - for (size_t y = 0; y < video_frame->height(); ++y) + const int frame_size = + video_frame->data_size().width() * video_frame->data_size().height(); + for (int y = 0; y < video_frame->data_size().height(); ++y) fwrite(video_frame->data(0) + video_frame->stride(0)*y, - video_frame->width(), sizeof(uint8), file_yuv); - for (size_t y = 0; y < video_frame->height()/2; ++y) + video_frame->data_size().width(), sizeof(uint8), file_yuv); + for (int y = 0; y < video_frame->data_size().height()/2; ++y) fwrite(video_frame->data(1) + video_frame->stride(1)*y, - video_frame->width() / 2, sizeof(uint8), file_yuv); - for (size_t y = 0; y < video_frame->height()/2; ++y) + video_frame->data_size().width() / 2, sizeof(uint8), file_yuv); + for (int y = 0; y < video_frame->data_size().height()/2; ++y) fwrite(video_frame->data(2) + video_frame->stride(2)*y, - video_frame->width() / 2, sizeof(uint8), file_yuv); + video_frame->data_size().width() / 2, sizeof(uint8), file_yuv); fclose(file_yuv); #if TESTING static int frame_dump_count = 0; char outputbuf[512]; _snprintf_s(outputbuf, sizeof(outputbuf), "yuvdump %4d %dx%d stride %d\n", - frame_dump_count, video_frame->width(), - video_frame->height(), + frame_dump_count, video_frame->data_size().width(), + video_frame->data_size().height(), video_frame->stride(0)); OutputDebugStringA(outputbuf); ++frame_dump_count; diff --git a/media/tools/player_x11/gl_video_renderer.cc b/media/tools/player_x11/gl_video_renderer.cc index f39a464..50abdd9 100644 --- a/media/tools/player_x11/gl_video_renderer.cc +++ b/media/tools/player_x11/gl_video_renderer.cc @@ -121,7 +121,8 @@ GlVideoRenderer::~GlVideoRenderer() { void GlVideoRenderer::Paint(media::VideoFrame* video_frame) { if (!gl_context_) - Initialize(video_frame->width(), video_frame->height()); + Initialize(video_frame->data_size().width(), + video_frame->data_size().height()); // Convert YUV frame to RGB. DCHECK(video_frame->format() == media::VideoFrame::YV12 || diff --git a/media/tools/player_x11/x11_video_renderer.cc b/media/tools/player_x11/x11_video_renderer.cc index b1f0cff..d259d6b 100644 --- a/media/tools/player_x11/x11_video_renderer.cc +++ b/media/tools/player_x11/x11_video_renderer.cc @@ -1,4 +1,4 @@ -// Copyright (c) 2011 The Chromium Authors. All rights reserved. +// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. 
@@ -84,8 +84,8 @@ X11VideoRenderer::~X11VideoRenderer() { } void X11VideoRenderer::Paint(media::VideoFrame* video_frame) { - int width = video_frame->width(); - int height = video_frame->height(); + int width = video_frame->data_size().width(); + int height = video_frame->data_size().height(); if (!image_) Initialize(width, height); @@ -110,8 +110,8 @@ void X11VideoRenderer::Paint(media::VideoFrame* video_frame) { video_frame->data(media::VideoFrame::kUPlane), video_frame->data(media::VideoFrame::kVPlane), (uint8*)image_->data, - video_frame->width(), - video_frame->height(), + video_frame->data_size().width(), + video_frame->data_size().height(), video_frame->stride(media::VideoFrame::kYPlane), video_frame->stride(media::VideoFrame::kUPlane), image_->bytes_per_line, diff --git a/media/tools/scaler_bench/scaler_bench.cc b/media/tools/scaler_bench/scaler_bench.cc index 3337cdb..d8525d1 100644 --- a/media/tools/scaler_bench/scaler_bench.cc +++ b/media/tools/scaler_bench/scaler_bench.cc @@ -34,7 +34,7 @@ static double BenchmarkSkia() { ScopedVector<SkBitmap> dest_frames; for (int i = 0; i < num_buffers; i++) { source_frames.push_back( - VideoFrame::CreateBlackFrame(source_width, source_height)); + VideoFrame::CreateBlackFrame(gfx::Size(source_width, source_height))); SkBitmap* bitmap = new SkBitmap(); bitmap->setConfig(SkBitmap::kARGB_8888_Config, @@ -118,12 +118,11 @@ static double BenchmarkFilter(media::ScaleFilter filter) { for (int i = 0; i < num_buffers; i++) { source_frames.push_back( - VideoFrame::CreateBlackFrame(source_width, source_height)); + VideoFrame::CreateBlackFrame(gfx::Size(source_width, source_height))); + gfx::Size dest_size(dest_width, dest_height); dest_frames.push_back( - VideoFrame::CreateFrame(VideoFrame::RGB32, - dest_width, - dest_height, + VideoFrame::CreateFrame(VideoFrame::RGB32, dest_size, dest_size, TimeDelta::FromSeconds(0))); } @@ -157,12 +156,11 @@ static double BenchmarkScaleWithRect() { for (int i = 0; i < num_buffers; i++) { source_frames.push_back( - VideoFrame::CreateBlackFrame(source_width, source_height)); + VideoFrame::CreateBlackFrame(gfx::Size(source_width, source_height))); + gfx::Size dest_size(dest_width, dest_height); dest_frames.push_back( - VideoFrame::CreateFrame(VideoFrame::RGB32, - dest_width, - dest_height, + VideoFrame::CreateFrame(VideoFrame::RGB32, dest_size, dest_size, TimeDelta::FromSeconds(0))); } diff --git a/media/tools/shader_bench/cpu_color_painter.cc b/media/tools/shader_bench/cpu_color_painter.cc index 95314d0..b99b803 100644 --- a/media/tools/shader_bench/cpu_color_painter.cc +++ b/media/tools/shader_bench/cpu_color_painter.cc @@ -67,24 +67,24 @@ void CPUColorPainter::Paint(scoped_refptr<media::VideoFrame> video_frame) { // Convert to RGB32 frame. 
scoped_refptr<media::VideoFrame> rgba_frame = media::VideoFrame::CreateFrame(media::VideoFrame::RGB32, - video_frame->width(), - video_frame->height(), + video_frame->data_size(), + video_frame->natural_size(), base::TimeDelta()); media::ConvertYUVToRGB32(video_frame->data(media::VideoFrame::kYPlane), video_frame->data(media::VideoFrame::kUPlane), video_frame->data(media::VideoFrame::kVPlane), rgba_frame->data(0), - video_frame->width(), - video_frame->height(), + video_frame->data_size().width(), + video_frame->data_size().height(), video_frame->stride(media::VideoFrame::kYPlane), video_frame->stride(media::VideoFrame::kUPlane), rgba_frame->stride(0), media::YV12); glBindTexture(GL_TEXTURE_2D, textures_[0]); - glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, rgba_frame->width(), - rgba_frame->height(), GL_RGBA, GL_UNSIGNED_BYTE, + glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, rgba_frame->data_size().width(), + rgba_frame->data_size().height(), GL_RGBA, GL_UNSIGNED_BYTE, rgba_frame->data(0)); glDrawArrays(GL_TRIANGLE_STRIP, 0, 4); diff --git a/media/tools/shader_bench/gpu_color_painter.cc b/media/tools/shader_bench/gpu_color_painter.cc index 2c81d2b..a408de2 100644 --- a/media/tools/shader_bench/gpu_color_painter.cc +++ b/media/tools/shader_bench/gpu_color_painter.cc @@ -104,13 +104,15 @@ void GPUColorWithLuminancePainter::Initialize(int width, int height) { void GPUColorWithLuminancePainter::Paint( scoped_refptr<media::VideoFrame> video_frame) { + int width = video_frame->data_size().width(); + int height = video_frame->data_size().height(); for (unsigned int i = 0; i < kNumYUVPlanes; ++i) { - unsigned int width = (i == media::VideoFrame::kYPlane) ? - video_frame->width() : video_frame->width() / 2; - unsigned int height = (i == media::VideoFrame::kYPlane) ? - video_frame->height() : video_frame->height() / 2; + unsigned int plane_width = + (i == media::VideoFrame::kYPlane) ? width : width / 2; + unsigned int plane_height = + (i == media::VideoFrame::kYPlane) ? 
height : height / 2; glBindTexture(GL_TEXTURE_2D, textures_[i]); - glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height, + glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, plane_width, plane_height, GL_LUMINANCE, GL_UNSIGNED_BYTE, video_frame->data(i)); } diff --git a/media/tools/shader_bench/shader_bench.cc b/media/tools/shader_bench/shader_bench.cc index 30a548f..17a45a4 100644 --- a/media/tools/shader_bench/shader_bench.cc +++ b/media/tools/shader_bench/shader_bench.cc @@ -50,11 +50,10 @@ void GetFrames(std::string file_name, long frame_size = CalculateYUVFrameSize(file_handle, num_frames); + gfx::Size size(width, height); for (int i = 0; i < num_frames; i++) { scoped_refptr<media::VideoFrame> video_frame = - media::VideoFrame::CreateFrame(media::VideoFrame::YV12, - width, - height, + media::VideoFrame::CreateFrame(media::VideoFrame::YV12, size, size, base::TimeDelta()); long bytes_read = fread(video_frame->data(0), 1, frame_size, file_handle); diff --git a/webkit/media/android/webmediaplayer_android.cc b/webkit/media/android/webmediaplayer_android.cc index ef457b25..8032a8c 100644 --- a/webkit/media/android/webmediaplayer_android.cc +++ b/webkit/media/android/webmediaplayer_android.cc @@ -453,7 +453,8 @@ void WebMediaPlayerAndroid::OnVideoSizeChanged(int width, int height) { natural_size_.height = height; if (texture_id_) { video_frame_.reset(new WebVideoFrameImpl(VideoFrame::WrapNativeTexture( - texture_id_, kGLTextureExternalOES, width, height, base::TimeDelta(), + texture_id_, kGLTextureExternalOES, natural_size_, natural_size_, + base::TimeDelta(), base::Closure()))); } } diff --git a/webkit/media/skcanvas_video_renderer.cc b/webkit/media/skcanvas_video_renderer.cc index 0dfa8e2..5b8db9f 100644 --- a/webkit/media/skcanvas_video_renderer.cc +++ b/webkit/media/skcanvas_video_renderer.cc @@ -109,19 +109,19 @@ static void FastPaint( DCHECK_NE(0, dest_rect.width()); DCHECK_NE(0, dest_rect.height()); size_t frame_clip_width = local_dest_irect.width() * - video_frame->width() / local_dest_irect_saved.width(); + video_frame->data_size().width() / local_dest_irect_saved.width(); size_t frame_clip_height = local_dest_irect.height() * - video_frame->height() / local_dest_irect_saved.height(); + video_frame->data_size().height() / local_dest_irect_saved.height(); // Project the "left" and "top" of the final destination rect to local // coordinates of the video frame, use these values to find the offsets // in the video frame to start reading. size_t frame_clip_left = (local_dest_irect.fLeft - local_dest_irect_saved.fLeft) * - video_frame->width() / local_dest_irect_saved.width(); + video_frame->data_size().width() / local_dest_irect_saved.width(); size_t frame_clip_top = (local_dest_irect.fTop - local_dest_irect_saved.fTop) * - video_frame->height() / local_dest_irect_saved.height(); + video_frame->data_size().height() / local_dest_irect_saved.height(); // Use the "left" and "top" of the destination rect to locate the offset // in Y, U and V planes. @@ -171,11 +171,11 @@ static void ConvertVideoFrameToBitmap( // Check if |bitmap| needs to be (re)allocated. 
if (bitmap->isNull() || - bitmap->width() != static_cast<int>(video_frame->width()) || - bitmap->height() != static_cast<int>(video_frame->height())) { + bitmap->width() != video_frame->data_size().width() || + bitmap->height() != video_frame->data_size().height()) { bitmap->setConfig(SkBitmap::kARGB_8888_Config, - video_frame->width(), - video_frame->height()); + video_frame->data_size().width(), + video_frame->data_size().height()); bitmap->allocPixels(); bitmap->setIsVolatile(true); } @@ -188,8 +188,8 @@ static void ConvertVideoFrameToBitmap( video_frame->data(media::VideoFrame::kUPlane), video_frame->data(media::VideoFrame::kVPlane), static_cast<uint8*>(bitmap->getPixels()), - video_frame->width(), - video_frame->height(), + video_frame->data_size().width(), + video_frame->data_size().height(), video_frame->stride(media::VideoFrame::kYPlane), video_frame->stride(media::VideoFrame::kUPlane), bitmap->rowBytes(), diff --git a/webkit/media/skcanvas_video_renderer_unittest.cc b/webkit/media/skcanvas_video_renderer_unittest.cc index f2a1c88..f83d75a 100644 --- a/webkit/media/skcanvas_video_renderer_unittest.cc +++ b/webkit/media/skcanvas_video_renderer_unittest.cc @@ -76,9 +76,11 @@ class SkCanvasVideoRendererTest : public testing::Test { }; SkCanvasVideoRendererTest::SkCanvasVideoRendererTest() - : natural_frame_(VideoFrame::CreateBlackFrame(kWidth, kHeight)), - larger_frame_(VideoFrame::CreateBlackFrame(kWidth * 2, kHeight * 2)), - smaller_frame_(VideoFrame::CreateBlackFrame(kWidth / 2, kHeight / 2)), + : natural_frame_(VideoFrame::CreateBlackFrame(gfx::Size(kWidth, kHeight))), + larger_frame_(VideoFrame::CreateBlackFrame( + gfx::Size(kWidth * 2, kHeight * 2))), + smaller_frame_(VideoFrame::CreateBlackFrame( + gfx::Size(kWidth / 2, kHeight / 2))), fast_path_device_(SkBitmap::kARGB_8888_Config, kWidth, kHeight, true), fast_path_canvas_(&fast_path_device_), slow_path_device_(SkBitmap::kARGB_8888_Config, kWidth, kHeight, false), diff --git a/webkit/media/webvideoframe_impl.cc b/webkit/media/webvideoframe_impl.cc index d6e178c..51532c9 100644 --- a/webkit/media/webvideoframe_impl.cc +++ b/webkit/media/webvideoframe_impl.cc @@ -48,13 +48,13 @@ WebVideoFrame::Format WebVideoFrameImpl::format() const { unsigned WebVideoFrameImpl::width() const { if (video_frame_.get()) - return video_frame_->width(); + return video_frame_->data_size().width(); return 0; } unsigned WebVideoFrameImpl::height() const { if (video_frame_.get()) - return video_frame_->height(); + return video_frame_->data_size().height(); return 0; } |
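The two HasSize matchers added in the tests above follow gmock's MATCHER_P2 pattern. A reduced, self-contained version — FakeFrame is a hypothetical stand-in for media::VideoFrame; link against gtest_main to run:

#include <gmock/gmock.h>
#include <gtest/gtest.h>

// Just enough structure to exercise the matcher.
struct FakeFrame {
  struct Size { int width; int height; };
  Size data_size;
  Size natural_size;
};

// Same shape as the HasSize matchers above: a frame matches only if both its
// data size and its natural size equal the expected dimensions. The EXPECT_EQ
// calls in the originals add per-field failure output; the boolean result is
// what gmock actually uses to decide the match.
MATCHER_P2(HasSize, width, height, "") {
  return arg.data_size.width == width && arg.data_size.height == height &&
         arg.natural_size.width == width && arg.natural_size.height == height;
}

TEST(HasSizeMatcher, ChecksBothSizes) {
  FakeFrame frame = {{176, 144}, {176, 144}};
  EXPECT_THAT(frame, HasSize(176, 144));
  EXPECT_THAT(frame, ::testing::Not(HasSize(320, 240)));
}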