author     mek@chromium.org <mek@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>   2013-04-16 22:29:31 +0000
committer  mek@chromium.org <mek@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>   2013-04-16 22:29:31 +0000
commit     d5825e11bd7457e7c48bc364865a7f1b33ac9fb2 (patch)
tree       414e2bbc7c044d64cad9dc5cef7565ebe4b05654
parent     92668ad271d3b81e8076b60dd40744f4c11b0396 (diff)
Revert 194465 "media: Add support for playback for VP8 Alpha vid..."
Broke Linux compilation: http://build.chromium.org/p/chromium.linux/buildstatus?builder=Linux%20Builder%20%28dbg%29&number=47451
> media: Add support for playback for VP8 Alpha video streams.
>
> BUG=147355
> TEST=VP8 Alpha video streams play
>
> Review URL: https://codereview.chromium.org/12263013
>
> Patch from Vignesh Venkatasubramanian <vigneshv@chromium.org>.
TBR=tomfinegan@chromium.org
Review URL: https://codereview.chromium.org/13972014
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@194469 0039d316-1c4b-4281-b951-d872f2087c98
31 files changed, 103 insertions(+), 900 deletions(-)
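For context on what the reverted patch added: its YV12A format (see the `media/base/video_frame.h` hunk below) is planar YV12 plus a full-resolution alpha plane — 1x1 Y, 2x2 subsampled U and V, 1x1 A, i.e. 20 bits per pixel. A minimal sketch of the plane sizing this implies; the struct and function names here are illustrative, not from the Chromium tree:

```cpp
#include <cstddef>

// Hypothetical helper (not part of the patch): byte sizes of the four
// planes of a width x height YV12A frame, ignoring stride alignment.
struct Yv12aPlaneSizes {
  std::size_t y_bytes;
  std::size_t u_bytes;
  std::size_t v_bytes;
  std::size_t a_bytes;
};

Yv12aPlaneSizes ComputeYv12aPlaneSizes(std::size_t width, std::size_t height) {
  const std::size_t y = width * height;               // 8 bpp luma, full res
  const std::size_t uv = (width / 2) * (height / 2);  // 2x2 subsampled chroma
  return {y, uv, uv, y};  // alpha plane matches Y, giving 20 bpp total
}
```

The alpha plane matches the Y plane's resolution because transparency is a per-pixel property, unlike chroma, which can be subsampled with little visible loss.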
diff --git a/cc/resources/video_resource_updater.cc b/cc/resources/video_resource_updater.cc
index 2e68ea9..11bfc31 100644
--- a/cc/resources/video_resource_updater.cc
+++ b/cc/resources/video_resource_updater.cc
@@ -46,7 +46,6 @@ bool VideoResourceUpdater::VerifyFrame(
   switch (video_frame->format()) {
     // Acceptable inputs.
     case media::VideoFrame::YV12:
-    case media::VideoFrame::YV12A:
     case media::VideoFrame::YV16:
     case media::VideoFrame::NATIVE_TEXTURE:
 #if defined(GOOGLE_TV)
@@ -77,7 +76,6 @@ static gfx::Size SoftwarePlaneDimension(
   switch (input_frame_format) {
     case media::VideoFrame::YV12:
-    case media::VideoFrame::YV12A:
       return gfx::ToFlooredSize(gfx::ScaleSize(coded_size, 0.5f, 0.5f));
     case media::VideoFrame::YV16:
       return gfx::ToFlooredSize(gfx::ScaleSize(coded_size, 0.5f, 1.f));
@@ -115,10 +113,8 @@ VideoFrameExternalResources VideoResourceUpdater::CreateForSoftwarePlanes(
   // Only YUV software video frames are supported.
   DCHECK(input_frame_format == media::VideoFrame::YV12 ||
-         input_frame_format == media::VideoFrame::YV12A ||
          input_frame_format == media::VideoFrame::YV16);
   if (input_frame_format != media::VideoFrame::YV12 &&
-      input_frame_format != media::VideoFrame::YV12A &&
       input_frame_format != media::VideoFrame::YV16)
     return VideoFrameExternalResources();
diff --git a/chrome/app/generated_resources.grd b/chrome/app/generated_resources.grd
index 784c111..e755f0e 100644
--- a/chrome/app/generated_resources.grd
+++ b/chrome/app/generated_resources.grd
@@ -6716,12 +6716,6 @@ Keep your key file in a safe place. You will need it to create new versions of y
       <message name="IDS_FLAGS_ENABLE_VP9_PLAYBACK_DESCRIPTION" desc="Description for the flag to enable VP9 playback in <video> elements.">
         Enable experimental VP9 playback in the video element.
       </message>
-      <message name="IDS_FLAGS_ENABLE_VP8_ALPHA_PLAYBACK_NAME" desc="Title for the flag to enable VP8 Alpha playback in <video> elements.">
-        Enable VP8 Alpha playback in <video> elements.
-      </message>
-      <message name="IDS_FLAGS_ENABLE_VP8_ALPHA_PLAYBACK_DESCRIPTION" desc="Description for the flag to enable VP8 Alpha playback in <video> elements.">
-        Enable experimental VP8 Alpha playback in the video element.
-      </message>
       <message name="IDS_FLAGS_ASH_AUTO_WINDOW_PLACEMENT_NAME" desc="Name for the option to enable/disable the auto window placement functionality.">
         Automatic window placement.
       </message>
diff --git a/chrome/browser/about_flags.cc b/chrome/browser/about_flags.cc
index a7bd015..563297b 100644
--- a/chrome/browser/about_flags.cc
+++ b/chrome/browser/about_flags.cc
@@ -722,13 +722,6 @@ const Experiment kExperiments[] = {
     SINGLE_VALUE_TYPE(switches::kEnableVp9Playback)
   },
   {
-    "enable-vp8-alpha-playback",
-    IDS_FLAGS_ENABLE_VP8_ALPHA_PLAYBACK_NAME,
-    IDS_FLAGS_ENABLE_VP8_ALPHA_PLAYBACK_DESCRIPTION,
-    kOsDesktop,
-    SINGLE_VALUE_TYPE(switches::kEnableVp8AlphaPlayback)
-  },
-  {
     "enable-managed-users",
     IDS_FLAGS_ENABLE_LOCALLY_MANAGED_USERS_NAME,
     IDS_FLAGS_ENABLE_LOCALLY_MANAGED_USERS_DESCRIPTION,
diff --git a/content/browser/renderer_host/media/video_capture_controller.cc b/content/browser/renderer_host/media/video_capture_controller.cc
index b82b796..25d41a5 100644
--- a/content/browser/renderer_host/media/video_capture_controller.cc
+++ b/content/browser/renderer_host/media/video_capture_controller.cc
@@ -398,7 +398,6 @@ void VideoCaptureController::OnIncomingCapturedVideoFrame(
   const int kYPlane = media::VideoFrame::kYPlane;
   const int kUPlane = media::VideoFrame::kUPlane;
   const int kVPlane = media::VideoFrame::kVPlane;
-  const int kAPlane = media::VideoFrame::kAPlane;
   const int kRGBPlane = media::VideoFrame::kRGBPlane;

   // Do color conversion from the camera format to I420.
@@ -431,26 +430,6 @@ void VideoCaptureController::OnIncomingCapturedVideoFrame(
                         target);
       break;
     }
-    case media::VideoFrame::YV12A: {
-      DCHECK(!chopped_width_ && !chopped_height_);
-      media::CopyYPlane(frame->data(kYPlane),
-                        frame->stride(kYPlane),
-                        frame->rows(kYPlane),
-                        target);
-      media::CopyUPlane(frame->data(kUPlane),
-                        frame->stride(kUPlane),
-                        frame->rows(kUPlane),
-                        target);
-      media::CopyVPlane(frame->data(kVPlane),
-                        frame->stride(kVPlane),
-                        frame->rows(kVPlane),
-                        target);
-      media::CopyAPlane(frame->data(kAPlane),
-                        frame->stride(kAPlane),
-                        frame->rows(kAPlane),
-                        target);
-      break;
-    }
     case media::VideoFrame::RGB32: {
       media::ConvertRGB32ToYUV(frame->data(kRGBPlane),
                                target->data(kYPlane),
diff --git a/content/browser/renderer_host/render_process_host_impl.cc b/content/browser/renderer_host/render_process_host_impl.cc
index 9f97093..53b2822 100644
--- a/content/browser/renderer_host/render_process_host_impl.cc
+++ b/content/browser/renderer_host/render_process_host_impl.cc
@@ -868,7 +868,6 @@ void RenderProcessHostImpl::PropagateBrowserCommandLineToRenderer(
     switches::kEnableViewport,
     switches::kEnableOpusPlayback,
     switches::kEnableVp9Playback,
-    switches::kEnableVp8AlphaPlayback,
    switches::kForceDeviceScaleFactor,
     switches::kFullMemoryCrashReport,
 #if !defined (GOOGLE_CHROME_BUILD)
diff --git a/media/base/decoder_buffer.cc b/media/base/decoder_buffer.cc
index aec4521..03f9bbb 100644
--- a/media/base/decoder_buffer.cc
+++ b/media/base/decoder_buffer.cc
@@ -10,14 +10,12 @@
 namespace media {

 DecoderBuffer::DecoderBuffer(int size)
-    : size_(size),
-      side_data_size_(0) {
+    : size_(size) {
   Initialize();
 }

 DecoderBuffer::DecoderBuffer(const uint8* data, int size)
-    : size_(size),
-      side_data_size_(0) {
+    : size_(size) {
   if (!data) {
     CHECK_EQ(size_, 0);
     return;
@@ -27,20 +25,6 @@ DecoderBuffer::DecoderBuffer(const uint8* data, int size)
   memcpy(data_.get(), data, size_);
 }

-DecoderBuffer::DecoderBuffer(const uint8* data, int size,
-                             const uint8* side_data, int side_data_size)
-    : size_(size),
-      side_data_size_(side_data_size) {
-  if (!data) {
-    CHECK_EQ(size_, 0);
-    return;
-  }
-
-  Initialize();
-  memcpy(data_.get(), data, size_);
-  memcpy(side_data_.get(), side_data, side_data_size_);
-}
-
 DecoderBuffer::~DecoderBuffer() {}

 void DecoderBuffer::Initialize() {
@@ -48,11 +32,6 @@ void DecoderBuffer::Initialize() {
   data_.reset(reinterpret_cast<uint8*>(
       base::AlignedAlloc(size_ + kPaddingSize, kAlignmentSize)));
   memset(data_.get() + size_, 0, kPaddingSize);
-  if (side_data_size_ > 0) {
-    side_data_.reset(reinterpret_cast<uint8*>(
-        base::AlignedAlloc(side_data_size_ + kPaddingSize, kAlignmentSize)));
-    memset(side_data_.get() + side_data_size_, 0, kPaddingSize);
-  }
 }

 // static
@@ -64,18 +43,6 @@ scoped_refptr<DecoderBuffer> DecoderBuffer::CopyFrom(const uint8* data,
 }

 // static
-scoped_refptr<DecoderBuffer> DecoderBuffer::CopyFrom(const uint8* data,
-                                                     int data_size,
-                                                     const uint8* side_data,
-                                                     int side_data_size) {
-  // If you hit this CHECK you likely have a bug in a demuxer. Go fix it.
-  CHECK(data);
-  CHECK(side_data);
-  return make_scoped_refptr(new DecoderBuffer(data, data_size,
-                                              side_data, side_data_size));
-}
-
-// static
 scoped_refptr<DecoderBuffer> DecoderBuffer::CreateEOSBuffer() {
   return make_scoped_refptr(new DecoderBuffer(NULL, 0));
 }
@@ -115,16 +82,6 @@ int DecoderBuffer::GetDataSize() const {
   return size_;
 }

-const uint8* DecoderBuffer::GetSideData() const {
-  DCHECK(!IsEndOfStream());
-  return side_data_.get();
-}
-
-int DecoderBuffer::GetSideDataSize() const {
-  DCHECK(!IsEndOfStream());
-  return side_data_size_;
-}
-
 const DecryptConfig* DecoderBuffer::GetDecryptConfig() const {
   DCHECK(!IsEndOfStream());
   return decrypt_config_.get();
@@ -148,7 +105,6 @@ std::string DecoderBuffer::AsHumanReadableString() {
   s << "timestamp: " << timestamp_.InMicroseconds()
     << " duration: " << duration_.InMicroseconds()
     << " size: " << size_
-    << " side_data_size: " << side_data_size_
     << " encrypted: " << (decrypt_config_ != NULL);
   return s.str();
 }
diff --git a/media/base/decoder_buffer.h b/media/base/decoder_buffer.h
index 168ab2c..c23e88f 100644
--- a/media/base/decoder_buffer.h
+++ b/media/base/decoder_buffer.h
@@ -47,13 +47,6 @@ class MEDIA_EXPORT DecoderBuffer
   // padded and aligned as necessary. |data| must not be NULL and |size| >= 0.
   static scoped_refptr<DecoderBuffer> CopyFrom(const uint8* data, int size);

-  // Create a DecoderBuffer whose |data_| is copied from |data| and |side_data_|
-  // is copied from |side_data|. Buffers will be padded and aligned as necessary
-  // Data pointers must not be NULL and sizes must be >= 0.
-  static scoped_refptr<DecoderBuffer> CopyFrom(const uint8* data, int size,
-                                               const uint8* side_data,
-                                               int side_data_size);
-
   // Create a DecoderBuffer indicating we've reached end of stream.
   //
   // Calling any method other than IsEndOfStream() on the resulting buffer
@@ -71,9 +64,6 @@ class MEDIA_EXPORT DecoderBuffer

   int GetDataSize() const;

-  const uint8* GetSideData() const;
-  int GetSideDataSize() const;
-
   const DecryptConfig* GetDecryptConfig() const;
   void SetDecryptConfig(scoped_ptr<DecryptConfig> decrypt_config);

@@ -90,8 +80,6 @@ class MEDIA_EXPORT DecoderBuffer
   // will be padded and aligned as necessary. If |data| is NULL then |data_| is
   // set to NULL and |buffer_size_| to 0.
   DecoderBuffer(const uint8* data, int size);
-  DecoderBuffer(const uint8* data, int size,
-                const uint8* side_data, int side_data_size);
   virtual ~DecoderBuffer();

 private:
@@ -100,8 +88,6 @@ class MEDIA_EXPORT DecoderBuffer
   int size_;
   scoped_ptr<uint8, base::ScopedPtrAlignedFree> data_;

-  int side_data_size_;
-  scoped_ptr<uint8, base::ScopedPtrAlignedFree> side_data_;
   scoped_ptr<DecryptConfig> decrypt_config_;

   // Constructor helper method for memory allocations.
diff --git a/media/base/decoder_buffer_unittest.cc b/media/base/decoder_buffer_unittest.cc
index 7880a80..32c38d0 100644
--- a/media/base/decoder_buffer_unittest.cc
+++ b/media/base/decoder_buffer_unittest.cc
@@ -35,17 +35,6 @@ TEST(DecoderBufferTest, CopyFrom) {
   EXPECT_EQ(buffer2->GetDataSize(), kDataSize);
   EXPECT_EQ(0, memcmp(buffer2->GetData(), kData, kDataSize));
   EXPECT_FALSE(buffer2->IsEndOfStream());
-  scoped_refptr<DecoderBuffer> buffer3(DecoderBuffer::CopyFrom(
-      reinterpret_cast<const uint8*>(&kData), kDataSize,
-      reinterpret_cast<const uint8*>(&kData), kDataSize));
-  ASSERT_TRUE(buffer3);
-  EXPECT_NE(kData, buffer3->GetData());
-  EXPECT_EQ(buffer3->GetDataSize(), kDataSize);
-  EXPECT_EQ(0, memcmp(buffer3->GetData(), kData, kDataSize));
-  EXPECT_NE(kData, buffer3->GetSideData());
-  EXPECT_EQ(buffer3->GetSideDataSize(), kDataSize);
-  EXPECT_EQ(0, memcmp(buffer3->GetSideData(), kData, kDataSize));
-  EXPECT_FALSE(buffer3->IsEndOfStream());
 }

 #if !defined(OS_ANDROID)
diff --git a/media/base/media_switches.cc b/media/base/media_switches.cc
index 7c1ab90..04e711e 100644
--- a/media/base/media_switches.cc
+++ b/media/base/media_switches.cc
@@ -57,9 +57,6 @@ const char kEnableOpusPlayback[] = "enable-opus-playback";
 // Enables VP9 playback in media elements.
 const char kEnableVp9Playback[] = "enable-vp9-playback";

-// Enables VP8 Alpha playback in media elements.
-const char kEnableVp8AlphaPlayback[] = "enable-vp8-alpha-playback";
-
 #if defined(OS_WIN)
 const char kWaveOutBuffers[] = "waveout-buffers";
 #endif
diff --git a/media/base/media_switches.h b/media/base/media_switches.h
index e9b6ab9..ea32fae 100644
--- a/media/base/media_switches.h
+++ b/media/base/media_switches.h
@@ -44,8 +44,6 @@ MEDIA_EXPORT extern const char kEnableOpusPlayback[];

 MEDIA_EXPORT extern const char kEnableVp9Playback[];

-MEDIA_EXPORT extern const char kEnableVp8AlphaPlayback[];
-
 #if defined(OS_WIN)
 MEDIA_EXPORT extern const char kWaveOutBuffers[];
 #endif
diff --git a/media/base/simd/convert_yuv_to_rgb.h b/media/base/simd/convert_yuv_to_rgb.h
index 7db35b5..d05f039 100644
--- a/media/base/simd/convert_yuv_to_rgb.h
+++ b/media/base/simd/convert_yuv_to_rgb.h
@@ -21,19 +21,6 @@ typedef void (*ConvertYUVToRGB32Proc)(const uint8*,
                                       int,
                                       YUVType);

-typedef void (*ConvertYUVAToARGBProc)(const uint8*,
-                                      const uint8*,
-                                      const uint8*,
-                                      const uint8*,
-                                      uint8*,
-                                      int,
-                                      int,
-                                      int,
-                                      int,
-                                      int,
-                                      int,
-                                      YUVType);
-
 void ConvertYUVToRGB32_C(const uint8* yplane,
                          const uint8* uplane,
                          const uint8* vplane,
@@ -45,19 +32,6 @@ void ConvertYUVToRGB32_C(const uint8* yplane,
                          int rgbstride,
                          YUVType yuv_type);

-void ConvertYUVAToARGB_C(const uint8* yplane,
-                         const uint8* uplane,
-                         const uint8* vplane,
-                         const uint8* aplane,
-                         uint8* rgbframe,
-                         int width,
-                         int height,
-                         int ystride,
-                         int uvstride,
-                         int avstride,
-                         int rgbstride,
-                         YUVType yuv_type);
-
 void ConvertYUVToRGB32_SSE(const uint8* yplane,
                            const uint8* uplane,
                            const uint8* vplane,
@@ -80,19 +54,6 @@ void ConvertYUVToRGB32_MMX(const uint8* yplane,
                            int rgbstride,
                            YUVType yuv_type);

-void ConvertYUVAToARGB_MMX(const uint8* yplane,
-                           const uint8* uplane,
-                           const uint8* vplane,
-                           const uint8* aplane,
-                           uint8* rgbframe,
-                           int width,
-                           int height,
-                           int ystride,
-                           int uvstride,
-                           int avstride,
-                           int rgbstride,
-                           YUVType yuv_type);
-
 }  // namespace media

 // Assembly functions are declared without namespace.
@@ -111,13 +72,6 @@ typedef void (*ConvertYUVToRGB32RowProc)(const uint8*,
                                          uint8*,
                                          ptrdiff_t);

-typedef void (*ConvertYUVAToARGBRowProc)(const uint8*,
-                                         const uint8*,
-                                         const uint8*,
-                                         const uint8*,
-                                         uint8*,
-                                         ptrdiff_t);
-
 typedef void (*ScaleYUVToRGB32RowProc)(const uint8*,
                                        const uint8*,
                                        const uint8*,
@@ -131,26 +85,12 @@ void ConvertYUVToRGB32Row_C(const uint8* yplane,
                             uint8* rgbframe,
                             ptrdiff_t width);

-void ConvertYUVAToARGBRow_C(const uint8* yplane,
-                            const uint8* uplane,
-                            const uint8* vplane,
-                            const uint8* aplane,
-                            uint8* rgbframe,
-                            ptrdiff_t width);
-
 void ConvertYUVToRGB32Row_MMX(const uint8* yplane,
                               const uint8* uplane,
                               const uint8* vplane,
                               uint8* rgbframe,
                               ptrdiff_t width);

-void ConvertYUVAToARGBRow_MMX(const uint8* yplane,
-                              const uint8* uplane,
-                              const uint8* vplane,
-                              const uint8* aplane,
-                              uint8* rgbframe,
-                              ptrdiff_t width);
-
 void ConvertYUVToRGB32Row_SSE(const uint8* yplane,
                               const uint8* uplane,
                               const uint8* vplane,
diff --git a/media/base/simd/convert_yuv_to_rgb_c.cc b/media/base/simd/convert_yuv_to_rgb_c.cc
index e09ed13..2849cac 100644
--- a/media/base/simd/convert_yuv_to_rgb_c.cc
+++ b/media/base/simd/convert_yuv_to_rgb_c.cc
@@ -39,34 +39,6 @@ static inline void ConvertYUVToRGB32_C(uint8 y,
          (packuswb(a) << 24);
 }

-static inline void ConvertYUVAToARGB_C(uint8 y,
-                                       uint8 u,
-                                       uint8 v,
-                                       uint8 a,
-                                       uint8* rgb_buf) {
-  int b = kCoefficientsRgbY[256+u][0];
-  int g = kCoefficientsRgbY[256+u][1];
-  int r = kCoefficientsRgbY[256+u][2];
-
-  b = paddsw(b, kCoefficientsRgbY[512+v][0]);
-  g = paddsw(g, kCoefficientsRgbY[512+v][1]);
-  r = paddsw(r, kCoefficientsRgbY[512+v][2]);
-
-  b = paddsw(b, kCoefficientsRgbY[y][0]);
-  g = paddsw(g, kCoefficientsRgbY[y][1]);
-  r = paddsw(r, kCoefficientsRgbY[y][2]);
-
-  b >>= 6;
-  g >>= 6;
-  r >>= 6;
-
-  b = packuswb(b) * a >> 8;
-  g = packuswb(g) * a >> 8;
-  r = packuswb(r) * a >> 8;
-
-  *reinterpret_cast<uint32*>(rgb_buf) = b | (g << 8) | (r << 16) | (a << 24);
-}
-
 extern "C" {

 void ConvertYUVToRGB32Row_C(const uint8* y_buf,
@@ -87,27 +59,6 @@ void ConvertYUVToRGB32Row_C(const uint8* y_buf,
   }
 }

-void ConvertYUVAToARGBRow_C(const uint8* y_buf,
-                            const uint8* u_buf,
-                            const uint8* v_buf,
-                            const uint8* a_buf,
-                            uint8* rgba_buf,
-                            ptrdiff_t width) {
-  for (int x = 0; x < width; x += 2) {
-    uint8 u = u_buf[x >> 1];
-    uint8 v = v_buf[x >> 1];
-    uint8 y0 = y_buf[x];
-    uint8 a0 = a_buf[x];
-    ConvertYUVAToARGB_C(y0, u, v, a0, rgba_buf);
-    if ((x + 1) < width) {
-      uint8 y1 = y_buf[x + 1];
-      uint8 a1 = a_buf[x + 1];
-      ConvertYUVAToARGB_C(y1, u, v, a1, rgba_buf + 4);
-    }
-    rgba_buf += 8;  // Advance 2 pixels.
-  }
-}
-
 // 16.16 fixed point is used.  A shift by 16 isolates the integer.
 // A shift by 17 is used to further subsample the chrominence channels.
 // & 0xffff isolates the fixed point fraction.  >> 2 to get the upper 2 bits,
@@ -210,33 +161,4 @@ void ConvertYUVToRGB32_C(const uint8* yplane,
   }
 }

-void ConvertYUVAToARGB_C(const uint8* yplane,
-                         const uint8* uplane,
-                         const uint8* vplane,
-                         const uint8* aplane,
-                         uint8* rgbaframe,
-                         int width,
-                         int height,
-                         int ystride,
-                         int uvstride,
-                         int astride,
-                         int rgbastride,
-                         YUVType yuv_type) {
-  unsigned int y_shift = yuv_type;
-  for (int y = 0; y < height; y++) {
-    uint8* rgba_row = rgbaframe + y * rgbastride;
-    const uint8* y_ptr = yplane + y * ystride;
-    const uint8* u_ptr = uplane + (y >> y_shift) * uvstride;
-    const uint8* v_ptr = vplane + (y >> y_shift) * uvstride;
-    const uint8* a_ptr = aplane + y * astride;
-
-    ConvertYUVAToARGBRow_C(y_ptr,
-                           u_ptr,
-                           v_ptr,
-                           a_ptr,
-                           rgba_row,
-                           width);
-  }
-}
-
 }  // namespace media
diff --git a/media/base/simd/convert_yuv_to_rgb_x86.cc b/media/base/simd/convert_yuv_to_rgb_x86.cc
index d1d6e16..37b168d 100644
--- a/media/base/simd/convert_yuv_to_rgb_x86.cc
+++ b/media/base/simd/convert_yuv_to_rgb_x86.cc
@@ -40,37 +40,6 @@ void ConvertYUVToRGB32_MMX(const uint8* yplane,
   EmptyRegisterState();
 }

-void ConvertYUVAToARGB_MMX(const uint8* yplane,
-                           const uint8* uplane,
-                           const uint8* vplane,
-                           const uint8* aplane,
-                           uint8* rgbframe,
-                           int width,
-                           int height,
-                           int ystride,
-                           int uvstride,
-                           int astride,
-                           int rgbstride,
-                           YUVType yuv_type) {
-  unsigned int y_shift = yuv_type;
-  for (int y = 0; y < height; ++y) {
-    uint8* rgb_row = rgbframe + y * rgbstride;
-    const uint8* y_ptr = yplane + y * ystride;
-    const uint8* u_ptr = uplane + (y >> y_shift) * uvstride;
-    const uint8* v_ptr = vplane + (y >> y_shift) * uvstride;
-    const uint8* a_ptr = aplane + y * astride;
-
-    ConvertYUVAToARGBRow_MMX(y_ptr,
-                             u_ptr,
-                             v_ptr,
-                             a_ptr,
-                             rgb_row,
-                             width);
-  }
-
-  EmptyRegisterState();
-}
-
 void ConvertYUVToRGB32_SSE(const uint8* yplane,
                            const uint8* uplane,
                            const uint8* vplane,
diff --git a/media/base/simd/convert_yuva_to_argb_mmx.asm b/media/base/simd/convert_yuva_to_argb_mmx.asm
deleted file mode 100644
index b39315dc..0000000
--- a/media/base/simd/convert_yuva_to_argb_mmx.asm
+++ /dev/null
@@ -1,23 +0,0 @@
-; Copyright (c) 2011 The Chromium Authors. All rights reserved.
-; Use of this source code is governed by a BSD-style license that can be
-; found in the LICENSE file.
-
-%include "third_party/x86inc/x86inc.asm"
-
-;
-; This file uses MMX instructions.
-;
-  SECTION_TEXT
-  CPU MMX
-
-; Use movq to save the output.
-%define MOVQ movq
-
-; extern "C" void ConvertYUVAToARGBRow_MMX(const uint8* y_buf,
-;                                          const uint8* u_buf,
-;                                          const uint8* v_buf,
-;                                          const uint8* a_buf,
-;                                          uint8* rgb_buf,
-;                                          ptrdiff_t width);
-%define SYMBOL ConvertYUVAToARGBRow_MMX
-%include "convert_yuva_to_argb_mmx.inc"
diff --git a/media/base/simd/convert_yuva_to_argb_mmx.inc b/media/base/simd/convert_yuva_to_argb_mmx.inc
deleted file mode 100644
index 621100e..0000000
--- a/media/base/simd/convert_yuva_to_argb_mmx.inc
+++ /dev/null
@@ -1,174 +0,0 @@
-; Copyright (c) 2011 The Chromium Authors. All rights reserved.
-; Use of this source code is governed by a BSD-style license that can be
-; found in the LICENSE file.
-
-  global mangle(SYMBOL) PRIVATE
-  align function_align
-
-; Non-PIC code is the fastest so use this if possible.
-%ifndef PIC
-mangle(SYMBOL):
-  %assign stack_offset 0
-  PROLOGUE 6, 7, 3, Y, U, V, A, ARGB, WIDTH, TEMP
-  extern mangle(kCoefficientsRgbY)
-  jmp .convertend
-
-.convertloop:
-  movzx TEMPd, BYTE [Uq]
-  movq mm0, [mangle(kCoefficientsRgbY) + 2048 + 8 * TEMPq]
-  add Uq, 1
-  movzx TEMPd, BYTE [Vq]
-  paddsw mm0, [mangle(kCoefficientsRgbY) + 4096 + 8 * TEMPq]
-  add Vq, 1
-  movzx TEMPd, BYTE [Yq]
-  movq mm1, [mangle(kCoefficientsRgbY) + 8 * TEMPq]
-  movzx TEMPd, BYTE [Yq + 1]
-  movq mm2, [mangle(kCoefficientsRgbY) + 8 * TEMPq]
-  add Yq, 2
-  paddsw mm1, mm0
-  paddsw mm2, mm0
-  psraw mm1, 6
-  psraw mm2, 6
-  packuswb mm1, mm2
-
-  ; Multiply ARGB by alpha value.
-  movq mm0, mm1
-  pxor mm2, mm2
-  punpcklbw mm0, mm2
-  punpckhbw mm1, mm2
-  movzx TEMPd, BYTE [Aq]
-  movq mm2, [mangle(kCoefficientsRgbY) + 6144 + 8 * TEMPq]
-  pmullw mm0, mm2
-  psrlw mm0, 8
-  movzx TEMPd, BYTE [Aq + 1]
-  movq mm2, [mangle(kCoefficientsRgbY) + 6144 + 8 * TEMPq]
-  add Aq, 2
-  pmullw mm1, mm2
-  psrlw mm1, 8
-  packuswb mm0, mm1
-
-  MOVQ [ARGBq], mm0
-  add ARGBq, 8
-
-.convertend:
-  sub WIDTHq, 2
-  jns .convertloop
-
-  ; If number of pixels is odd then compute it.
-  and WIDTHq, 1
-  jz .convertdone
-
-  movzx TEMPd, BYTE [Uq]
-  movq mm0, [mangle(kCoefficientsRgbY) + 2048 + 8 * TEMPq]
-  movzx TEMPd, BYTE [Vq]
-  paddsw mm0, [mangle(kCoefficientsRgbY) + 4096 + 8 * TEMPq]
-  movzx TEMPd, BYTE [Yq]
-  movq mm1, [mangle(kCoefficientsRgbY) + 8 * TEMPq]
-  paddsw mm1, mm0
-  psraw mm1, 6
-  packuswb mm1, mm1
-
-  ; Multiply ARGB by alpha value.
-  pxor mm0, mm0
-  punpcklbw mm1, mm0
-  movzx TEMPd, BYTE [Aq]
-  movq mm0, [mangle(kCoefficientsRgbY) + 6144 + 8 * TEMPq]
-  pmullw mm1, mm0
-  psrlw mm1, 8
-  packuswb mm1, mm1
-
-  movd [ARGBq], mm1
-
-.convertdone:
-  RET
-%endif
-
-; With PIC code we need to load the address of mangle(kCoefficientsRgbY).
-; This code is slower than the above version.
-%ifdef PIC
-mangle(SYMBOL):
-  %assign stack_offset 0
-  PROLOGUE 6, 7, 3, Y, U, V, A, ARGB, WIDTH, TEMP
-  extern mangle(kCoefficientsRgbY)
-  PUSH WIDTHq
-  DEFINE_ARGS Y, U, V, A, ARGB, TABLE, TEMP
-  LOAD_SYM TABLEq, mangle(kCoefficientsRgbY)
-  jmp .convertend
-
-.convertloop:
-  movzx TEMPd, BYTE [Uq]
-  movq mm0, [TABLEq + 2048 + 8 * TEMPq]
-  add Uq, 1
-
-  movzx TEMPd, BYTE [Vq]
-  paddsw mm0, [TABLEq + 4096 + 8 * TEMPq]
-  add Vq, 1
-
-  movzx TEMPd, BYTE [Yq]
-  movq mm1, [TABLEq + 8 * TEMPq]
-
-  movzx TEMPd, BYTE [Yq + 1]
-  movq mm2, [TABLEq + 8 * TEMPq]
-  add Yq, 2
-
-  ; Add UV components to Y component.
-  paddsw mm1, mm0
-  paddsw mm2, mm0
-
-  ; Down shift and then pack.
-  psraw mm1, 6
-  psraw mm2, 6
-  packuswb mm1, mm2
-
-  ; Unpack and multiply by alpha value, then repack high bytes of words.
-  movq mm0, mm1
-  pxor mm2, mm2
-  punpcklbw mm0, mm2
-  punpckhbw mm1, mm2
-  movzx TEMPd, BYTE [Aq]
-  movq mm2, [TABLEq + 6144 + 8 * TEMPq]
-  pmullw mm0, mm2
-  psrlw mm0, 8
-  movzx TEMPd, BYTE [Aq + 1]
-  movq mm2, [TABLEq + 6144 + 8 * TEMPq]
-  add Aq, 2
-  pmullw mm1, mm2
-  psrlw mm1, 8
-  packuswb mm0, mm1
-
-  MOVQ [ARGBq], mm0
-  add ARGBq, 8
-
-.convertend:
-  sub dword [rsp], 2
-  jns .convertloop
-
-  ; If number of pixels is odd then compute it.
-  and dword [rsp], 1
-  jz .convertdone
-
-  movzx TEMPd, BYTE [Uq]
-  movq mm0, [TABLEq + 2048 + 8 * TEMPq]
-  movzx TEMPd, BYTE [Vq]
-  paddsw mm0, [TABLEq + 4096 + 8 * TEMPq]
-  movzx TEMPd, BYTE [Yq]
-  movq mm1, [TABLEq + 8 * TEMPq]
-  paddsw mm1, mm0
-  psraw mm1, 6
-  packuswb mm1, mm1
-
-  ; Multiply ARGB by alpha value.
-  pxor mm0, mm0
-  punpcklbw mm1, mm0
-  movzx TEMPd, BYTE [Aq]
-  movq mm0, [TABLEq + 6144 + 8 * TEMPq]
-  pmullw mm1, mm0
-  psrlw mm1, 8
-  packuswb mm1, mm1
-
-  movd [ARGBq], mm1
-
-.convertdone:
-  POP TABLEq
-  RET
-%endif
\ No newline at end of file
diff --git a/media/base/simd/yuv_to_rgb_table.cc b/media/base/simd/yuv_to_rgb_table.cc
index 3789969..f998e85 100644
--- a/media/base/simd/yuv_to_rgb_table.cc
+++ b/media/base/simd/yuv_to_rgb_table.cc
@@ -6,7 +6,6 @@

 extern "C" {

-// Defines the R,G,B,A contributions from Y.
 #define RGBY(i) { \
   static_cast<int16>(1.164 * 64 * (i - 16) + 0.5), \
   static_cast<int16>(1.164 * 64 * (i - 16) + 0.5), \
@@ -14,9 +13,6 @@ extern "C" {
   0 \
 }

-// Defines the R,G,B,A contributions from U.
-// The contribution to A is the same for any value of U
-// causing the final A value to be 255 in every conversion.
 #define RGBU(i) { \
   static_cast<int16>(2.018 * 64 * (i - 128) + 0.5), \
   static_cast<int16>(-0.391 * 64 * (i - 128) + 0.5), \
@@ -24,7 +20,6 @@ extern "C" {
   static_cast<int16>(256 * 64 - 1) \
 }

-// Defines the R,G,B,A contributions from V.
 #define RGBV(i) { \
   0, \
   static_cast<int16>(-0.813 * 64 * (i - 128) + 0.5), \
@@ -32,18 +27,7 @@ extern "C" {
   0 \
 }

-// Used to define a set of multiplier words for each alpha level.
-#define ALPHA(i) { \
-  i, i, i, i \
-}
-
-// The following table defines the RGBA contributions
-// for each component of YUVA. The Y table is first followed
-// by the U, and V tables. The alpha multiplier table follows.
-// These tables are aligned and kept adjacent to optimize for
-// SIMD and cacheing.
-
-SIMD_ALIGNED(int16 kCoefficientsRgbY[256 * 4][4]) = {
+SIMD_ALIGNED(int16 kCoefficientsRgbY[256 * 3][4]) = {
   RGBY(0x00), RGBY(0x01), RGBY(0x02), RGBY(0x03),
   RGBY(0x04), RGBY(0x05), RGBY(0x06), RGBY(0x07),
   RGBY(0x08), RGBY(0x09), RGBY(0x0A), RGBY(0x0B),
@@ -240,77 +224,10 @@ SIMD_ALIGNED(int16 kCoefficientsRgbY[256 * 4][4]) = {
   RGBV(0xF4), RGBV(0xF5), RGBV(0xF6), RGBV(0xF7),
   RGBV(0xF8), RGBV(0xF9), RGBV(0xFA), RGBV(0xFB),
   RGBV(0xFC), RGBV(0xFD), RGBV(0xFE), RGBV(0xFF),
-
-  // Alpha multipliers for each alpha level.
-  ALPHA(0x00), ALPHA(0x01), ALPHA(0x02), ALPHA(0x03),
-  ALPHA(0x04), ALPHA(0x05), ALPHA(0x06), ALPHA(0x07),
-  ALPHA(0x08), ALPHA(0x09), ALPHA(0x0A), ALPHA(0x0B),
-  ALPHA(0x0C), ALPHA(0x0D), ALPHA(0x0E), ALPHA(0x0F),
-  ALPHA(0x10), ALPHA(0x11), ALPHA(0x12), ALPHA(0x13),
-  ALPHA(0x14), ALPHA(0x15), ALPHA(0x16), ALPHA(0x17),
-  ALPHA(0x18), ALPHA(0x19), ALPHA(0x1A), ALPHA(0x1B),
-  ALPHA(0x1C), ALPHA(0x1D), ALPHA(0x1E), ALPHA(0x1F),
-  ALPHA(0x20), ALPHA(0x21), ALPHA(0x22), ALPHA(0x23),
-  ALPHA(0x24), ALPHA(0x25), ALPHA(0x26), ALPHA(0x27),
-  ALPHA(0x28), ALPHA(0x29), ALPHA(0x2A), ALPHA(0x2B),
-  ALPHA(0x2C), ALPHA(0x2D), ALPHA(0x2E), ALPHA(0x2F),
-  ALPHA(0x30), ALPHA(0x31), ALPHA(0x32), ALPHA(0x33),
-  ALPHA(0x34), ALPHA(0x35), ALPHA(0x36), ALPHA(0x37),
-  ALPHA(0x38), ALPHA(0x39), ALPHA(0x3A), ALPHA(0x3B),
-  ALPHA(0x3C), ALPHA(0x3D), ALPHA(0x3E), ALPHA(0x3F),
-  ALPHA(0x40), ALPHA(0x41), ALPHA(0x42), ALPHA(0x43),
-  ALPHA(0x44), ALPHA(0x45), ALPHA(0x46), ALPHA(0x47),
-  ALPHA(0x48), ALPHA(0x49), ALPHA(0x4A), ALPHA(0x4B),
-  ALPHA(0x4C), ALPHA(0x4D), ALPHA(0x4E), ALPHA(0x4F),
-  ALPHA(0x50), ALPHA(0x51), ALPHA(0x52), ALPHA(0x53),
-  ALPHA(0x54), ALPHA(0x55), ALPHA(0x56), ALPHA(0x57),
-  ALPHA(0x58), ALPHA(0x59), ALPHA(0x5A), ALPHA(0x5B),
-  ALPHA(0x5C), ALPHA(0x5D), ALPHA(0x5E), ALPHA(0x5F),
-  ALPHA(0x60), ALPHA(0x61), ALPHA(0x62), ALPHA(0x63),
-  ALPHA(0x64), ALPHA(0x65), ALPHA(0x66), ALPHA(0x67),
-  ALPHA(0x68), ALPHA(0x69), ALPHA(0x6A), ALPHA(0x6B),
-  ALPHA(0x6C), ALPHA(0x6D), ALPHA(0x6E), ALPHA(0x6F),
-  ALPHA(0x70), ALPHA(0x71), ALPHA(0x72), ALPHA(0x73),
-  ALPHA(0x74), ALPHA(0x75), ALPHA(0x76), ALPHA(0x77),
-  ALPHA(0x78), ALPHA(0x79), ALPHA(0x7A), ALPHA(0x7B),
-  ALPHA(0x7C), ALPHA(0x7D), ALPHA(0x7E), ALPHA(0x7F),
-  ALPHA(0x80), ALPHA(0x81), ALPHA(0x82), ALPHA(0x83),
-  ALPHA(0x84), ALPHA(0x85), ALPHA(0x86), ALPHA(0x87),
-  ALPHA(0x88), ALPHA(0x89), ALPHA(0x8A), ALPHA(0x8B),
-  ALPHA(0x8C), ALPHA(0x8D), ALPHA(0x8E), ALPHA(0x8F),
-  ALPHA(0x90), ALPHA(0x91), ALPHA(0x92), ALPHA(0x93),
-  ALPHA(0x94), ALPHA(0x95), ALPHA(0x96), ALPHA(0x97),
-  ALPHA(0x98), ALPHA(0x99), ALPHA(0x9A), ALPHA(0x9B),
-  ALPHA(0x9C), ALPHA(0x9D), ALPHA(0x9E), ALPHA(0x9F),
-  ALPHA(0xA0), ALPHA(0xA1), ALPHA(0xA2), ALPHA(0xA3),
-  ALPHA(0xA4), ALPHA(0xA5), ALPHA(0xA6), ALPHA(0xA7),
-  ALPHA(0xA8), ALPHA(0xA9), ALPHA(0xAA), ALPHA(0xAB),
-  ALPHA(0xAC), ALPHA(0xAD), ALPHA(0xAE), ALPHA(0xAF),
-  ALPHA(0xB0), ALPHA(0xB1), ALPHA(0xB2), ALPHA(0xB3),
-  ALPHA(0xB4), ALPHA(0xB5), ALPHA(0xB6), ALPHA(0xB7),
-  ALPHA(0xB8), ALPHA(0xB9), ALPHA(0xBA), ALPHA(0xBB),
-  ALPHA(0xBC), ALPHA(0xBD), ALPHA(0xBE), ALPHA(0xBF),
-  ALPHA(0xC0), ALPHA(0xC1), ALPHA(0xC2), ALPHA(0xC3),
-  ALPHA(0xC4), ALPHA(0xC5), ALPHA(0xC6), ALPHA(0xC7),
-  ALPHA(0xC8), ALPHA(0xC9), ALPHA(0xCA), ALPHA(0xCB),
-  ALPHA(0xCC), ALPHA(0xCD), ALPHA(0xCE), ALPHA(0xCF),
-  ALPHA(0xD0), ALPHA(0xD1), ALPHA(0xD2), ALPHA(0xD3),
-  ALPHA(0xD4), ALPHA(0xD5), ALPHA(0xD6), ALPHA(0xD7),
-  ALPHA(0xD8), ALPHA(0xD9), ALPHA(0xDA), ALPHA(0xDB),
-  ALPHA(0xDC), ALPHA(0xDD), ALPHA(0xDE), ALPHA(0xDF),
-  ALPHA(0xE0), ALPHA(0xE1), ALPHA(0xE2), ALPHA(0xE3),
-  ALPHA(0xE4), ALPHA(0xE5), ALPHA(0xE6), ALPHA(0xE7),
-  ALPHA(0xE8), ALPHA(0xE9), ALPHA(0xEA), ALPHA(0xEB),
-  ALPHA(0xEC), ALPHA(0xED), ALPHA(0xEE), ALPHA(0xEF),
-  ALPHA(0xF0), ALPHA(0xF1), ALPHA(0xF2), ALPHA(0xF3),
-  ALPHA(0xF4), ALPHA(0xF5), ALPHA(0xF6), ALPHA(0xF7),
-  ALPHA(0xF8), ALPHA(0xF9), ALPHA(0xFA), ALPHA(0xFB),
-  ALPHA(0xFC), ALPHA(0xFD), ALPHA(0xFE), ALPHA(0xFF),
 };

 #undef RGBY
 #undef RGBU
 #undef RGBV
-#undef ALPHA

 }  // extern "C"
diff --git a/media/base/simd/yuv_to_rgb_table.h b/media/base/simd/yuv_to_rgb_table.h
index 3ed7bd9..0c43a7a 100644
--- a/media/base/simd/yuv_to_rgb_table.h
+++ b/media/base/simd/yuv_to_rgb_table.h
@@ -19,7 +19,7 @@ extern "C" {
 #endif

 // Align the table to 16-bytes to allow faster reading.
-extern SIMD_ALIGNED(int16 kCoefficientsRgbY[256 * 4][4]);
+extern SIMD_ALIGNED(int16 kCoefficientsRgbY[768][4]);

 }  // extern "C"

diff --git a/media/base/video_frame.cc b/media/base/video_frame.cc
index 6ca026c..62e81ca 100644
--- a/media/base/video_frame.cc
+++ b/media/base/video_frame.cc
@@ -32,7 +32,6 @@ scoped_refptr<VideoFrame> VideoFrame::CreateFrame(
       frame->AllocateRGB(4u);
       break;
     case VideoFrame::YV12:
-    case VideoFrame::YV12A:
     case VideoFrame::YV16:
       frame->AllocateYUV();
       break;
@@ -168,8 +167,6 @@ size_t VideoFrame::NumPlanes(Format format) {
     case VideoFrame::YV12:
     case VideoFrame::YV16:
       return 3;
-    case VideoFrame::YV12A:
-      return 4;
     case VideoFrame::EMPTY:
     case VideoFrame::I420:
     case VideoFrame::INVALID:
@@ -207,8 +204,7 @@ void VideoFrame::AllocateRGB(size_t bytes_per_pixel) {
 }

 void VideoFrame::AllocateYUV() {
-  DCHECK(format_ == VideoFrame::YV12 || format_ == VideoFrame::YV16 ||
-         format_ == VideoFrame::YV12A);
+  DCHECK(format_ == VideoFrame::YV12 || format_ == VideoFrame::YV16);
   // Align Y rows at least at 16 byte boundaries. The stride for both
   // YV12 and YV16 is 1/2 of the stride of Y. For YV12, every row of bytes for
   // U and V applies to two rows of Y (one byte of UV for 4 bytes of Y), so in
@@ -217,9 +213,7 @@ void VideoFrame::AllocateYUV() {
   // YV16. We also round the height of the surface allocated to be an even
   // number to avoid any potential of faulting by code that attempts to access
   // the Y values of the final row, but assumes that the last row of U & V
-  // applies to a full two rows of Y. YV12A is the same as YV12, but with an
-  // additional alpha plane that has the same size and alignment as the Y plane.
-
+  // applies to a full two rows of Y.
   size_t y_stride = RoundUp(row_bytes(VideoFrame::kYPlane),
                             kFrameSizeAlignment);
   size_t uv_stride = RoundUp(row_bytes(VideoFrame::kUPlane),
@@ -228,12 +222,9 @@ void VideoFrame::AllocateYUV() {
   // and then the size needs to be a multiple of two macroblocks (vertically).
   // See libavcodec/utils.c:avcodec_align_dimensions2().
   size_t y_height = RoundUp(coded_size_.height(), kFrameSizeAlignment * 2);
-  size_t uv_height = (format_ == VideoFrame::YV12 ||
-                      format_ == VideoFrame::YV12A) ?
-      y_height / 2 : y_height;
+  size_t uv_height = format_ == VideoFrame::YV12 ? y_height / 2 : y_height;
   size_t y_bytes = y_height * y_stride;
   size_t uv_bytes = uv_height * uv_stride;
-  size_t a_bytes = format_ == VideoFrame::YV12A ? y_bytes : 0;

   // The extra line of UV being allocated is because h264 chroma MC
   // overreads by one line in some cases, see libavcodec/utils.c:
@@ -241,7 +232,7 @@ void VideoFrame::AllocateYUV() {
   // put_h264_chroma_mc4_ssse3().
   uint8* data = reinterpret_cast<uint8*>(
       base::AlignedAlloc(
-          y_bytes + (uv_bytes * 2 + uv_stride) + a_bytes + kFrameSizePadding,
+          y_bytes + (uv_bytes * 2 + uv_stride) + kFrameSizePadding,
           kFrameAddressAlignment));
   no_longer_needed_cb_ = base::Bind(&ReleaseData, data);
   COMPILE_ASSERT(0 == VideoFrame::kYPlane, y_plane_data_must_be_index_0);
@@ -251,10 +242,6 @@ void VideoFrame::AllocateYUV() {
   strides_[VideoFrame::kYPlane] = y_stride;
   strides_[VideoFrame::kUPlane] = uv_stride;
   strides_[VideoFrame::kVPlane] = uv_stride;
-  if (format_ == YV12A) {
-    data_[VideoFrame::kAPlane] = data + y_bytes + (2 * uv_bytes);
-    strides_[VideoFrame::kAPlane] = y_stride;
-  }
 }

 VideoFrame::VideoFrame(VideoFrame::Format format,
@@ -298,8 +285,7 @@ int VideoFrame::row_bytes(size_t plane) const {
     // Planar, 8bpp.
     case YV12:
     case YV16:
-    case YV12A:
-      if (plane == kYPlane || plane == kAPlane)
+      if (plane == kYPlane)
         return width;
       return RoundUp(width, 2) / 2;

@@ -321,8 +307,7 @@ int VideoFrame::rows(size_t plane) const {
       return height;

     case YV12:
-    case YV12A:
-      if (plane == kYPlane || plane == kAPlane)
+      if (plane == kYPlane)
         return height;
       return RoundUp(height, 2) / 2;

diff --git a/media/base/video_frame.h b/media/base/video_frame.h
index 6cd5a0f..9a6f0a6 100644
--- a/media/base/video_frame.h
+++ b/media/base/video_frame.h
@@ -24,14 +24,13 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
   };

   enum {
-    kMaxPlanes = 4,
+    kMaxPlanes = 3,

     kRGBPlane = 0,

     kYPlane = 0,
     kUPlane = 1,
     kVPlane = 2,
-    kAPlane = 3,
   };

   // Surface formats roughly based on FOURCC labels, see:
@@ -48,7 +47,6 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
 #if defined(GOOGLE_TV)
     HOLE = 13,  // Hole frame.
 #endif
-    YV12A = 14,  // 20bpp YUVA planar 1x1 Y, 2x2 VU, 1x1 A samples.
   };

   // Creates a new frame in system memory with given parameters. Buffers for
diff --git a/media/base/video_util.cc b/media/base/video_util.cc
index e1de7bd..972ac17 100644
--- a/media/base/video_util.cc
+++ b/media/base/video_util.cc
@@ -62,16 +62,6 @@ void CopyVPlane(const uint8* source, int stride, int rows, VideoFrame* frame) {
   CopyPlane(VideoFrame::kVPlane, source, stride, rows, frame);
 }

-void CopyAPlane(const uint8* source, int stride, int rows, VideoFrame* frame) {
-  CopyPlane(VideoFrame::kAPlane, source, stride, rows, frame);
-}
-
-void MakeOpaqueAPlane(int stride, int rows, VideoFrame* frame) {
-  int rows_to_clear = std::min(frame->rows(VideoFrame::kAPlane), rows);
-  memset(frame->data(VideoFrame::kAPlane), 255,
-         frame->stride(VideoFrame::kAPlane) * rows_to_clear);
-}
-
 void FillYUV(VideoFrame* frame, uint8 y, uint8 u, uint8 v) {
   // Fill the Y plane.
   uint8* y_plane = frame->data(VideoFrame::kYPlane);
diff --git a/media/base/video_util.h b/media/base/video_util.h
index 05e5927..27156fa 100644
--- a/media/base/video_util.h
+++ b/media/base/video_util.h
@@ -19,7 +19,7 @@ MEDIA_EXPORT gfx::Size GetNaturalSize(const gfx::Size& visible_size,
                                       int aspect_ratio_numerator,
                                       int aspect_ratio_denominator);

-// Copies a plane of YUV(A) source into a VideoFrame object, taking into account
+// Copies a plane of YUV source into a VideoFrame object, taking into account
 // source and destinations dimensions.
 //
 // NOTE: rows is *not* the same as height!
@@ -29,14 +29,8 @@ MEDIA_EXPORT void CopyUPlane(const uint8* source, int stride, int rows,
                              VideoFrame* frame);
 MEDIA_EXPORT void CopyVPlane(const uint8* source, int stride, int rows,
                              VideoFrame* frame);
-MEDIA_EXPORT void CopyAPlane(const uint8* source, int stride, int rows,
-                             VideoFrame* frame);
-
-// Sets alpha plane values to be completely opaque (all 255's).
-MEDIA_EXPORT void MakeOpaqueAPlane(int stride, int rows, VideoFrame* frame);
-
 // |plane| is one of VideoFrame::kYPlane, VideoFrame::kUPlane,
-// VideoFrame::kVPlane or VideoFrame::kAPlane
+// or VideoFrame::kVPlane.
 MEDIA_EXPORT void CopyPlane(size_t plane, const uint8* source, int stride,
                             int rows, VideoFrame* frame);

diff --git a/media/base/yuv_convert.cc b/media/base/yuv_convert.cc
index 1d09a24..85b0699 100644
--- a/media/base/yuv_convert.cc
+++ b/media/base/yuv_convert.cc
@@ -603,34 +603,4 @@ void ConvertYUVToRGB32(const uint8* yplane,
 #endif
 }

-void ConvertYUVAToARGB(const uint8* yplane,
-                       const uint8* uplane,
-                       const uint8* vplane,
-                       const uint8* aplane,
-                       uint8* rgbframe,
-                       int width,
-                       int height,
-                       int ystride,
-                       int uvstride,
-                       int astride,
-                       int rgbstride,
-                       YUVType yuv_type) {
-#if defined(ARCH_CPU_ARM_FAMILY) || defined(ARCH_CPU_MIPS_FAMILY)
-  ConvertYUVAToARGB_C(yplane, uplane, vplane, aplane, rgbframe,
-                      width, height, ystride, uvstride, astride, rgbstride,
-                      yuv_type);
-#else
-  static ConvertYUVAToARGBProc convert_proc = NULL;
-  if (!convert_proc) {
-    base::CPU cpu;
-    if (cpu.has_mmx())
-      convert_proc = &ConvertYUVAToARGB_MMX;
-    else
-      convert_proc = &ConvertYUVAToARGB_C;
-  }
-  convert_proc(yplane, uplane, vplane, aplane, rgbframe,
-               width, height, ystride, uvstride, astride, rgbstride, yuv_type);
-#endif
-}
-
 }  // namespace media
diff --git a/media/base/yuv_convert.h b/media/base/yuv_convert.h
index 0e53193..30c07ab 100644
--- a/media/base/yuv_convert.h
+++ b/media/base/yuv_convert.h
@@ -60,21 +60,6 @@ void ConvertYUVToRGB32(const uint8* yplane,
                        int rgbstride,
                        YUVType yuv_type);

-// Convert a frame of YUVA to 32 bit ARGB.
-// Pass in YV12A
-void ConvertYUVAToARGB(const uint8* yplane,
-                       const uint8* uplane,
-                       const uint8* vplane,
-                       const uint8* aplane,
-                       uint8* rgbframe,
-                       int width,
-                       int height,
-                       int ystride,
-                       int uvstride,
-                       int astride,
-                       int rgbstride,
-                       YUVType yuv_type);
-
 // Scale a frame of YUV to 32 bit ARGB.
 // Supports rotation and mirroring.
 void ScaleYUVToRGB32(const uint8* yplane,
diff --git a/media/ffmpeg/ffmpeg_common.cc b/media/ffmpeg/ffmpeg_common.cc
index cc7dfc2..12a4ec9 100644
--- a/media/ffmpeg/ffmpeg_common.cc
+++ b/media/ffmpeg/ffmpeg_common.cc
@@ -382,12 +382,6 @@ void AVStreamToVideoDecoderConfig(
   if (key)
     is_encrypted = true;

-  AVDictionaryEntry* webm_alpha =
-      av_dict_get(stream->metadata, "alpha_mode", NULL, 0);
-  if (webm_alpha && !strcmp(webm_alpha->value, "1")) {
-    format = VideoFrame::YV12A;
-  }
-
   config->Initialize(codec,
                      profile,
                      format,
@@ -496,8 +490,6 @@ VideoFrame::Format PixelFormatToVideoFormat(PixelFormat pixel_format) {
     case PIX_FMT_YUV420P:
     case PIX_FMT_YUVJ420P:
       return VideoFrame::YV12;
-    case PIX_FMT_YUVA420P:
-      return VideoFrame::YV12A;
     default:
       DVLOG(1) << "Unsupported PixelFormat: " << pixel_format;
   }
@@ -510,8 +502,6 @@ PixelFormat VideoFormatToPixelFormat(VideoFrame::Format video_format) {
       return PIX_FMT_YUV422P;
     case VideoFrame::YV12:
       return PIX_FMT_YUV420P;
-    case VideoFrame::YV12A:
-      return PIX_FMT_YUVA420P;
     default:
       DVLOG(1) << "Unsupported VideoFrame::Format: " << video_format;
   }
diff --git a/media/filters/ffmpeg_demuxer.cc b/media/filters/ffmpeg_demuxer.cc
index 954b3ac..fec580a 100644
--- a/media/filters/ffmpeg_demuxer.cc
+++ b/media/filters/ffmpeg_demuxer.cc
@@ -108,26 +108,11 @@ void FFmpegDemuxerStream::EnqueuePacket(ScopedAVPacket packet) {
     LOG(ERROR) << "Format conversion failed.";
   }

-  // Get side data if any. For now, the only type of side_data is VP8 Alpha. We
-  // keep this generic so that other side_data types in the future can be
-  // handled the same way as well.
-  av_packet_split_side_data(packet.get());
-  int side_data_size = 0;
-  uint8* side_data = av_packet_get_side_data(
-      packet.get(),
-      AV_PKT_DATA_MATROSKA_BLOCKADDITIONAL,
-      &side_data_size);
-
   // If a packet is returned by FFmpeg's av_parser_parse2() the packet will
   // reference inner memory of FFmpeg. As such we should transfer the packet
   // into memory we control.
   scoped_refptr<DecoderBuffer> buffer;
-  if (side_data_size > 0) {
-    buffer = DecoderBuffer::CopyFrom(packet.get()->data, packet.get()->size,
-                                     side_data, side_data_size);
-  } else {
-    buffer = DecoderBuffer::CopyFrom(packet.get()->data, packet.get()->size);
-  }
+  buffer = DecoderBuffer::CopyFrom(packet->data, packet->size);

   if ((type() == DemuxerStream::AUDIO && audio_config_.is_encrypted()) ||
       (type() == DemuxerStream::VIDEO && video_config_.is_encrypted())) {
diff --git a/media/filters/pipeline_integration_test.cc b/media/filters/pipeline_integration_test.cc
index ef5694d..2945db2 100644
--- a/media/filters/pipeline_integration_test.cc
+++ b/media/filters/pipeline_integration_test.cc
@@ -923,12 +923,4 @@ TEST_F(PipelineIntegrationTest, DISABLED_BasicPlayback_VP9_Opus_WebM) {
   ASSERT_TRUE(WaitUntilOnEnded());
 }

-// Verify that VP8 video with alpha channel can be played back.
-TEST_F(PipelineIntegrationTest, BasicPlayback_VP8A_WebM) {
-  ASSERT_TRUE(Start(GetTestDataFilePath("bear-vp8a.webm"),
-                    PIPELINE_OK));
-  Play();
-  ASSERT_TRUE(WaitUntilOnEnded());
-}
-
 }  // namespace media
diff --git a/media/filters/skcanvas_video_renderer.cc b/media/filters/skcanvas_video_renderer.cc
index 94395bf..b478a73 100644
--- a/media/filters/skcanvas_video_renderer.cc
+++ b/media/filters/skcanvas_video_renderer.cc
@@ -21,17 +21,6 @@ static bool IsEitherYV12OrYV16OrNative(media::VideoFrame::Format format) {
          format == media::VideoFrame::NATIVE_TEXTURE;
 }

-static bool IsEitherYV12OrYV12AOrYV16(media::VideoFrame::Format format) {
-  return IsEitherYV12OrYV16(format) ||
-      format == media::VideoFrame::YV12A;
-}
-
-static bool IsEitherYV12OrYV12AOrYV16OrNative(
-    media::VideoFrame::Format format) {
-  return IsEitherYV12OrYV16OrNative(format) ||
-      format == media::VideoFrame::YV12A;
-}
-
 // CanFastPaint is a helper method to determine the conditions for fast
 // painting. The conditions are:
 // 1. No skew in canvas matrix.
@@ -81,8 +70,7 @@ static void FastPaint(
   const SkBitmap& bitmap = canvas->getDevice()->accessBitmap(true);
   media::YUVType yuv_type = media::YV16;
   int y_shift = 0;
-  if (video_frame->format() == media::VideoFrame::YV12 ||
-      video_frame->format() == media::VideoFrame::YV12A) {
+  if (video_frame->format() == media::VideoFrame::YV12) {
     yuv_type = media::YV12;
     y_shift = 1;
   }
@@ -182,9 +170,9 @@ static void FastPaint(
 static void ConvertVideoFrameToBitmap(
     const scoped_refptr<media::VideoFrame>& video_frame,
     SkBitmap* bitmap) {
-  DCHECK(IsEitherYV12OrYV12AOrYV16OrNative(video_frame->format()))
+  DCHECK(IsEitherYV12OrYV16OrNative(video_frame->format()))
       << video_frame->format();
-  if (IsEitherYV12OrYV12AOrYV16(video_frame->format())) {
+  if (IsEitherYV12OrYV16(video_frame->format())) {
     DCHECK_EQ(video_frame->stride(media::VideoFrame::kUPlane),
               video_frame->stride(media::VideoFrame::kVPlane));
   }
@@ -201,75 +189,45 @@ static void ConvertVideoFrameToBitmap(
   }

   bitmap->lockPixels();
+  if (IsEitherYV12OrYV16(video_frame->format())) {
+    media::YUVType yuv_type = media::YV16;
+    int y_shift = 0;
+    if (video_frame->format() == media::VideoFrame::YV12) {
+      yuv_type = media::YV12;
+      y_shift = 1;
+    }
-  size_t y_offset = 0;
-  size_t uv_offset = 0;
-  if (IsEitherYV12OrYV12AOrYV16(video_frame->format())) {
-    int y_shift = (video_frame->format() == media::VideoFrame::YV16) ? 0 : 1;
     // Use the "left" and "top" of the destination rect to locate the offset
     // in Y, U and V planes.
-    y_offset = (video_frame->stride(media::VideoFrame::kYPlane) *
-                video_frame->visible_rect().y()) +
-                video_frame->visible_rect().x();
+    size_t y_offset = (video_frame->stride(media::VideoFrame::kYPlane) *
+                       video_frame->visible_rect().y()) +
+                      video_frame->visible_rect().x();
+
     // For format YV12, there is one U, V value per 2x2 block.
     // For format YV16, there is one U, V value per 2x1 block.
-    uv_offset = (video_frame->stride(media::VideoFrame::kUPlane) *
-                 (video_frame->visible_rect().y() >> y_shift)) +
-                 (video_frame->visible_rect().x() >> 1);
-  }
-  switch (video_frame->format()) {
-    case media::VideoFrame::YV12:
-      media::ConvertYUVToRGB32(
-          video_frame->data(media::VideoFrame::kYPlane) + y_offset,
-          video_frame->data(media::VideoFrame::kUPlane) + uv_offset,
-          video_frame->data(media::VideoFrame::kVPlane) + uv_offset,
-          static_cast<uint8*>(bitmap->getPixels()),
-          video_frame->visible_rect().width(),
-          video_frame->visible_rect().height(),
-          video_frame->stride(media::VideoFrame::kYPlane),
-          video_frame->stride(media::VideoFrame::kUPlane),
-          bitmap->rowBytes(),
-          media::YV12);
-      break;
-
-    case media::VideoFrame::YV16:
-      media::ConvertYUVToRGB32(
-          video_frame->data(media::VideoFrame::kYPlane) + y_offset,
-          video_frame->data(media::VideoFrame::kUPlane) + uv_offset,
-          video_frame->data(media::VideoFrame::kVPlane) + uv_offset,
-          static_cast<uint8*>(bitmap->getPixels()),
-          video_frame->visible_rect().width(),
-          video_frame->visible_rect().height(),
-          video_frame->stride(media::VideoFrame::kYPlane),
-          video_frame->stride(media::VideoFrame::kUPlane),
-          bitmap->rowBytes(),
-          media::YV16);
-      break;
-
-    case media::VideoFrame::YV12A:
-      media::ConvertYUVAToARGB(
-          video_frame->data(media::VideoFrame::kYPlane) + y_offset,
-          video_frame->data(media::VideoFrame::kUPlane) + uv_offset,
-          video_frame->data(media::VideoFrame::kVPlane) + uv_offset,
-          video_frame->data(media::VideoFrame::kAPlane),
-          static_cast<uint8*>(bitmap->getPixels()),
-          video_frame->visible_rect().width(),
-          video_frame->visible_rect().height(),
-          video_frame->stride(media::VideoFrame::kYPlane),
-          video_frame->stride(media::VideoFrame::kUPlane),
-          video_frame->stride(media::VideoFrame::kAPlane),
-          bitmap->rowBytes(),
-          media::YV12);
-      break;
-
-    case media::VideoFrame::NATIVE_TEXTURE:
-      DCHECK_EQ(video_frame->format(), media::VideoFrame::NATIVE_TEXTURE);
-      video_frame->ReadPixelsFromNativeTexture(*bitmap);
-      break;
-
-    default:
-      NOTREACHED();
-      break;
+    size_t uv_offset = (video_frame->stride(media::VideoFrame::kUPlane) *
+                        (video_frame->visible_rect().y() >> y_shift)) +
+                       (video_frame->visible_rect().x() >> 1);
+    uint8* frame_clip_y =
+        video_frame->data(media::VideoFrame::kYPlane) + y_offset;
+    uint8* frame_clip_u =
+        video_frame->data(media::VideoFrame::kUPlane) + uv_offset;
+    uint8* frame_clip_v =
+        video_frame->data(media::VideoFrame::kVPlane) + uv_offset;
+
+    media::ConvertYUVToRGB32(frame_clip_y,
+                             frame_clip_u,
+                             frame_clip_v,
+                             static_cast<uint8*>(bitmap->getPixels()),
+                             video_frame->visible_rect().width(),
+                             video_frame->visible_rect().height(),
+                             video_frame->stride(media::VideoFrame::kYPlane),
+                             video_frame->stride(media::VideoFrame::kUPlane),
+                             bitmap->rowBytes(),
+                             yuv_type);
+  } else {
+    DCHECK_EQ(video_frame->format(), media::VideoFrame::NATIVE_TEXTURE);
+    video_frame->ReadPixelsFromNativeTexture(*bitmap);
   }
   bitmap->notifyPixelsChanged();
   bitmap->unlockPixels();
@@ -297,8 +255,7 @@ void SkCanvasVideoRenderer::Paint(media::VideoFrame* video_frame,

   // Paint black rectangle if there isn't a frame available or the
   // frame has an unexpected format.
-  if (!video_frame ||
-      !IsEitherYV12OrYV12AOrYV16OrNative(video_frame->format())) {
+  if (!video_frame || !IsEitherYV12OrYV16OrNative(video_frame->format())) {
     canvas->drawRect(dest, paint);
     return;
   }
diff --git a/media/filters/vpx_video_decoder.cc b/media/filters/vpx_video_decoder.cc
index 76054eb..0b92a7d 100644
--- a/media/filters/vpx_video_decoder.cc
+++ b/media/filters/vpx_video_decoder.cc
@@ -11,7 +11,6 @@
 #include "base/logging.h"
 #include "base/message_loop_proxy.h"
 #include "base/string_number_conversions.h"
-#include "base/sys_byteorder.h"
 #include "media/base/bind_to_loop.h"
 #include "media/base/decoder_buffer.h"
 #include "media/base/demuxer_stream.h"
@@ -21,6 +20,15 @@
 #include "media/base/video_frame.h"
 #include "media/base/video_util.h"

+// Include libvpx header files.
+// VPX_CODEC_DISABLE_COMPAT excludes parts of the libvpx API that provide
+// backwards compatibility for legacy applications using the library.
+#define VPX_CODEC_DISABLE_COMPAT 1
+extern "C" {
+#include "third_party/libvpx/source/libvpx/vpx/vpx_decoder.h"
+#include "third_party/libvpx/source/libvpx/vpx/vp8dx.h"
+}
+
 namespace media {

 // Always try to use three threads for video decoding. There is little reason
@@ -50,11 +58,13 @@ static int GetThreadCount() {
 VpxVideoDecoder::VpxVideoDecoder(
     const scoped_refptr<base::MessageLoopProxy>& message_loop)
     : message_loop_(message_loop),
-      state_(kUninitialized) {
+      state_(kUninitialized),
+      vpx_codec_(NULL) {
 }

 VpxVideoDecoder::~VpxVideoDecoder() {
   DCHECK_EQ(kUninitialized, state_);
+  CloseDecoder();
 }

 void VpxVideoDecoder::Initialize(
@@ -82,28 +92,6 @@ void VpxVideoDecoder::Initialize(
   status_cb.Run(PIPELINE_OK);
 }

-static scoped_ptr<vpx_codec_ctx, VpxDeleter> InitializeVpxContext(
-    scoped_ptr<vpx_codec_ctx, VpxDeleter> context,
-    const VideoDecoderConfig& config) {
-  context.reset(new vpx_codec_ctx());
-  vpx_codec_dec_cfg_t vpx_config = {0};
-  vpx_config.w = config.coded_size().width();
-  vpx_config.h = config.coded_size().height();
-  vpx_config.threads = GetThreadCount();
-
-  vpx_codec_err_t status = vpx_codec_dec_init(context.get(),
-                                              config.codec() == kCodecVP9 ?
-                                                  vpx_codec_vp9_dx() :
-                                                  vpx_codec_vp8_dx(),
-                                              &vpx_config,
-                                              0);
-  if (status != VPX_CODEC_OK) {
-    LOG(ERROR) << "vpx_codec_dec_init failed, status=" << status;
-    context.reset();
-  }
-  return context.Pass();
-}
-
 bool VpxVideoDecoder::ConfigureDecoder() {
   const VideoDecoderConfig& config = demuxer_stream_->video_decoder_config();
   if (!config.IsValidConfig()) {
@@ -112,32 +100,39 @@ bool VpxVideoDecoder::ConfigureDecoder() {
     return false;
   }

-  const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
-  bool can_handle = false;
-  if (cmd_line->HasSwitch(switches::kEnableVp9Playback) &&
-      config.codec() == kCodecVP9) {
-    can_handle = true;
-  }
-  if (cmd_line->HasSwitch(switches::kEnableVp8AlphaPlayback) &&
-      config.codec() == kCodecVP8 && config.format() == VideoFrame::YV12A) {
-    can_handle = true;
-  }
-  if (!can_handle)
+  if (config.codec() != kCodecVP9)
     return false;

-  vpx_codec_ = InitializeVpxContext(vpx_codec_.Pass(), config);
-  if (!vpx_codec_.get())
-    return false;
+  CloseDecoder();

-  if (config.format() == VideoFrame::YV12A) {
-    vpx_codec_alpha_ = InitializeVpxContext(vpx_codec_alpha_.Pass(), config);
-    if (!vpx_codec_alpha_.get())
-      return false;
+  vpx_codec_ = new vpx_codec_ctx();
+  vpx_codec_dec_cfg_t vpx_config = {0};
+  vpx_config.w = config.coded_size().width();
+  vpx_config.h = config.coded_size().height();
+  vpx_config.threads = GetThreadCount();
+
+  vpx_codec_err_t status = vpx_codec_dec_init(vpx_codec_,
+                                              vpx_codec_vp9_dx(),
+                                              &vpx_config,
+                                              0);
+  if (status != VPX_CODEC_OK) {
+    LOG(ERROR) << "vpx_codec_dec_init failed, status=" << status;
+    delete vpx_codec_;
+    vpx_codec_ = NULL;
+    return false;
   }

   return true;
 }

+void VpxVideoDecoder::CloseDecoder() {
+  if (vpx_codec_) {
+    vpx_codec_destroy(vpx_codec_);
+    delete vpx_codec_;
+    vpx_codec_ = NULL;
+  }
+}
+
 void VpxVideoDecoder::Read(const ReadCB& read_cb) {
   DCHECK(message_loop_->BelongsToCurrentThread());
   DCHECK(!read_cb.is_null());
@@ -251,7 +246,7 @@ void VpxVideoDecoder::DecodeBuffer(
   }

   // Any successful decode counts!
-  if (buffer->GetDataSize() && buffer->GetSideDataSize()) {
+  if (buffer->GetDataSize()) {
     PipelineStatistics statistics;
     statistics.video_bytes_decoded = buffer->GetDataSize();
     statistics_cb_.Run(statistics);
@@ -275,7 +270,7 @@ bool VpxVideoDecoder::Decode(
   // Pass |buffer| to libvpx.
   int64 timestamp = buffer->GetTimestamp().InMicroseconds();
   void* user_priv = reinterpret_cast<void*>(&timestamp);
-  vpx_codec_err_t status = vpx_codec_decode(vpx_codec_.get(),
+  vpx_codec_err_t status = vpx_codec_decode(vpx_codec_,
                                             buffer->GetData(),
                                             buffer->GetDataSize(),
                                             user_priv,
@@ -287,7 +282,7 @@ bool VpxVideoDecoder::Decode(

   // Gets pointer to decoded data.
   vpx_codec_iter_t iter = NULL;
-  const vpx_image_t* vpx_image = vpx_codec_get_frame(vpx_codec_.get(), &iter);
+  const vpx_image_t* vpx_image = vpx_codec_get_frame(vpx_codec_, &iter);
   if (!vpx_image) {
     *video_frame = NULL;
     return true;
@@ -298,45 +293,7 @@ bool VpxVideoDecoder::Decode(
     return false;
   }

-  const vpx_image_t* vpx_image_alpha = NULL;
-  if (vpx_codec_alpha_.get() && buffer->GetSideDataSize() >= 8) {
-    // Pass alpha data to libvpx.
-    int64 timestamp_alpha = buffer->GetTimestamp().InMicroseconds();
-    void* user_priv_alpha = reinterpret_cast<void*>(&timestamp_alpha);
-
-    // First 8 bytes of side data is side_data_id in big endian.
-    const uint64 side_data_id = base::NetToHost64(
-        *(reinterpret_cast<const uint64*>(buffer->GetSideData())));
-    if (side_data_id == 1) {
-      status = vpx_codec_decode(vpx_codec_alpha_.get(),
-                                buffer->GetSideData() + 8,
-                                buffer->GetSideDataSize() - 8,
-                                user_priv_alpha,
-                                0);
-
-      if (status != VPX_CODEC_OK) {
-        LOG(ERROR) << "vpx_codec_decode() failed on alpha, status=" << status;
-        return false;
-      }
-
-      // Gets pointer to decoded data.
-      vpx_codec_iter_t iter_alpha = NULL;
-      vpx_image_alpha = vpx_codec_get_frame(vpx_codec_alpha_.get(),
-                                            &iter_alpha);
-      if (!vpx_image_alpha) {
-        *video_frame = NULL;
-        return true;
-      }
-
-      if (vpx_image_alpha->user_priv !=
-          reinterpret_cast<void*>(&timestamp_alpha)) {
-        LOG(ERROR) << "Invalid output timestamp on alpha.";
-        return false;
-      }
-    }
-  }
-
-  CopyVpxImageTo(vpx_image, vpx_image_alpha, video_frame);
+  CopyVpxImageTo(vpx_image, video_frame);
   (*video_frame)->SetTimestamp(base::TimeDelta::FromMicroseconds(timestamp));
   return true;
 }
@@ -350,8 +307,7 @@ void VpxVideoDecoder::DoReset() {
 }

 void VpxVideoDecoder::CopyVpxImageTo(
-    const struct vpx_image* vpx_image,
-    const struct vpx_image* vpx_image_alpha,
+    const vpx_image* vpx_image,
     scoped_refptr<VideoFrame>* video_frame) {
   CHECK(vpx_image);
   CHECK_EQ(vpx_image->d_w % 2, 0U);
@@ -363,14 +319,11 @@ void VpxVideoDecoder::CopyVpxImageTo(
   gfx::Size natural_size =
       demuxer_stream_->video_decoder_config().natural_size();

-  *video_frame = VideoFrame::CreateFrame(vpx_codec_alpha_.get() ?
-                                             VideoFrame::YV12A :
-                                             VideoFrame::YV12,
+  *video_frame = VideoFrame::CreateFrame(VideoFrame::YV12,
                                          size,
                                          gfx::Rect(size),
                                          natural_size,
                                          kNoTimestamp());
-
   CopyYPlane(vpx_image->planes[VPX_PLANE_Y],
              vpx_image->stride[VPX_PLANE_Y],
              vpx_image->d_h,
@@ -383,17 +336,6 @@ void VpxVideoDecoder::CopyVpxImageTo(
              vpx_image->stride[VPX_PLANE_V],
              vpx_image->d_h / 2,
              *video_frame);
-  if (!vpx_codec_alpha_.get())
-    return;
-  if (!vpx_image_alpha) {
-    MakeOpaqueAPlane(vpx_image->stride[VPX_PLANE_Y], vpx_image->d_h,
-                     *video_frame);
-    return;
-  }
-  CopyAPlane(vpx_image_alpha->planes[VPX_PLANE_Y],
-             vpx_image->stride[VPX_PLANE_Y],
-             vpx_image->d_h,
-             *video_frame);
 }

 }  // namespace media
diff --git a/media/filters/vpx_video_decoder.h b/media/filters/vpx_video_decoder.h
index b363d8b..77578fd 100644
--- a/media/filters/vpx_video_decoder.h
+++ b/media/filters/vpx_video_decoder.h
@@ -9,16 +9,6 @@
 #include "base/memory/ref_counted.h"
 #include "media/base/demuxer_stream.h"
 #include "media/base/video_decoder.h"
-#include "media/base/video_frame.h"
-
-// Include libvpx header files.
-// VPX_CODEC_DISABLE_COMPAT excludes parts of the libvpx API that provide
-// backwards compatibility for legacy applications using the library.
-#define VPX_CODEC_DISABLE_COMPAT 1
-extern "C" {
-#include "third_party/libvpx/source/libvpx/vpx/vpx_decoder.h"
-#include "third_party/libvpx/source/libvpx/vpx/vp8dx.h"
-}

 struct vpx_codec_ctx;
 struct vpx_image;
@@ -29,15 +19,6 @@ class MessageLoopProxy;

 namespace media {

-struct VpxDeleter {
-  inline void operator()(vpx_codec_ctx* ptr) const {
-    if (ptr) {
-      vpx_codec_destroy(ptr);
-      delete ptr;
-    }
-  }
-};
-
 class MEDIA_EXPORT VpxVideoDecoder : public VideoDecoder {
  public:
   explicit VpxVideoDecoder(
@@ -66,6 +47,7 @@ class MEDIA_EXPORT VpxVideoDecoder : public VideoDecoder {
   // Returns true when initialization was successful.
   bool ConfigureDecoder();
+  void CloseDecoder();
   void ReadFromDemuxerStream();

   // Carries out the buffer processing operation scheduled by
@@ -80,8 +62,7 @@ class MEDIA_EXPORT VpxVideoDecoder : public VideoDecoder {
   // Reset decoder and call |reset_cb_|.
   void DoReset();

-  void CopyVpxImageTo(const struct vpx_image* vpx_image,
-                      const struct vpx_image* vpx_image_alpha,
+  void CopyVpxImageTo(const vpx_image* vpx_image,
                       scoped_refptr<VideoFrame>* video_frame);

   scoped_refptr<base::MessageLoopProxy> message_loop_;
@@ -95,8 +76,7 @@ class MEDIA_EXPORT VpxVideoDecoder : public VideoDecoder {
   // Pointer to the demuxer stream that will feed us compressed buffers.
   scoped_refptr<DemuxerStream> demuxer_stream_;

-  scoped_ptr<vpx_codec_ctx, VpxDeleter> vpx_codec_;
-  scoped_ptr<vpx_codec_ctx, VpxDeleter> vpx_codec_alpha_;
+  vpx_codec_ctx* vpx_codec_;

   DISALLOW_COPY_AND_ASSIGN(VpxVideoDecoder);
 };
diff --git a/media/media.gyp b/media/media.gyp
index 1e37b14..0f836bc 100644
--- a/media/media.gyp
+++ b/media/media.gyp
@@ -1169,8 +1169,6 @@
         'base/simd/convert_yuv_to_rgb_mmx.inc',
         'base/simd/convert_yuv_to_rgb_sse.asm',
         'base/simd/convert_yuv_to_rgb_x86.cc',
-        'base/simd/convert_yuva_to_argb_mmx.asm',
-        'base/simd/convert_yuva_to_argb_mmx.inc',
         'base/simd/empty_register_state_mmx.asm',
         'base/simd/filter_yuv.h',
         'base/simd/filter_yuv_c.cc',
diff --git a/webkit/media/filter_helpers.cc b/webkit/media/filter_helpers.cc
index 00cf121..c803f69 100644
--- a/webkit/media/filter_helpers.cc
+++ b/webkit/media/filter_helpers.cc
@@ -36,21 +36,20 @@ static void AddDefaultDecodersToCollection(
     const scoped_refptr<base::MessageLoopProxy>& message_loop,
     media::FilterCollection* filter_collection) {

+  scoped_refptr<media::FFmpegVideoDecoder> ffmpeg_video_decoder =
+      new media::FFmpegVideoDecoder(message_loop);
+  filter_collection->GetVideoDecoders()->push_back(ffmpeg_video_decoder);
+
   // TODO(phajdan.jr): Remove ifdefs when libvpx with vp9 support is released
   // (http://crbug.com/174287) .
 #if !defined(MEDIA_DISABLE_LIBVPX)
   const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
-  if (cmd_line->HasSwitch(switches::kEnableVp9Playback) ||
-      cmd_line->HasSwitch(switches::kEnableVp8AlphaPlayback) ) {
+  if (cmd_line->HasSwitch(switches::kEnableVp9Playback)) {
     scoped_refptr<media::VpxVideoDecoder> vpx_video_decoder =
         new media::VpxVideoDecoder(message_loop);
     filter_collection->GetVideoDecoders()->push_back(vpx_video_decoder);
   }
 #endif  // !defined(MEDIA_DISABLE_LIBVPX)
-
-  scoped_refptr<media::FFmpegVideoDecoder> ffmpeg_video_decoder =
-      new media::FFmpegVideoDecoder(message_loop);
-  filter_collection->GetVideoDecoders()->push_back(ffmpeg_video_decoder);
 }

 void BuildMediaSourceCollection(