author    tomfinegan@chromium.org <tomfinegan@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>    2013-04-16 22:19:15 +0000
committer tomfinegan@chromium.org <tomfinegan@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>    2013-04-16 22:19:15 +0000
commit    4e8d20a5e0c0e29b9f7235f0d01c926316e778e4 (patch)
tree      43c1b73db1f77aff7b6a53bf3fffe3b0a17e53af
parent    d674f6e23dc4d4e8944df74074e06c1b5c18a7f1 (diff)
media: Add support for playback of VP8 Alpha video streams.
BUG=147355
TEST=VP8 Alpha video streams play

Review URL: https://codereview.chromium.org/12263013
Patch from Vignesh Venkatasubramanian <vigneshv@chromium.org>.

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@194465 0039d316-1c4b-4281-b951-d872f2087c98
-rw-r--r--  cc/resources/video_resource_updater.cc                          4
-rw-r--r--  chrome/app/generated_resources.grd                              6
-rw-r--r--  chrome/browser/about_flags.cc                                   7
-rw-r--r--  content/browser/renderer_host/media/video_capture_controller.cc 21
-rw-r--r--  content/browser/renderer_host/render_process_host_impl.cc      1
-rw-r--r--  media/base/decoder_buffer.cc                                    48
-rw-r--r--  media/base/decoder_buffer.h                                     14
-rw-r--r--  media/base/decoder_buffer_unittest.cc                           11
-rw-r--r--  media/base/media_switches.cc                                    3
-rw-r--r--  media/base/media_switches.h                                     2
-rw-r--r--  media/base/simd/convert_yuv_to_rgb.h                            60
-rw-r--r--  media/base/simd/convert_yuv_to_rgb_c.cc                         78
-rw-r--r--  media/base/simd/convert_yuv_to_rgb_x86.cc                       31
-rw-r--r--  media/base/simd/convert_yuva_to_argb_mmx.asm                    23
-rw-r--r--  media/base/simd/convert_yuva_to_argb_mmx.inc                    174
-rw-r--r--  media/base/simd/yuv_to_rgb_table.cc                             85
-rw-r--r--  media/base/simd/yuv_to_rgb_table.h                              2
-rw-r--r--  media/base/video_frame.cc                                       27
-rw-r--r--  media/base/video_frame.h                                        4
-rw-r--r--  media/base/video_util.cc                                        10
-rw-r--r--  media/base/video_util.h                                         10
-rw-r--r--  media/base/yuv_convert.cc                                       30
-rw-r--r--  media/base/yuv_convert.h                                        15
-rw-r--r--  media/ffmpeg/ffmpeg_common.cc                                   10
-rw-r--r--  media/filters/ffmpeg_demuxer.cc                                 17
-rw-r--r--  media/filters/pipeline_integration_test.cc                      8
-rw-r--r--  media/filters/skcanvas_video_renderer.cc                        119
-rw-r--r--  media/filters/vpx_video_decoder.cc                              144
-rw-r--r--  media/filters/vpx_video_decoder.h                               26
-rw-r--r--  media/media.gyp                                                 2
-rw-r--r--  webkit/media/filter_helpers.cc                                  11
31 files changed, 900 insertions, 103 deletions
diff --git a/cc/resources/video_resource_updater.cc b/cc/resources/video_resource_updater.cc
index 11bfc31..2e68ea9 100644
--- a/cc/resources/video_resource_updater.cc
+++ b/cc/resources/video_resource_updater.cc
@@ -46,6 +46,7 @@ bool VideoResourceUpdater::VerifyFrame(
switch (video_frame->format()) {
// Acceptable inputs.
case media::VideoFrame::YV12:
+ case media::VideoFrame::YV12A:
case media::VideoFrame::YV16:
case media::VideoFrame::NATIVE_TEXTURE:
#if defined(GOOGLE_TV)
@@ -76,6 +77,7 @@ static gfx::Size SoftwarePlaneDimension(
switch (input_frame_format) {
case media::VideoFrame::YV12:
+ case media::VideoFrame::YV12A:
return gfx::ToFlooredSize(gfx::ScaleSize(coded_size, 0.5f, 0.5f));
case media::VideoFrame::YV16:
return gfx::ToFlooredSize(gfx::ScaleSize(coded_size, 0.5f, 1.f));
@@ -113,8 +115,10 @@ VideoFrameExternalResources VideoResourceUpdater::CreateForSoftwarePlanes(
// Only YUV software video frames are supported.
DCHECK(input_frame_format == media::VideoFrame::YV12 ||
+ input_frame_format == media::VideoFrame::YV12A ||
input_frame_format == media::VideoFrame::YV16);
if (input_frame_format != media::VideoFrame::YV12 &&
+ input_frame_format != media::VideoFrame::YV12A &&
input_frame_format != media::VideoFrame::YV16)
return VideoFrameExternalResources();
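Throughout this patch YV12A keeps the YV12 plane layout and adds a full-resolution alpha plane: Y and A share geometry, while U and V stay 2x2 subsampled (see the video_frame.cc hunks below). A minimal standalone sketch of the per-plane geometry, illustrative only and not part of the patch:

// Illustrative: per-plane dimensions for the formats handled above.
// Y and A planes are full resolution; YV12/YV12A chroma is subsampled
// 2x2 and YV16 chroma 2x1.
struct PlaneSize { int width; int height; };

PlaneSize SoftwarePlaneSize(int coded_w, int coded_h,
                            bool is_chroma, bool is_yv16) {
  PlaneSize size;
  size.width = is_chroma ? coded_w / 2 : coded_w;
  size.height = (is_chroma && !is_yv16) ? coded_h / 2 : coded_h;
  return size;
}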
diff --git a/chrome/app/generated_resources.grd b/chrome/app/generated_resources.grd
index e755f0e..784c111 100644
--- a/chrome/app/generated_resources.grd
+++ b/chrome/app/generated_resources.grd
@@ -6716,6 +6716,12 @@ Keep your key file in a safe place. You will need it to create new versions of y
<message name="IDS_FLAGS_ENABLE_VP9_PLAYBACK_DESCRIPTION" desc="Description for the flag to enable VP9 playback in &lt;video&gt; elements.">
Enable experimental VP9 playback in the video element.
</message>
+ <message name="IDS_FLAGS_ENABLE_VP8_ALPHA_PLAYBACK_NAME" desc="Title for the flag to enable VP8 Alpha playback in &lt;video&gt; elements.">
+ Enable VP8 Alpha playback in &lt;video&gt; elements.
+ </message>
+ <message name="IDS_FLAGS_ENABLE_VP8_ALPHA_PLAYBACK_DESCRIPTION" desc="Description for the flag to enable VP8 Alpha playback in &lt;video&gt; elements.">
+ Enable experimental VP8 Alpha playback in the video element.
+ </message>
<message name="IDS_FLAGS_ASH_AUTO_WINDOW_PLACEMENT_NAME" desc="Name for the option to enable/disable the auto window placement functionality.">
Automatic window placement.
</message>
diff --git a/chrome/browser/about_flags.cc b/chrome/browser/about_flags.cc
index 563297b..a7bd015 100644
--- a/chrome/browser/about_flags.cc
+++ b/chrome/browser/about_flags.cc
@@ -722,6 +722,13 @@ const Experiment kExperiments[] = {
SINGLE_VALUE_TYPE(switches::kEnableVp9Playback)
},
{
+ "enable-vp8-alpha-playback",
+ IDS_FLAGS_ENABLE_VP8_ALPHA_PLAYBACK_NAME,
+ IDS_FLAGS_ENABLE_VP8_ALPHA_PLAYBACK_DESCRIPTION,
+ kOsDesktop,
+ SINGLE_VALUE_TYPE(switches::kEnableVp8AlphaPlayback)
+ },
+ {
"enable-managed-users",
IDS_FLAGS_ENABLE_LOCALLY_MANAGED_USERS_NAME,
IDS_FLAGS_ENABLE_LOCALLY_MANAGED_USERS_DESCRIPTION,
diff --git a/content/browser/renderer_host/media/video_capture_controller.cc b/content/browser/renderer_host/media/video_capture_controller.cc
index 25d41a5..b82b796 100644
--- a/content/browser/renderer_host/media/video_capture_controller.cc
+++ b/content/browser/renderer_host/media/video_capture_controller.cc
@@ -398,6 +398,7 @@ void VideoCaptureController::OnIncomingCapturedVideoFrame(
const int kYPlane = media::VideoFrame::kYPlane;
const int kUPlane = media::VideoFrame::kUPlane;
const int kVPlane = media::VideoFrame::kVPlane;
+ const int kAPlane = media::VideoFrame::kAPlane;
const int kRGBPlane = media::VideoFrame::kRGBPlane;
// Do color conversion from the camera format to I420.
@@ -430,6 +431,26 @@ void VideoCaptureController::OnIncomingCapturedVideoFrame(
target);
break;
}
+ case media::VideoFrame::YV12A: {
+ DCHECK(!chopped_width_ && !chopped_height_);
+ media::CopyYPlane(frame->data(kYPlane),
+ frame->stride(kYPlane),
+ frame->rows(kYPlane),
+ target);
+ media::CopyUPlane(frame->data(kUPlane),
+ frame->stride(kUPlane),
+ frame->rows(kUPlane),
+ target);
+ media::CopyVPlane(frame->data(kVPlane),
+ frame->stride(kVPlane),
+ frame->rows(kVPlane),
+ target);
+ media::CopyAPlane(frame->data(kAPlane),
+ frame->stride(kAPlane),
+ frame->rows(kAPlane),
+ target);
+ break;
+ }
case media::VideoFrame::RGB32: {
media::ConvertRGB32ToYUV(frame->data(kRGBPlane),
target->data(kYPlane),
diff --git a/content/browser/renderer_host/render_process_host_impl.cc b/content/browser/renderer_host/render_process_host_impl.cc
index 53b2822..9f97093 100644
--- a/content/browser/renderer_host/render_process_host_impl.cc
+++ b/content/browser/renderer_host/render_process_host_impl.cc
@@ -868,6 +868,7 @@ void RenderProcessHostImpl::PropagateBrowserCommandLineToRenderer(
switches::kEnableViewport,
switches::kEnableOpusPlayback,
switches::kEnableVp9Playback,
+ switches::kEnableVp8AlphaPlayback,
switches::kForceDeviceScaleFactor,
switches::kFullMemoryCrashReport,
#if !defined (GOOGLE_CHROME_BUILD)
diff --git a/media/base/decoder_buffer.cc b/media/base/decoder_buffer.cc
index 03f9bbb..aec4521 100644
--- a/media/base/decoder_buffer.cc
+++ b/media/base/decoder_buffer.cc
@@ -10,12 +10,14 @@
namespace media {
DecoderBuffer::DecoderBuffer(int size)
- : size_(size) {
+ : size_(size),
+ side_data_size_(0) {
Initialize();
}
DecoderBuffer::DecoderBuffer(const uint8* data, int size)
- : size_(size) {
+ : size_(size),
+ side_data_size_(0) {
if (!data) {
CHECK_EQ(size_, 0);
return;
@@ -25,6 +27,20 @@ DecoderBuffer::DecoderBuffer(const uint8* data, int size)
memcpy(data_.get(), data, size_);
}
+DecoderBuffer::DecoderBuffer(const uint8* data, int size,
+ const uint8* side_data, int side_data_size)
+ : size_(size),
+ side_data_size_(side_data_size) {
+ if (!data) {
+ CHECK_EQ(size_, 0);
+ return;
+ }
+
+ Initialize();
+ memcpy(data_.get(), data, size_);
+ memcpy(side_data_.get(), side_data, side_data_size_);
+}
+
DecoderBuffer::~DecoderBuffer() {}
void DecoderBuffer::Initialize() {
@@ -32,6 +48,11 @@ void DecoderBuffer::Initialize() {
data_.reset(reinterpret_cast<uint8*>(
base::AlignedAlloc(size_ + kPaddingSize, kAlignmentSize)));
memset(data_.get() + size_, 0, kPaddingSize);
+ if (side_data_size_ > 0) {
+ side_data_.reset(reinterpret_cast<uint8*>(
+ base::AlignedAlloc(side_data_size_ + kPaddingSize, kAlignmentSize)));
+ memset(side_data_.get() + side_data_size_, 0, kPaddingSize);
+ }
}
// static
@@ -43,6 +64,18 @@ scoped_refptr<DecoderBuffer> DecoderBuffer::CopyFrom(const uint8* data,
}
// static
+scoped_refptr<DecoderBuffer> DecoderBuffer::CopyFrom(const uint8* data,
+ int data_size,
+ const uint8* side_data,
+ int side_data_size) {
+ // If you hit this CHECK you likely have a bug in a demuxer. Go fix it.
+ CHECK(data);
+ CHECK(side_data);
+ return make_scoped_refptr(new DecoderBuffer(data, data_size,
+ side_data, side_data_size));
+}
+
+// static
scoped_refptr<DecoderBuffer> DecoderBuffer::CreateEOSBuffer() {
return make_scoped_refptr(new DecoderBuffer(NULL, 0));
}
@@ -82,6 +115,16 @@ int DecoderBuffer::GetDataSize() const {
return size_;
}
+const uint8* DecoderBuffer::GetSideData() const {
+ DCHECK(!IsEndOfStream());
+ return side_data_.get();
+}
+
+int DecoderBuffer::GetSideDataSize() const {
+ DCHECK(!IsEndOfStream());
+ return side_data_size_;
+}
+
const DecryptConfig* DecoderBuffer::GetDecryptConfig() const {
DCHECK(!IsEndOfStream());
return decrypt_config_.get();
@@ -105,6 +148,7 @@ std::string DecoderBuffer::AsHumanReadableString() {
s << "timestamp: " << timestamp_.InMicroseconds()
<< " duration: " << duration_.InMicroseconds()
<< " size: " << size_
+ << " side_data_size: " << side_data_size_
<< " encrypted: " << (decrypt_config_ != NULL);
return s.str();
}
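A minimal usage sketch of the new side-data overload (buffer contents and sizes here are hypothetical placeholders for demuxer-provided memory):

// Illustrative only: copy a compressed frame plus its side data into a
// DecoderBuffer. kFrame/kFrameSize and kAlpha/kAlphaSize are
// hypothetical.
scoped_refptr<media::DecoderBuffer> buffer =
    media::DecoderBuffer::CopyFrom(kFrame, kFrameSize,
                                   kAlpha, kAlphaSize);
const uint8* side = buffer->GetSideData();   // padded, aligned copy
int side_size = buffer->GetSideDataSize();   // == kAlphaSize

Note that the existing constructors initialize side_data_size_ to 0, so GetSideDataSize() doubles as the "has side data" test used by the decoder later in this patch.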
diff --git a/media/base/decoder_buffer.h b/media/base/decoder_buffer.h
index c23e88f..168ab2c 100644
--- a/media/base/decoder_buffer.h
+++ b/media/base/decoder_buffer.h
@@ -47,6 +47,13 @@ class MEDIA_EXPORT DecoderBuffer
// padded and aligned as necessary. |data| must not be NULL and |size| >= 0.
static scoped_refptr<DecoderBuffer> CopyFrom(const uint8* data, int size);
+ // Create a DecoderBuffer whose |data_| is copied from |data| and |side_data_|
+ // is copied from |side_data|. Buffers will be padded and aligned as necessary.
+ // Data pointers must not be NULL and sizes must be >= 0.
+ static scoped_refptr<DecoderBuffer> CopyFrom(const uint8* data, int size,
+ const uint8* side_data,
+ int side_data_size);
+
// Create a DecoderBuffer indicating we've reached end of stream.
//
// Calling any method other than IsEndOfStream() on the resulting buffer
@@ -64,6 +71,9 @@ class MEDIA_EXPORT DecoderBuffer
int GetDataSize() const;
+ const uint8* GetSideData() const;
+ int GetSideDataSize() const;
+
const DecryptConfig* GetDecryptConfig() const;
void SetDecryptConfig(scoped_ptr<DecryptConfig> decrypt_config);
@@ -80,6 +90,8 @@ class MEDIA_EXPORT DecoderBuffer
// will be padded and aligned as necessary. If |data| is NULL then |data_| is
// set to NULL and |buffer_size_| to 0.
DecoderBuffer(const uint8* data, int size);
+ DecoderBuffer(const uint8* data, int size,
+ const uint8* side_data, int side_data_size);
virtual ~DecoderBuffer();
private:
@@ -88,6 +100,8 @@ class MEDIA_EXPORT DecoderBuffer
int size_;
scoped_ptr<uint8, base::ScopedPtrAlignedFree> data_;
+ int side_data_size_;
+ scoped_ptr<uint8, base::ScopedPtrAlignedFree> side_data_;
scoped_ptr<DecryptConfig> decrypt_config_;
// Constructor helper method for memory allocations.
diff --git a/media/base/decoder_buffer_unittest.cc b/media/base/decoder_buffer_unittest.cc
index 32c38d0..7880a80 100644
--- a/media/base/decoder_buffer_unittest.cc
+++ b/media/base/decoder_buffer_unittest.cc
@@ -35,6 +35,17 @@ TEST(DecoderBufferTest, CopyFrom) {
EXPECT_EQ(buffer2->GetDataSize(), kDataSize);
EXPECT_EQ(0, memcmp(buffer2->GetData(), kData, kDataSize));
EXPECT_FALSE(buffer2->IsEndOfStream());
+ scoped_refptr<DecoderBuffer> buffer3(DecoderBuffer::CopyFrom(
+ reinterpret_cast<const uint8*>(&kData), kDataSize,
+ reinterpret_cast<const uint8*>(&kData), kDataSize));
+ ASSERT_TRUE(buffer3);
+ EXPECT_NE(kData, buffer3->GetData());
+ EXPECT_EQ(buffer3->GetDataSize(), kDataSize);
+ EXPECT_EQ(0, memcmp(buffer3->GetData(), kData, kDataSize));
+ EXPECT_NE(kData, buffer3->GetSideData());
+ EXPECT_EQ(buffer3->GetSideDataSize(), kDataSize);
+ EXPECT_EQ(0, memcmp(buffer3->GetSideData(), kData, kDataSize));
+ EXPECT_FALSE(buffer3->IsEndOfStream());
}
#if !defined(OS_ANDROID)
diff --git a/media/base/media_switches.cc b/media/base/media_switches.cc
index 04e711e..7c1ab90 100644
--- a/media/base/media_switches.cc
+++ b/media/base/media_switches.cc
@@ -57,6 +57,9 @@ const char kEnableOpusPlayback[] = "enable-opus-playback";
// Enables VP9 playback in media elements.
const char kEnableVp9Playback[] = "enable-vp9-playback";
+// Enables VP8 Alpha playback in media elements.
+const char kEnableVp8AlphaPlayback[] = "enable-vp8-alpha-playback";
+
#if defined(OS_WIN)
const char kWaveOutBuffers[] = "waveout-buffers";
#endif
diff --git a/media/base/media_switches.h b/media/base/media_switches.h
index ea32fae..e9b6ab9 100644
--- a/media/base/media_switches.h
+++ b/media/base/media_switches.h
@@ -44,6 +44,8 @@ MEDIA_EXPORT extern const char kEnableOpusPlayback[];
MEDIA_EXPORT extern const char kEnableVp9Playback[];
+MEDIA_EXPORT extern const char kEnableVp8AlphaPlayback[];
+
#if defined(OS_WIN)
MEDIA_EXPORT extern const char kWaveOutBuffers[];
#endif
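Renderer code gates on the new switch with the usual CommandLine pattern; a minimal sketch (the same test appears in the filter_helpers.cc and vpx_video_decoder.cc hunks later in this patch):

#include "base/command_line.h"
#include "media/base/media_switches.h"

// Illustrative: decide whether VP8 Alpha decoding may be attempted.
const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
bool vp8_alpha_enabled =
    cmd_line->HasSwitch(switches::kEnableVp8AlphaPlayback);

Because the browser process forwards this switch to renderer command lines (see the render_process_host_impl.cc hunk above), the check behaves the same in both processes.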
diff --git a/media/base/simd/convert_yuv_to_rgb.h b/media/base/simd/convert_yuv_to_rgb.h
index d05f039..7db35b5 100644
--- a/media/base/simd/convert_yuv_to_rgb.h
+++ b/media/base/simd/convert_yuv_to_rgb.h
@@ -21,6 +21,19 @@ typedef void (*ConvertYUVToRGB32Proc)(const uint8*,
int,
YUVType);
+typedef void (*ConvertYUVAToARGBProc)(const uint8*,
+ const uint8*,
+ const uint8*,
+ const uint8*,
+ uint8*,
+ int,
+ int,
+ int,
+ int,
+ int,
+ int,
+ YUVType);
+
void ConvertYUVToRGB32_C(const uint8* yplane,
const uint8* uplane,
const uint8* vplane,
@@ -32,6 +45,19 @@ void ConvertYUVToRGB32_C(const uint8* yplane,
int rgbstride,
YUVType yuv_type);
+void ConvertYUVAToARGB_C(const uint8* yplane,
+ const uint8* uplane,
+ const uint8* vplane,
+ const uint8* aplane,
+ uint8* rgbframe,
+ int width,
+ int height,
+ int ystride,
+ int uvstride,
+ int avstride,
+ int rgbstride,
+ YUVType yuv_type);
+
void ConvertYUVToRGB32_SSE(const uint8* yplane,
const uint8* uplane,
const uint8* vplane,
@@ -54,6 +80,19 @@ void ConvertYUVToRGB32_MMX(const uint8* yplane,
int rgbstride,
YUVType yuv_type);
+void ConvertYUVAToARGB_MMX(const uint8* yplane,
+ const uint8* uplane,
+ const uint8* vplane,
+ const uint8* aplane,
+ uint8* rgbframe,
+ int width,
+ int height,
+ int ystride,
+ int uvstride,
+ int avstride,
+ int rgbstride,
+ YUVType yuv_type);
+
} // namespace media
// Assembly functions are declared without namespace.
@@ -72,6 +111,13 @@ typedef void (*ConvertYUVToRGB32RowProc)(const uint8*,
uint8*,
ptrdiff_t);
+typedef void (*ConvertYUVAToARGBRowProc)(const uint8*,
+ const uint8*,
+ const uint8*,
+ const uint8*,
+ uint8*,
+ ptrdiff_t);
+
typedef void (*ScaleYUVToRGB32RowProc)(const uint8*,
const uint8*,
const uint8*,
@@ -85,12 +131,26 @@ void ConvertYUVToRGB32Row_C(const uint8* yplane,
uint8* rgbframe,
ptrdiff_t width);
+void ConvertYUVAToARGBRow_C(const uint8* yplane,
+ const uint8* uplane,
+ const uint8* vplane,
+ const uint8* aplane,
+ uint8* rgbframe,
+ ptrdiff_t width);
+
void ConvertYUVToRGB32Row_MMX(const uint8* yplane,
const uint8* uplane,
const uint8* vplane,
uint8* rgbframe,
ptrdiff_t width);
+void ConvertYUVAToARGBRow_MMX(const uint8* yplane,
+ const uint8* uplane,
+ const uint8* vplane,
+ const uint8* aplane,
+ uint8* rgbframe,
+ ptrdiff_t width);
+
void ConvertYUVToRGB32Row_SSE(const uint8* yplane,
const uint8* uplane,
const uint8* vplane,
diff --git a/media/base/simd/convert_yuv_to_rgb_c.cc b/media/base/simd/convert_yuv_to_rgb_c.cc
index 2849cac..e09ed13 100644
--- a/media/base/simd/convert_yuv_to_rgb_c.cc
+++ b/media/base/simd/convert_yuv_to_rgb_c.cc
@@ -39,6 +39,34 @@ static inline void ConvertYUVToRGB32_C(uint8 y,
(packuswb(a) << 24);
}
+static inline void ConvertYUVAToARGB_C(uint8 y,
+ uint8 u,
+ uint8 v,
+ uint8 a,
+ uint8* rgb_buf) {
+ int b = kCoefficientsRgbY[256+u][0];
+ int g = kCoefficientsRgbY[256+u][1];
+ int r = kCoefficientsRgbY[256+u][2];
+
+ b = paddsw(b, kCoefficientsRgbY[512+v][0]);
+ g = paddsw(g, kCoefficientsRgbY[512+v][1]);
+ r = paddsw(r, kCoefficientsRgbY[512+v][2]);
+
+ b = paddsw(b, kCoefficientsRgbY[y][0]);
+ g = paddsw(g, kCoefficientsRgbY[y][1]);
+ r = paddsw(r, kCoefficientsRgbY[y][2]);
+
+ b >>= 6;
+ g >>= 6;
+ r >>= 6;
+
+ b = packuswb(b) * a >> 8;
+ g = packuswb(g) * a >> 8;
+ r = packuswb(r) * a >> 8;
+
+ *reinterpret_cast<uint32*>(rgb_buf) = b | (g << 8) | (r << 16) | (a << 24);
+}
+
extern "C" {
void ConvertYUVToRGB32Row_C(const uint8* y_buf,
@@ -59,6 +87,27 @@ void ConvertYUVToRGB32Row_C(const uint8* y_buf,
}
}
+void ConvertYUVAToARGBRow_C(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ const uint8* a_buf,
+ uint8* rgba_buf,
+ ptrdiff_t width) {
+ for (int x = 0; x < width; x += 2) {
+ uint8 u = u_buf[x >> 1];
+ uint8 v = v_buf[x >> 1];
+ uint8 y0 = y_buf[x];
+ uint8 a0 = a_buf[x];
+ ConvertYUVAToARGB_C(y0, u, v, a0, rgba_buf);
+ if ((x + 1) < width) {
+ uint8 y1 = y_buf[x + 1];
+ uint8 a1 = a_buf[x + 1];
+ ConvertYUVAToARGB_C(y1, u, v, a1, rgba_buf + 4);
+ }
+ rgba_buf += 8; // Advance 2 pixels.
+ }
+}
+
// 16.16 fixed point is used. A shift by 16 isolates the integer.
// A shift by 17 is used to further subsample the chrominence channels.
// & 0xffff isolates the fixed point fraction. >> 2 to get the upper 2 bits,
@@ -161,4 +210,33 @@ void ConvertYUVToRGB32_C(const uint8* yplane,
}
}
+void ConvertYUVAToARGB_C(const uint8* yplane,
+ const uint8* uplane,
+ const uint8* vplane,
+ const uint8* aplane,
+ uint8* rgbaframe,
+ int width,
+ int height,
+ int ystride,
+ int uvstride,
+ int astride,
+ int rgbastride,
+ YUVType yuv_type) {
+ unsigned int y_shift = yuv_type;
+ for (int y = 0; y < height; y++) {
+ uint8* rgba_row = rgbaframe + y * rgbastride;
+ const uint8* y_ptr = yplane + y * ystride;
+ const uint8* u_ptr = uplane + (y >> y_shift) * uvstride;
+ const uint8* v_ptr = vplane + (y >> y_shift) * uvstride;
+ const uint8* a_ptr = aplane + y * astride;
+
+ ConvertYUVAToARGBRow_C(y_ptr,
+ u_ptr,
+ v_ptr,
+ a_ptr,
+ rgba_row,
+ width);
+ }
+}
+
} // namespace media
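The table-driven math above is easier to audit against a plain floating-point version. The following sketch is illustrative and not part of the patch; it assumes the coefficients the kCoefficientsRgbY table is built from — 1.164 for Y, 2.018/-0.391 for U, -0.813/1.596 for V (the 1.596 R-from-V term falls outside the visible hunk of yuv_to_rgb_table.cc but belongs to the same RGBV macro):

// Floating-point reference for ConvertYUVAToARGB_C(). The tables store
// these coefficients pre-scaled by 64; the >> 6 above undoes that
// scaling, and packuswb() clamps to [0, 255].
static uint8 Clamp(double v) {
  if (v < 0) return 0;
  if (v > 255) return 255;
  return static_cast<uint8>(v + 0.5);
}

static void YuvaToArgbReference(uint8 y, uint8 u, uint8 v, uint8 a,
                                uint8* rgb_buf) {
  const double yf = 1.164 * (y - 16);
  const uint8 r = Clamp(yf + 1.596 * (v - 128));
  const uint8 g = Clamp(yf - 0.813 * (v - 128) - 0.391 * (u - 128));
  const uint8 b = Clamp(yf + 2.018 * (u - 128));
  // Premultiply color by alpha exactly as the fixed-point path does,
  // then store in the same little-endian B,G,R,A byte order as the
  // uint32 write in ConvertYUVAToARGB_C().
  rgb_buf[0] = static_cast<uint8>((b * a) >> 8);
  rgb_buf[1] = static_cast<uint8>((g * a) >> 8);
  rgb_buf[2] = static_cast<uint8>((r * a) >> 8);
  rgb_buf[3] = a;
}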
diff --git a/media/base/simd/convert_yuv_to_rgb_x86.cc b/media/base/simd/convert_yuv_to_rgb_x86.cc
index 37b168d..d1d6e16 100644
--- a/media/base/simd/convert_yuv_to_rgb_x86.cc
+++ b/media/base/simd/convert_yuv_to_rgb_x86.cc
@@ -40,6 +40,37 @@ void ConvertYUVToRGB32_MMX(const uint8* yplane,
EmptyRegisterState();
}
+void ConvertYUVAToARGB_MMX(const uint8* yplane,
+ const uint8* uplane,
+ const uint8* vplane,
+ const uint8* aplane,
+ uint8* rgbframe,
+ int width,
+ int height,
+ int ystride,
+ int uvstride,
+ int astride,
+ int rgbstride,
+ YUVType yuv_type) {
+ unsigned int y_shift = yuv_type;
+ for (int y = 0; y < height; ++y) {
+ uint8* rgb_row = rgbframe + y * rgbstride;
+ const uint8* y_ptr = yplane + y * ystride;
+ const uint8* u_ptr = uplane + (y >> y_shift) * uvstride;
+ const uint8* v_ptr = vplane + (y >> y_shift) * uvstride;
+ const uint8* a_ptr = aplane + y * astride;
+
+ ConvertYUVAToARGBRow_MMX(y_ptr,
+ u_ptr,
+ v_ptr,
+ a_ptr,
+ rgb_row,
+ width);
+ }
+
+ EmptyRegisterState();
+}
+
void ConvertYUVToRGB32_SSE(const uint8* yplane,
const uint8* uplane,
const uint8* vplane,
diff --git a/media/base/simd/convert_yuva_to_argb_mmx.asm b/media/base/simd/convert_yuva_to_argb_mmx.asm
new file mode 100644
index 0000000..b39315dc
--- /dev/null
+++ b/media/base/simd/convert_yuva_to_argb_mmx.asm
@@ -0,0 +1,23 @@
+; Copyright (c) 2011 The Chromium Authors. All rights reserved.
+; Use of this source code is governed by a BSD-style license that can be
+; found in the LICENSE file.
+
+%include "third_party/x86inc/x86inc.asm"
+
+;
+; This file uses MMX instructions.
+;
+ SECTION_TEXT
+ CPU MMX
+
+; Use movq to save the output.
+%define MOVQ movq
+
+; extern "C" void ConvertYUVAToARGBRow_MMX(const uint8* y_buf,
+; const uint8* u_buf,
+; const uint8* v_buf,
+; const uint8* a_buf,
+; uint8* rgb_buf,
+; ptrdiff_t width);
+%define SYMBOL ConvertYUVAToARGBRow_MMX
+%include "convert_yuva_to_argb_mmx.inc"
diff --git a/media/base/simd/convert_yuva_to_argb_mmx.inc b/media/base/simd/convert_yuva_to_argb_mmx.inc
new file mode 100644
index 0000000..621100e
--- /dev/null
+++ b/media/base/simd/convert_yuva_to_argb_mmx.inc
@@ -0,0 +1,174 @@
+; Copyright (c) 2011 The Chromium Authors. All rights reserved.
+; Use of this source code is governed by a BSD-style license that can be
+; found in the LICENSE file.
+
+ global mangle(SYMBOL) PRIVATE
+ align function_align
+
+; Non-PIC code is the fastest so use this if possible.
+%ifndef PIC
+mangle(SYMBOL):
+ %assign stack_offset 0
+ PROLOGUE 6, 7, 3, Y, U, V, A, ARGB, WIDTH, TEMP
+ extern mangle(kCoefficientsRgbY)
+ jmp .convertend
+
+.convertloop:
+ movzx TEMPd, BYTE [Uq]
+ movq mm0, [mangle(kCoefficientsRgbY) + 2048 + 8 * TEMPq]
+ add Uq, 1
+ movzx TEMPd, BYTE [Vq]
+ paddsw mm0, [mangle(kCoefficientsRgbY) + 4096 + 8 * TEMPq]
+ add Vq, 1
+ movzx TEMPd, BYTE [Yq]
+ movq mm1, [mangle(kCoefficientsRgbY) + 8 * TEMPq]
+ movzx TEMPd, BYTE [Yq + 1]
+ movq mm2, [mangle(kCoefficientsRgbY) + 8 * TEMPq]
+ add Yq, 2
+ paddsw mm1, mm0
+ paddsw mm2, mm0
+ psraw mm1, 6
+ psraw mm2, 6
+ packuswb mm1, mm2
+
+ ; Multiply ARGB by alpha value.
+ movq mm0, mm1
+ pxor mm2, mm2
+ punpcklbw mm0, mm2
+ punpckhbw mm1, mm2
+ movzx TEMPd, BYTE [Aq]
+ movq mm2, [mangle(kCoefficientsRgbY) + 6144 + 8 * TEMPq]
+ pmullw mm0, mm2
+ psrlw mm0, 8
+ movzx TEMPd, BYTE [Aq + 1]
+ movq mm2, [mangle(kCoefficientsRgbY) + 6144 + 8 * TEMPq]
+ add Aq, 2
+ pmullw mm1, mm2
+ psrlw mm1, 8
+ packuswb mm0, mm1
+
+ MOVQ [ARGBq], mm0
+ add ARGBq, 8
+
+.convertend:
+ sub WIDTHq, 2
+ jns .convertloop
+
+ ; If number of pixels is odd then compute it.
+ and WIDTHq, 1
+ jz .convertdone
+
+ movzx TEMPd, BYTE [Uq]
+ movq mm0, [mangle(kCoefficientsRgbY) + 2048 + 8 * TEMPq]
+ movzx TEMPd, BYTE [Vq]
+ paddsw mm0, [mangle(kCoefficientsRgbY) + 4096 + 8 * TEMPq]
+ movzx TEMPd, BYTE [Yq]
+ movq mm1, [mangle(kCoefficientsRgbY) + 8 * TEMPq]
+ paddsw mm1, mm0
+ psraw mm1, 6
+ packuswb mm1, mm1
+
+ ; Multiply ARGB by alpha value.
+ pxor mm0, mm0
+ punpcklbw mm1, mm0
+ movzx TEMPd, BYTE [Aq]
+ movq mm0, [mangle(kCoefficientsRgbY) + 6144 + 8 * TEMPq]
+ pmullw mm1, mm0
+ psrlw mm1, 8
+ packuswb mm1, mm1
+
+ movd [ARGBq], mm1
+
+.convertdone:
+ RET
+%endif
+
+; With PIC code we need to load the address of mangle(kCoefficientsRgbY).
+; This code is slower than the above version.
+%ifdef PIC
+mangle(SYMBOL):
+ %assign stack_offset 0
+ PROLOGUE 6, 7, 3, Y, U, V, A, ARGB, WIDTH, TEMP
+ extern mangle(kCoefficientsRgbY)
+ PUSH WIDTHq
+ DEFINE_ARGS Y, U, V, A, ARGB, TABLE, TEMP
+ LOAD_SYM TABLEq, mangle(kCoefficientsRgbY)
+ jmp .convertend
+
+.convertloop:
+ movzx TEMPd, BYTE [Uq]
+ movq mm0, [TABLEq + 2048 + 8 * TEMPq]
+ add Uq, 1
+
+ movzx TEMPd, BYTE [Vq]
+ paddsw mm0, [TABLEq + 4096 + 8 * TEMPq]
+ add Vq, 1
+
+ movzx TEMPd, BYTE [Yq]
+ movq mm1, [TABLEq + 8 * TEMPq]
+
+ movzx TEMPd, BYTE [Yq + 1]
+ movq mm2, [TABLEq + 8 * TEMPq]
+ add Yq, 2
+
+ ; Add UV components to Y component.
+ paddsw mm1, mm0
+ paddsw mm2, mm0
+
+ ; Down shift and then pack.
+ psraw mm1, 6
+ psraw mm2, 6
+ packuswb mm1, mm2
+
+ ; Unpack and multiply by alpha value, then repack high bytes of words.
+ movq mm0, mm1
+ pxor mm2, mm2
+ punpcklbw mm0, mm2
+ punpckhbw mm1, mm2
+ movzx TEMPd, BYTE [Aq]
+ movq mm2, [TABLEq + 6144 + 8 * TEMPq]
+ pmullw mm0, mm2
+ psrlw mm0, 8
+ movzx TEMPd, BYTE [Aq + 1]
+ movq mm2, [TABLEq + 6144 + 8 * TEMPq]
+ add Aq, 2
+ pmullw mm1, mm2
+ psrlw mm1, 8
+ packuswb mm0, mm1
+
+ MOVQ [ARGBq], mm0
+ add ARGBq, 8
+
+.convertend:
+ sub dword [rsp], 2
+ jns .convertloop
+
+ ; If number of pixels is odd then compute it.
+ and dword [rsp], 1
+ jz .convertdone
+
+ movzx TEMPd, BYTE [Uq]
+ movq mm0, [TABLEq + 2048 + 8 * TEMPq]
+ movzx TEMPd, BYTE [Vq]
+ paddsw mm0, [TABLEq + 4096 + 8 * TEMPq]
+ movzx TEMPd, BYTE [Yq]
+ movq mm1, [TABLEq + 8 * TEMPq]
+ paddsw mm1, mm0
+ psraw mm1, 6
+ packuswb mm1, mm1
+
+ ; Multiply ARGB by alpha value.
+ pxor mm0, mm0
+ punpcklbw mm1, mm0
+ movzx TEMPd, BYTE [Aq]
+ movq mm0, [TABLEq + 6144 + 8 * TEMPq]
+ pmullw mm1, mm0
+ psrlw mm1, 8
+ packuswb mm1, mm1
+
+ movd [ARGBq], mm1
+
+.convertdone:
+ POP TABLEq
+ RET
+%endif \ No newline at end of file
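Both the PIC and non-PIC paths above premultiply using the ALPHA(i) sub-table that yuv_to_rgb_table.cc adds below: entry i holds four words {i, i, i, i}, so pmullw scales all four channels by the alpha byte at once and psrlw 8 keeps the high byte of each product. A scalar sketch of that step (illustrative):

// Per-channel equivalent of the pmullw/psrlw pair above.
inline uint8 PremultiplyChannel(uint8 color, uint8 alpha) {
  return static_cast<uint8>((color * alpha) >> 8);
}

Note that (255 * 255) >> 8 == 254, so even a fully opaque pixel loses one part in 256 of its intensity; the C path in convert_yuv_to_rgb_c.cc applies the same formula to the color channels.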
diff --git a/media/base/simd/yuv_to_rgb_table.cc b/media/base/simd/yuv_to_rgb_table.cc
index f998e85..3789969 100644
--- a/media/base/simd/yuv_to_rgb_table.cc
+++ b/media/base/simd/yuv_to_rgb_table.cc
@@ -6,6 +6,7 @@
extern "C" {
+// Defines the R,G,B,A contributions from Y.
#define RGBY(i) { \
static_cast<int16>(1.164 * 64 * (i - 16) + 0.5), \
static_cast<int16>(1.164 * 64 * (i - 16) + 0.5), \
@@ -13,6 +14,9 @@ extern "C" {
0 \
}
+// Defines the R,G,B,A contributions from U.
+// The contribution to A is the same for any value of U
+// causing the final A value to be 255 in every conversion.
#define RGBU(i) { \
static_cast<int16>(2.018 * 64 * (i - 128) + 0.5), \
static_cast<int16>(-0.391 * 64 * (i - 128) + 0.5), \
@@ -20,6 +24,7 @@ extern "C" {
static_cast<int16>(256 * 64 - 1) \
}
+// Defines the R,G,B,A contributions from V.
#define RGBV(i) { \
0, \
static_cast<int16>(-0.813 * 64 * (i - 128) + 0.5), \
@@ -27,7 +32,18 @@ extern "C" {
0 \
}
-SIMD_ALIGNED(int16 kCoefficientsRgbY[256 * 3][4]) = {
+// Used to define a set of multiplier words for each alpha level.
+#define ALPHA(i) { \
+ i, i, i, i \
+}
+
+// The following table defines the RGBA contributions
+// for each component of YUVA. The Y table is first followed
+// by the U, and V tables. The alpha multiplier table follows.
+// These tables are aligned and kept adjacent to optimize for
+// SIMD and caching.
+
+SIMD_ALIGNED(int16 kCoefficientsRgbY[256 * 4][4]) = {
RGBY(0x00), RGBY(0x01), RGBY(0x02), RGBY(0x03),
RGBY(0x04), RGBY(0x05), RGBY(0x06), RGBY(0x07),
RGBY(0x08), RGBY(0x09), RGBY(0x0A), RGBY(0x0B),
@@ -224,10 +240,77 @@ SIMD_ALIGNED(int16 kCoefficientsRgbY[256 * 3][4]) = {
RGBV(0xF4), RGBV(0xF5), RGBV(0xF6), RGBV(0xF7),
RGBV(0xF8), RGBV(0xF9), RGBV(0xFA), RGBV(0xFB),
RGBV(0xFC), RGBV(0xFD), RGBV(0xFE), RGBV(0xFF),
+
+ // Alpha multipliers for each alpha level.
+ ALPHA(0x00), ALPHA(0x01), ALPHA(0x02), ALPHA(0x03),
+ ALPHA(0x04), ALPHA(0x05), ALPHA(0x06), ALPHA(0x07),
+ ALPHA(0x08), ALPHA(0x09), ALPHA(0x0A), ALPHA(0x0B),
+ ALPHA(0x0C), ALPHA(0x0D), ALPHA(0x0E), ALPHA(0x0F),
+ ALPHA(0x10), ALPHA(0x11), ALPHA(0x12), ALPHA(0x13),
+ ALPHA(0x14), ALPHA(0x15), ALPHA(0x16), ALPHA(0x17),
+ ALPHA(0x18), ALPHA(0x19), ALPHA(0x1A), ALPHA(0x1B),
+ ALPHA(0x1C), ALPHA(0x1D), ALPHA(0x1E), ALPHA(0x1F),
+ ALPHA(0x20), ALPHA(0x21), ALPHA(0x22), ALPHA(0x23),
+ ALPHA(0x24), ALPHA(0x25), ALPHA(0x26), ALPHA(0x27),
+ ALPHA(0x28), ALPHA(0x29), ALPHA(0x2A), ALPHA(0x2B),
+ ALPHA(0x2C), ALPHA(0x2D), ALPHA(0x2E), ALPHA(0x2F),
+ ALPHA(0x30), ALPHA(0x31), ALPHA(0x32), ALPHA(0x33),
+ ALPHA(0x34), ALPHA(0x35), ALPHA(0x36), ALPHA(0x37),
+ ALPHA(0x38), ALPHA(0x39), ALPHA(0x3A), ALPHA(0x3B),
+ ALPHA(0x3C), ALPHA(0x3D), ALPHA(0x3E), ALPHA(0x3F),
+ ALPHA(0x40), ALPHA(0x41), ALPHA(0x42), ALPHA(0x43),
+ ALPHA(0x44), ALPHA(0x45), ALPHA(0x46), ALPHA(0x47),
+ ALPHA(0x48), ALPHA(0x49), ALPHA(0x4A), ALPHA(0x4B),
+ ALPHA(0x4C), ALPHA(0x4D), ALPHA(0x4E), ALPHA(0x4F),
+ ALPHA(0x50), ALPHA(0x51), ALPHA(0x52), ALPHA(0x53),
+ ALPHA(0x54), ALPHA(0x55), ALPHA(0x56), ALPHA(0x57),
+ ALPHA(0x58), ALPHA(0x59), ALPHA(0x5A), ALPHA(0x5B),
+ ALPHA(0x5C), ALPHA(0x5D), ALPHA(0x5E), ALPHA(0x5F),
+ ALPHA(0x60), ALPHA(0x61), ALPHA(0x62), ALPHA(0x63),
+ ALPHA(0x64), ALPHA(0x65), ALPHA(0x66), ALPHA(0x67),
+ ALPHA(0x68), ALPHA(0x69), ALPHA(0x6A), ALPHA(0x6B),
+ ALPHA(0x6C), ALPHA(0x6D), ALPHA(0x6E), ALPHA(0x6F),
+ ALPHA(0x70), ALPHA(0x71), ALPHA(0x72), ALPHA(0x73),
+ ALPHA(0x74), ALPHA(0x75), ALPHA(0x76), ALPHA(0x77),
+ ALPHA(0x78), ALPHA(0x79), ALPHA(0x7A), ALPHA(0x7B),
+ ALPHA(0x7C), ALPHA(0x7D), ALPHA(0x7E), ALPHA(0x7F),
+ ALPHA(0x80), ALPHA(0x81), ALPHA(0x82), ALPHA(0x83),
+ ALPHA(0x84), ALPHA(0x85), ALPHA(0x86), ALPHA(0x87),
+ ALPHA(0x88), ALPHA(0x89), ALPHA(0x8A), ALPHA(0x8B),
+ ALPHA(0x8C), ALPHA(0x8D), ALPHA(0x8E), ALPHA(0x8F),
+ ALPHA(0x90), ALPHA(0x91), ALPHA(0x92), ALPHA(0x93),
+ ALPHA(0x94), ALPHA(0x95), ALPHA(0x96), ALPHA(0x97),
+ ALPHA(0x98), ALPHA(0x99), ALPHA(0x9A), ALPHA(0x9B),
+ ALPHA(0x9C), ALPHA(0x9D), ALPHA(0x9E), ALPHA(0x9F),
+ ALPHA(0xA0), ALPHA(0xA1), ALPHA(0xA2), ALPHA(0xA3),
+ ALPHA(0xA4), ALPHA(0xA5), ALPHA(0xA6), ALPHA(0xA7),
+ ALPHA(0xA8), ALPHA(0xA9), ALPHA(0xAA), ALPHA(0xAB),
+ ALPHA(0xAC), ALPHA(0xAD), ALPHA(0xAE), ALPHA(0xAF),
+ ALPHA(0xB0), ALPHA(0xB1), ALPHA(0xB2), ALPHA(0xB3),
+ ALPHA(0xB4), ALPHA(0xB5), ALPHA(0xB6), ALPHA(0xB7),
+ ALPHA(0xB8), ALPHA(0xB9), ALPHA(0xBA), ALPHA(0xBB),
+ ALPHA(0xBC), ALPHA(0xBD), ALPHA(0xBE), ALPHA(0xBF),
+ ALPHA(0xC0), ALPHA(0xC1), ALPHA(0xC2), ALPHA(0xC3),
+ ALPHA(0xC4), ALPHA(0xC5), ALPHA(0xC6), ALPHA(0xC7),
+ ALPHA(0xC8), ALPHA(0xC9), ALPHA(0xCA), ALPHA(0xCB),
+ ALPHA(0xCC), ALPHA(0xCD), ALPHA(0xCE), ALPHA(0xCF),
+ ALPHA(0xD0), ALPHA(0xD1), ALPHA(0xD2), ALPHA(0xD3),
+ ALPHA(0xD4), ALPHA(0xD5), ALPHA(0xD6), ALPHA(0xD7),
+ ALPHA(0xD8), ALPHA(0xD9), ALPHA(0xDA), ALPHA(0xDB),
+ ALPHA(0xDC), ALPHA(0xDD), ALPHA(0xDE), ALPHA(0xDF),
+ ALPHA(0xE0), ALPHA(0xE1), ALPHA(0xE2), ALPHA(0xE3),
+ ALPHA(0xE4), ALPHA(0xE5), ALPHA(0xE6), ALPHA(0xE7),
+ ALPHA(0xE8), ALPHA(0xE9), ALPHA(0xEA), ALPHA(0xEB),
+ ALPHA(0xEC), ALPHA(0xED), ALPHA(0xEE), ALPHA(0xEF),
+ ALPHA(0xF0), ALPHA(0xF1), ALPHA(0xF2), ALPHA(0xF3),
+ ALPHA(0xF4), ALPHA(0xF5), ALPHA(0xF6), ALPHA(0xF7),
+ ALPHA(0xF8), ALPHA(0xF9), ALPHA(0xFA), ALPHA(0xFB),
+ ALPHA(0xFC), ALPHA(0xFD), ALPHA(0xFE), ALPHA(0xFF),
};
#undef RGBY
#undef RGBU
#undef RGBV
+#undef ALPHA
} // extern "C"
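The assembly in convert_yuva_to_argb_mmx.inc indexes this table by byte offset: each sub-table is 256 entries of four int16s, i.e. 2048 bytes, which is why the U, V, and alpha lookups use offsets 2048, 4096, and 6144. A compile-time restatement (illustrative):

// Each sub-table: 256 entries * 4 int16 components * 2 bytes = 2048.
//   kCoefficientsRgbY[0]    -> Y table     (byte offset 0)
//   kCoefficientsRgbY[256]  -> U table     (byte offset 2048)
//   kCoefficientsRgbY[512]  -> V table     (byte offset 4096)
//   kCoefficientsRgbY[768]  -> ALPHA table (byte offset 6144)
COMPILE_ASSERT(sizeof(int16) * 4 * 256 == 2048, subtable_is_2048_bytes);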
diff --git a/media/base/simd/yuv_to_rgb_table.h b/media/base/simd/yuv_to_rgb_table.h
index 0c43a7a..3ed7bd9 100644
--- a/media/base/simd/yuv_to_rgb_table.h
+++ b/media/base/simd/yuv_to_rgb_table.h
@@ -19,7 +19,7 @@ extern "C" {
#endif
// Align the table to 16-bytes to allow faster reading.
-extern SIMD_ALIGNED(int16 kCoefficientsRgbY[768][4]);
+extern SIMD_ALIGNED(int16 kCoefficientsRgbY[256 * 4][4]);
} // extern "C"
diff --git a/media/base/video_frame.cc b/media/base/video_frame.cc
index 62e81ca..6ca026c 100644
--- a/media/base/video_frame.cc
+++ b/media/base/video_frame.cc
@@ -32,6 +32,7 @@ scoped_refptr<VideoFrame> VideoFrame::CreateFrame(
frame->AllocateRGB(4u);
break;
case VideoFrame::YV12:
+ case VideoFrame::YV12A:
case VideoFrame::YV16:
frame->AllocateYUV();
break;
@@ -167,6 +168,8 @@ size_t VideoFrame::NumPlanes(Format format) {
case VideoFrame::YV12:
case VideoFrame::YV16:
return 3;
+ case VideoFrame::YV12A:
+ return 4;
case VideoFrame::EMPTY:
case VideoFrame::I420:
case VideoFrame::INVALID:
@@ -204,7 +207,8 @@ void VideoFrame::AllocateRGB(size_t bytes_per_pixel) {
}
void VideoFrame::AllocateYUV() {
- DCHECK(format_ == VideoFrame::YV12 || format_ == VideoFrame::YV16);
+ DCHECK(format_ == VideoFrame::YV12 || format_ == VideoFrame::YV16 ||
+ format_ == VideoFrame::YV12A);
// Align Y rows at least at 16 byte boundaries. The stride for both
// YV12 and YV16 is 1/2 of the stride of Y. For YV12, every row of bytes for
// U and V applies to two rows of Y (one byte of UV for 4 bytes of Y), so in
@@ -213,7 +217,9 @@ void VideoFrame::AllocateYUV() {
// YV16. We also round the height of the surface allocated to be an even
// number to avoid any potential of faulting by code that attempts to access
// the Y values of the final row, but assumes that the last row of U & V
- // applies to a full two rows of Y.
+ // applies to a full two rows of Y. YV12A is the same as YV12, but with an
+ // additional alpha plane that has the same size and alignment as the Y plane.
+
size_t y_stride = RoundUp(row_bytes(VideoFrame::kYPlane),
kFrameSizeAlignment);
size_t uv_stride = RoundUp(row_bytes(VideoFrame::kUPlane),
@@ -222,9 +228,12 @@ void VideoFrame::AllocateYUV() {
// and then the size needs to be a multiple of two macroblocks (vertically).
// See libavcodec/utils.c:avcodec_align_dimensions2().
size_t y_height = RoundUp(coded_size_.height(), kFrameSizeAlignment * 2);
- size_t uv_height = format_ == VideoFrame::YV12 ? y_height / 2 : y_height;
+ size_t uv_height = (format_ == VideoFrame::YV12 ||
+ format_ == VideoFrame::YV12A) ?
+ y_height / 2 : y_height;
size_t y_bytes = y_height * y_stride;
size_t uv_bytes = uv_height * uv_stride;
+ size_t a_bytes = format_ == VideoFrame::YV12A ? y_bytes : 0;
// The extra line of UV being allocated is because h264 chroma MC
// overreads by one line in some cases, see libavcodec/utils.c:
@@ -232,7 +241,7 @@ void VideoFrame::AllocateYUV() {
// put_h264_chroma_mc4_ssse3().
uint8* data = reinterpret_cast<uint8*>(
base::AlignedAlloc(
- y_bytes + (uv_bytes * 2 + uv_stride) + kFrameSizePadding,
+ y_bytes + (uv_bytes * 2 + uv_stride) + a_bytes + kFrameSizePadding,
kFrameAddressAlignment));
no_longer_needed_cb_ = base::Bind(&ReleaseData, data);
COMPILE_ASSERT(0 == VideoFrame::kYPlane, y_plane_data_must_be_index_0);
@@ -242,6 +251,10 @@ void VideoFrame::AllocateYUV() {
strides_[VideoFrame::kYPlane] = y_stride;
strides_[VideoFrame::kUPlane] = uv_stride;
strides_[VideoFrame::kVPlane] = uv_stride;
+ if (format_ == YV12A) {
+ data_[VideoFrame::kAPlane] = data + y_bytes + (2 * uv_bytes);
+ strides_[VideoFrame::kAPlane] = y_stride;
+ }
}
VideoFrame::VideoFrame(VideoFrame::Format format,
@@ -285,7 +298,8 @@ int VideoFrame::row_bytes(size_t plane) const {
// Planar, 8bpp.
case YV12:
case YV16:
- if (plane == kYPlane)
+ case YV12A:
+ if (plane == kYPlane || plane == kAPlane)
return width;
return RoundUp(width, 2) / 2;
@@ -307,7 +321,8 @@ int VideoFrame::rows(size_t plane) const {
return height;
case YV12:
- if (plane == kYPlane)
+ case YV12A:
+ if (plane == kYPlane || plane == kAPlane)
return height;
return RoundUp(height, 2) / 2;
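For a YV12A frame, the single allocation in AllocateYUV() above is laid out as sketched below (a_bytes == y_bytes, since the alpha plane shares the Y plane's stride and row count; U precedes V per the unchanged data_ assignments in that function):

//   [0, y_bytes)                                     Y plane
//   [y_bytes, y_bytes + uv_bytes)                    U plane
//   [y_bytes + uv_bytes, y_bytes + 2 * uv_bytes)     V plane
//   [y_bytes + 2 * uv_bytes,
//    y_bytes + 2 * uv_bytes + a_bytes)               A plane (stride == y_stride)
// plus uv_stride + kFrameSizePadding of slack for chroma overreads.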
diff --git a/media/base/video_frame.h b/media/base/video_frame.h
index 9a6f0a6..6cd5a0f 100644
--- a/media/base/video_frame.h
+++ b/media/base/video_frame.h
@@ -24,13 +24,14 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
};
enum {
- kMaxPlanes = 3,
+ kMaxPlanes = 4,
kRGBPlane = 0,
kYPlane = 0,
kUPlane = 1,
kVPlane = 2,
+ kAPlane = 3,
};
// Surface formats roughly based on FOURCC labels, see:
@@ -47,6 +48,7 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
#if defined(GOOGLE_TV)
HOLE = 13, // Hole frame.
#endif
+ YV12A = 14, // 20bpp YUVA planar 1x1 Y, 2x2 VU, 1x1 A samples.
};
// Creates a new frame in system memory with given parameters. Buffers for
diff --git a/media/base/video_util.cc b/media/base/video_util.cc
index 972ac17..e1de7bd 100644
--- a/media/base/video_util.cc
+++ b/media/base/video_util.cc
@@ -62,6 +62,16 @@ void CopyVPlane(const uint8* source, int stride, int rows, VideoFrame* frame) {
CopyPlane(VideoFrame::kVPlane, source, stride, rows, frame);
}
+void CopyAPlane(const uint8* source, int stride, int rows, VideoFrame* frame) {
+ CopyPlane(VideoFrame::kAPlane, source, stride, rows, frame);
+}
+
+void MakeOpaqueAPlane(int stride, int rows, VideoFrame* frame) {
+ int rows_to_clear = std::min(frame->rows(VideoFrame::kAPlane), rows);
+ memset(frame->data(VideoFrame::kAPlane), 255,
+ frame->stride(VideoFrame::kAPlane) * rows_to_clear);
+}
+
void FillYUV(VideoFrame* frame, uint8 y, uint8 u, uint8 v) {
// Fill the Y plane.
uint8* y_plane = frame->data(VideoFrame::kYPlane);
diff --git a/media/base/video_util.h b/media/base/video_util.h
index 27156fa..05e5927 100644
--- a/media/base/video_util.h
+++ b/media/base/video_util.h
@@ -19,7 +19,7 @@ MEDIA_EXPORT gfx::Size GetNaturalSize(const gfx::Size& visible_size,
int aspect_ratio_numerator,
int aspect_ratio_denominator);
-// Copies a plane of YUV source into a VideoFrame object, taking into account
+// Copies a plane of YUV(A) source into a VideoFrame object, taking into account
// source and destinations dimensions.
//
// NOTE: rows is *not* the same as height!
@@ -29,8 +29,14 @@ MEDIA_EXPORT void CopyUPlane(const uint8* source, int stride, int rows,
VideoFrame* frame);
MEDIA_EXPORT void CopyVPlane(const uint8* source, int stride, int rows,
VideoFrame* frame);
+MEDIA_EXPORT void CopyAPlane(const uint8* source, int stride, int rows,
+ VideoFrame* frame);
+
+// Sets alpha plane values to be completely opaque (all 255's).
+MEDIA_EXPORT void MakeOpaqueAPlane(int stride, int rows, VideoFrame* frame);
+
// |plane| is one of VideoFrame::kYPlane, VideoFrame::kUPlane,
-// or VideoFrame::kVPlane.
+// VideoFrame::kVPlane, or VideoFrame::kAPlane.
MEDIA_EXPORT void CopyPlane(size_t plane, const uint8* source, int stride,
int rows, VideoFrame* frame);
diff --git a/media/base/yuv_convert.cc b/media/base/yuv_convert.cc
index 85b0699..1d09a24 100644
--- a/media/base/yuv_convert.cc
+++ b/media/base/yuv_convert.cc
@@ -603,4 +603,34 @@ void ConvertYUVToRGB32(const uint8* yplane,
#endif
}
+void ConvertYUVAToARGB(const uint8* yplane,
+ const uint8* uplane,
+ const uint8* vplane,
+ const uint8* aplane,
+ uint8* rgbframe,
+ int width,
+ int height,
+ int ystride,
+ int uvstride,
+ int astride,
+ int rgbstride,
+ YUVType yuv_type) {
+#if defined(ARCH_CPU_ARM_FAMILY) || defined(ARCH_CPU_MIPS_FAMILY)
+ ConvertYUVAToARGB_C(yplane, uplane, vplane, aplane, rgbframe,
+ width, height, ystride, uvstride, astride, rgbstride,
+ yuv_type);
+#else
+ static ConvertYUVAToARGBProc convert_proc = NULL;
+ if (!convert_proc) {
+ base::CPU cpu;
+ if (cpu.has_mmx())
+ convert_proc = &ConvertYUVAToARGB_MMX;
+ else
+ convert_proc = &ConvertYUVAToARGB_C;
+ }
+ convert_proc(yplane, uplane, vplane, aplane, rgbframe,
+ width, height, ystride, uvstride, astride, rgbstride, yuv_type);
+#endif
+}
+
} // namespace media
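A minimal caller sketch for the new entry point (plane pointers and strides here are hypothetical; a real caller is the skcanvas_video_renderer.cc change below):

// Illustrative: convert one YV12A frame to premultiplied ARGB.
media::ConvertYUVAToARGB(y_plane, u_plane, v_plane, a_plane,
                         argb_out,
                         width, height,
                         y_stride, uv_stride, a_stride,
                         argb_row_bytes,
                         media::YV12);  // YV12 chroma layout, plus alpha.

As with ConvertYUVToRGB32() above it, the MMX path is selected once through a lazily initialized function pointer, falling back to the C implementation on CPUs without MMX (and unconditionally on ARM and MIPS).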
diff --git a/media/base/yuv_convert.h b/media/base/yuv_convert.h
index 30c07ab..0e53193 100644
--- a/media/base/yuv_convert.h
+++ b/media/base/yuv_convert.h
@@ -60,6 +60,21 @@ void ConvertYUVToRGB32(const uint8* yplane,
int rgbstride,
YUVType yuv_type);
+// Convert a frame of YUVA to 32 bit ARGB.
+// Pass in YV12A plane data.
+void ConvertYUVAToARGB(const uint8* yplane,
+ const uint8* uplane,
+ const uint8* vplane,
+ const uint8* aplane,
+ uint8* rgbframe,
+ int width,
+ int height,
+ int ystride,
+ int uvstride,
+ int astride,
+ int rgbstride,
+ YUVType yuv_type);
+
// Scale a frame of YUV to 32 bit ARGB.
// Supports rotation and mirroring.
void ScaleYUVToRGB32(const uint8* yplane,
diff --git a/media/ffmpeg/ffmpeg_common.cc b/media/ffmpeg/ffmpeg_common.cc
index 12a4ec9..cc7dfc2 100644
--- a/media/ffmpeg/ffmpeg_common.cc
+++ b/media/ffmpeg/ffmpeg_common.cc
@@ -382,6 +382,12 @@ void AVStreamToVideoDecoderConfig(
if (key)
is_encrypted = true;
+ AVDictionaryEntry* webm_alpha =
+ av_dict_get(stream->metadata, "alpha_mode", NULL, 0);
+ if (webm_alpha && !strcmp(webm_alpha->value, "1")) {
+ format = VideoFrame::YV12A;
+ }
+
config->Initialize(codec,
profile,
format,
@@ -490,6 +496,8 @@ VideoFrame::Format PixelFormatToVideoFormat(PixelFormat pixel_format) {
case PIX_FMT_YUV420P:
case PIX_FMT_YUVJ420P:
return VideoFrame::YV12;
+ case PIX_FMT_YUVA420P:
+ return VideoFrame::YV12A;
default:
DVLOG(1) << "Unsupported PixelFormat: " << pixel_format;
}
@@ -502,6 +510,8 @@ PixelFormat VideoFormatToPixelFormat(VideoFrame::Format video_format) {
return PIX_FMT_YUV422P;
case VideoFrame::YV12:
return PIX_FMT_YUV420P;
+ case VideoFrame::YV12A:
+ return PIX_FMT_YUVA420P;
default:
DVLOG(1) << "Unsupported VideoFrame::Format: " << video_format;
}
diff --git a/media/filters/ffmpeg_demuxer.cc b/media/filters/ffmpeg_demuxer.cc
index fec580a..954b3ac 100644
--- a/media/filters/ffmpeg_demuxer.cc
+++ b/media/filters/ffmpeg_demuxer.cc
@@ -108,11 +108,26 @@ void FFmpegDemuxerStream::EnqueuePacket(ScopedAVPacket packet) {
LOG(ERROR) << "Format conversion failed.";
}
+ // Get side data if any. For now, the only type of side_data is VP8 Alpha. We
+ // keep this generic so that other side_data types in the future can be
+ // handled the same way as well.
+ av_packet_split_side_data(packet.get());
+ int side_data_size = 0;
+ uint8* side_data = av_packet_get_side_data(
+ packet.get(),
+ AV_PKT_DATA_MATROSKA_BLOCKADDITIONAL,
+ &side_data_size);
+
// If a packet is returned by FFmpeg's av_parser_parse2() the packet will
// reference inner memory of FFmpeg. As such we should transfer the packet
// into memory we control.
scoped_refptr<DecoderBuffer> buffer;
- buffer = DecoderBuffer::CopyFrom(packet->data, packet->size);
+ if (side_data_size > 0) {
+ buffer = DecoderBuffer::CopyFrom(packet.get()->data, packet.get()->size,
+ side_data, side_data_size);
+ } else {
+ buffer = DecoderBuffer::CopyFrom(packet.get()->data, packet.get()->size);
+ }
if ((type() == DemuxerStream::AUDIO && audio_config_.is_encrypted()) ||
(type() == DemuxerStream::VIDEO && video_config_.is_encrypted())) {
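For WebM alpha, the BlockAdditional side data extracted here carries an 8-byte big-endian BlockAddID followed by the alpha-channel VP8 bitstream; VpxVideoDecoder::Decode() (below) checks that the id is 1 before handing the remainder to its alpha decoder context.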
diff --git a/media/filters/pipeline_integration_test.cc b/media/filters/pipeline_integration_test.cc
index 2945db2..ef5694d 100644
--- a/media/filters/pipeline_integration_test.cc
+++ b/media/filters/pipeline_integration_test.cc
@@ -923,4 +923,12 @@ TEST_F(PipelineIntegrationTest, DISABLED_BasicPlayback_VP9_Opus_WebM) {
ASSERT_TRUE(WaitUntilOnEnded());
}
+// Verify that VP8 video with alpha channel can be played back.
+TEST_F(PipelineIntegrationTest, BasicPlayback_VP8A_WebM) {
+ ASSERT_TRUE(Start(GetTestDataFilePath("bear-vp8a.webm"),
+ PIPELINE_OK));
+ Play();
+ ASSERT_TRUE(WaitUntilOnEnded());
+}
+
} // namespace media
diff --git a/media/filters/skcanvas_video_renderer.cc b/media/filters/skcanvas_video_renderer.cc
index b478a73..94395bf 100644
--- a/media/filters/skcanvas_video_renderer.cc
+++ b/media/filters/skcanvas_video_renderer.cc
@@ -21,6 +21,17 @@ static bool IsEitherYV12OrYV16OrNative(media::VideoFrame::Format format) {
format == media::VideoFrame::NATIVE_TEXTURE;
}
+static bool IsEitherYV12OrYV12AOrYV16(media::VideoFrame::Format format) {
+ return IsEitherYV12OrYV16(format) ||
+ format == media::VideoFrame::YV12A;
+}
+
+static bool IsEitherYV12OrYV12AOrYV16OrNative(
+ media::VideoFrame::Format format) {
+ return IsEitherYV12OrYV16OrNative(format) ||
+ format == media::VideoFrame::YV12A;
+}
+
// CanFastPaint is a helper method to determine the conditions for fast
// painting. The conditions are:
// 1. No skew in canvas matrix.
@@ -70,7 +81,8 @@ static void FastPaint(
const SkBitmap& bitmap = canvas->getDevice()->accessBitmap(true);
media::YUVType yuv_type = media::YV16;
int y_shift = 0;
- if (video_frame->format() == media::VideoFrame::YV12) {
+ if (video_frame->format() == media::VideoFrame::YV12 ||
+ video_frame->format() == media::VideoFrame::YV12A) {
yuv_type = media::YV12;
y_shift = 1;
}
@@ -170,9 +182,9 @@ static void FastPaint(
static void ConvertVideoFrameToBitmap(
const scoped_refptr<media::VideoFrame>& video_frame,
SkBitmap* bitmap) {
- DCHECK(IsEitherYV12OrYV16OrNative(video_frame->format()))
+ DCHECK(IsEitherYV12OrYV12AOrYV16OrNative(video_frame->format()))
<< video_frame->format();
- if (IsEitherYV12OrYV16(video_frame->format())) {
+ if (IsEitherYV12OrYV12AOrYV16(video_frame->format())) {
DCHECK_EQ(video_frame->stride(media::VideoFrame::kUPlane),
video_frame->stride(media::VideoFrame::kVPlane));
}
@@ -189,45 +201,75 @@ static void ConvertVideoFrameToBitmap(
}
bitmap->lockPixels();
- if (IsEitherYV12OrYV16(video_frame->format())) {
- media::YUVType yuv_type = media::YV16;
- int y_shift = 0;
- if (video_frame->format() == media::VideoFrame::YV12) {
- yuv_type = media::YV12;
- y_shift = 1;
- }
+ size_t y_offset = 0;
+ size_t uv_offset = 0;
+ if (IsEitherYV12OrYV12AOrYV16(video_frame->format())) {
+ int y_shift = (video_frame->format() == media::VideoFrame::YV16) ? 0 : 1;
// Use the "left" and "top" of the destination rect to locate the offset
// in Y, U and V planes.
- size_t y_offset = (video_frame->stride(media::VideoFrame::kYPlane) *
- video_frame->visible_rect().y()) +
- video_frame->visible_rect().x();
-
+ y_offset = (video_frame->stride(media::VideoFrame::kYPlane) *
+ video_frame->visible_rect().y()) +
+ video_frame->visible_rect().x();
// For format YV12, there is one U, V value per 2x2 block.
// For format YV16, there is one U, V value per 2x1 block.
- size_t uv_offset = (video_frame->stride(media::VideoFrame::kUPlane) *
- (video_frame->visible_rect().y() >> y_shift)) +
- (video_frame->visible_rect().x() >> 1);
- uint8* frame_clip_y =
- video_frame->data(media::VideoFrame::kYPlane) + y_offset;
- uint8* frame_clip_u =
- video_frame->data(media::VideoFrame::kUPlane) + uv_offset;
- uint8* frame_clip_v =
- video_frame->data(media::VideoFrame::kVPlane) + uv_offset;
-
- media::ConvertYUVToRGB32(frame_clip_y,
- frame_clip_u,
- frame_clip_v,
- static_cast<uint8*>(bitmap->getPixels()),
- video_frame->visible_rect().width(),
- video_frame->visible_rect().height(),
- video_frame->stride(media::VideoFrame::kYPlane),
- video_frame->stride(media::VideoFrame::kUPlane),
- bitmap->rowBytes(),
- yuv_type);
- } else {
- DCHECK_EQ(video_frame->format(), media::VideoFrame::NATIVE_TEXTURE);
- video_frame->ReadPixelsFromNativeTexture(*bitmap);
+ uv_offset = (video_frame->stride(media::VideoFrame::kUPlane) *
+ (video_frame->visible_rect().y() >> y_shift)) +
+ (video_frame->visible_rect().x() >> 1);
+ }
+ switch (video_frame->format()) {
+ case media::VideoFrame::YV12:
+ media::ConvertYUVToRGB32(
+ video_frame->data(media::VideoFrame::kYPlane) + y_offset,
+ video_frame->data(media::VideoFrame::kUPlane) + uv_offset,
+ video_frame->data(media::VideoFrame::kVPlane) + uv_offset,
+ static_cast<uint8*>(bitmap->getPixels()),
+ video_frame->visible_rect().width(),
+ video_frame->visible_rect().height(),
+ video_frame->stride(media::VideoFrame::kYPlane),
+ video_frame->stride(media::VideoFrame::kUPlane),
+ bitmap->rowBytes(),
+ media::YV12);
+ break;
+
+ case media::VideoFrame::YV16:
+ media::ConvertYUVToRGB32(
+ video_frame->data(media::VideoFrame::kYPlane) + y_offset,
+ video_frame->data(media::VideoFrame::kUPlane) + uv_offset,
+ video_frame->data(media::VideoFrame::kVPlane) + uv_offset,
+ static_cast<uint8*>(bitmap->getPixels()),
+ video_frame->visible_rect().width(),
+ video_frame->visible_rect().height(),
+ video_frame->stride(media::VideoFrame::kYPlane),
+ video_frame->stride(media::VideoFrame::kUPlane),
+ bitmap->rowBytes(),
+ media::YV16);
+ break;
+
+ case media::VideoFrame::YV12A:
+ media::ConvertYUVAToARGB(
+ video_frame->data(media::VideoFrame::kYPlane) + y_offset,
+ video_frame->data(media::VideoFrame::kUPlane) + uv_offset,
+ video_frame->data(media::VideoFrame::kVPlane) + uv_offset,
+ video_frame->data(media::VideoFrame::kAPlane),
+ static_cast<uint8*>(bitmap->getPixels()),
+ video_frame->visible_rect().width(),
+ video_frame->visible_rect().height(),
+ video_frame->stride(media::VideoFrame::kYPlane),
+ video_frame->stride(media::VideoFrame::kUPlane),
+ video_frame->stride(media::VideoFrame::kAPlane),
+ bitmap->rowBytes(),
+ media::YV12);
+ break;
+
+ case media::VideoFrame::NATIVE_TEXTURE:
+ DCHECK_EQ(video_frame->format(), media::VideoFrame::NATIVE_TEXTURE);
+ video_frame->ReadPixelsFromNativeTexture(*bitmap);
+ break;
+
+ default:
+ NOTREACHED();
+ break;
}
bitmap->notifyPixelsChanged();
bitmap->unlockPixels();
@@ -255,7 +297,8 @@ void SkCanvasVideoRenderer::Paint(media::VideoFrame* video_frame,
// Paint black rectangle if there isn't a frame available or the
// frame has an unexpected format.
- if (!video_frame || !IsEitherYV12OrYV16OrNative(video_frame->format())) {
+ if (!video_frame ||
+ !IsEitherYV12OrYV12AOrYV16OrNative(video_frame->format())) {
canvas->drawRect(dest, paint);
return;
}
diff --git a/media/filters/vpx_video_decoder.cc b/media/filters/vpx_video_decoder.cc
index 0b92a7d..76054eb 100644
--- a/media/filters/vpx_video_decoder.cc
+++ b/media/filters/vpx_video_decoder.cc
@@ -11,6 +11,7 @@
#include "base/logging.h"
#include "base/message_loop_proxy.h"
#include "base/string_number_conversions.h"
+#include "base/sys_byteorder.h"
#include "media/base/bind_to_loop.h"
#include "media/base/decoder_buffer.h"
#include "media/base/demuxer_stream.h"
@@ -20,15 +21,6 @@
#include "media/base/video_frame.h"
#include "media/base/video_util.h"
-// Include libvpx header files.
-// VPX_CODEC_DISABLE_COMPAT excludes parts of the libvpx API that provide
-// backwards compatibility for legacy applications using the library.
-#define VPX_CODEC_DISABLE_COMPAT 1
-extern "C" {
-#include "third_party/libvpx/source/libvpx/vpx/vpx_decoder.h"
-#include "third_party/libvpx/source/libvpx/vpx/vp8dx.h"
-}
-
namespace media {
// Always try to use three threads for video decoding. There is little reason
@@ -58,13 +50,11 @@ static int GetThreadCount() {
VpxVideoDecoder::VpxVideoDecoder(
const scoped_refptr<base::MessageLoopProxy>& message_loop)
: message_loop_(message_loop),
- state_(kUninitialized),
- vpx_codec_(NULL) {
+ state_(kUninitialized) {
}
VpxVideoDecoder::~VpxVideoDecoder() {
DCHECK_EQ(kUninitialized, state_);
- CloseDecoder();
}
void VpxVideoDecoder::Initialize(
@@ -92,45 +82,60 @@ void VpxVideoDecoder::Initialize(
status_cb.Run(PIPELINE_OK);
}
-bool VpxVideoDecoder::ConfigureDecoder() {
- const VideoDecoderConfig& config = demuxer_stream_->video_decoder_config();
- if (!config.IsValidConfig()) {
- DLOG(ERROR) << "Invalid video stream config: "
- << config.AsHumanReadableString();
- return false;
- }
-
- if (config.codec() != kCodecVP9)
- return false;
-
- CloseDecoder();
-
- vpx_codec_ = new vpx_codec_ctx();
+static scoped_ptr<vpx_codec_ctx, VpxDeleter> InitializeVpxContext(
+ scoped_ptr<vpx_codec_ctx, VpxDeleter> context,
+ const VideoDecoderConfig& config) {
+ context.reset(new vpx_codec_ctx());
vpx_codec_dec_cfg_t vpx_config = {0};
vpx_config.w = config.coded_size().width();
vpx_config.h = config.coded_size().height();
vpx_config.threads = GetThreadCount();
- vpx_codec_err_t status = vpx_codec_dec_init(vpx_codec_,
- vpx_codec_vp9_dx(),
+ vpx_codec_err_t status = vpx_codec_dec_init(context.get(),
+ config.codec() == kCodecVP9 ?
+ vpx_codec_vp9_dx() :
+ vpx_codec_vp8_dx(),
&vpx_config,
0);
if (status != VPX_CODEC_OK) {
LOG(ERROR) << "vpx_codec_dec_init failed, status=" << status;
- delete vpx_codec_;
- vpx_codec_ = NULL;
+ context.reset();
+ }
+ return context.Pass();
+}
+
+bool VpxVideoDecoder::ConfigureDecoder() {
+ const VideoDecoderConfig& config = demuxer_stream_->video_decoder_config();
+ if (!config.IsValidConfig()) {
+ DLOG(ERROR) << "Invalid video stream config: "
+ << config.AsHumanReadableString();
return false;
}
- return true;
-}
+ const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
+ bool can_handle = false;
+ if (cmd_line->HasSwitch(switches::kEnableVp9Playback) &&
+ config.codec() == kCodecVP9) {
+ can_handle = true;
+ }
+ if (cmd_line->HasSwitch(switches::kEnableVp8AlphaPlayback) &&
+ config.codec() == kCodecVP8 && config.format() == VideoFrame::YV12A) {
+ can_handle = true;
+ }
+ if (!can_handle)
+ return false;
+
+ vpx_codec_ = InitializeVpxContext(vpx_codec_.Pass(), config);
+ if (!vpx_codec_.get())
+ return false;
-void VpxVideoDecoder::CloseDecoder() {
- if (vpx_codec_) {
- vpx_codec_destroy(vpx_codec_);
- delete vpx_codec_;
- vpx_codec_ = NULL;
+ if (config.format() == VideoFrame::YV12A) {
+ vpx_codec_alpha_ = InitializeVpxContext(vpx_codec_alpha_.Pass(), config);
+ if (!vpx_codec_alpha_.get())
+ return false;
}
+
+ return true;
}
void VpxVideoDecoder::Read(const ReadCB& read_cb) {
@@ -246,7 +251,7 @@ void VpxVideoDecoder::DecodeBuffer(
}
// Any successful decode counts!
- if (buffer->GetDataSize()) {
+ if (buffer->GetDataSize() && buffer->GetSideDataSize()) {
PipelineStatistics statistics;
statistics.video_bytes_decoded = buffer->GetDataSize();
statistics_cb_.Run(statistics);
@@ -270,7 +275,7 @@ bool VpxVideoDecoder::Decode(
// Pass |buffer| to libvpx.
int64 timestamp = buffer->GetTimestamp().InMicroseconds();
void* user_priv = reinterpret_cast<void*>(&timestamp);
- vpx_codec_err_t status = vpx_codec_decode(vpx_codec_,
+ vpx_codec_err_t status = vpx_codec_decode(vpx_codec_.get(),
buffer->GetData(),
buffer->GetDataSize(),
user_priv,
@@ -282,7 +287,7 @@ bool VpxVideoDecoder::Decode(
// Gets pointer to decoded data.
vpx_codec_iter_t iter = NULL;
- const vpx_image_t* vpx_image = vpx_codec_get_frame(vpx_codec_, &iter);
+ const vpx_image_t* vpx_image = vpx_codec_get_frame(vpx_codec_.get(), &iter);
if (!vpx_image) {
*video_frame = NULL;
return true;
@@ -293,7 +298,45 @@ bool VpxVideoDecoder::Decode(
return false;
}
- CopyVpxImageTo(vpx_image, video_frame);
+ const vpx_image_t* vpx_image_alpha = NULL;
+ if (vpx_codec_alpha_.get() && buffer->GetSideDataSize() >= 8) {
+ // Pass alpha data to libvpx.
+ int64 timestamp_alpha = buffer->GetTimestamp().InMicroseconds();
+ void* user_priv_alpha = reinterpret_cast<void*>(&timestamp_alpha);
+
+ // First 8 bytes of side data is side_data_id in big endian.
+ const uint64 side_data_id = base::NetToHost64(
+ *(reinterpret_cast<const uint64*>(buffer->GetSideData())));
+ if (side_data_id == 1) {
+ status = vpx_codec_decode(vpx_codec_alpha_.get(),
+ buffer->GetSideData() + 8,
+ buffer->GetSideDataSize() - 8,
+ user_priv_alpha,
+ 0);
+
+ if (status != VPX_CODEC_OK) {
+ LOG(ERROR) << "vpx_codec_decode() failed on alpha, status=" << status;
+ return false;
+ }
+
+ // Gets pointer to decoded data.
+ vpx_codec_iter_t iter_alpha = NULL;
+ vpx_image_alpha = vpx_codec_get_frame(vpx_codec_alpha_.get(),
+ &iter_alpha);
+ if (!vpx_image_alpha) {
+ *video_frame = NULL;
+ return true;
+ }
+
+ if (vpx_image_alpha->user_priv !=
+ reinterpret_cast<void*>(&timestamp_alpha)) {
+ LOG(ERROR) << "Invalid output timestamp on alpha.";
+ return false;
+ }
+ }
+ }
+
+ CopyVpxImageTo(vpx_image, vpx_image_alpha, video_frame);
(*video_frame)->SetTimestamp(base::TimeDelta::FromMicroseconds(timestamp));
return true;
}
@@ -307,7 +350,8 @@ void VpxVideoDecoder::DoReset() {
}
void VpxVideoDecoder::CopyVpxImageTo(
- const vpx_image* vpx_image,
+ const struct vpx_image* vpx_image,
+ const struct vpx_image* vpx_image_alpha,
scoped_refptr<VideoFrame>* video_frame) {
CHECK(vpx_image);
CHECK_EQ(vpx_image->d_w % 2, 0U);
@@ -319,11 +363,14 @@ void VpxVideoDecoder::CopyVpxImageTo(
gfx::Size natural_size =
demuxer_stream_->video_decoder_config().natural_size();
- *video_frame = VideoFrame::CreateFrame(VideoFrame::YV12,
+ *video_frame = VideoFrame::CreateFrame(vpx_codec_alpha_.get() ?
+ VideoFrame::YV12A :
+ VideoFrame::YV12,
size,
gfx::Rect(size),
natural_size,
kNoTimestamp());
+
CopyYPlane(vpx_image->planes[VPX_PLANE_Y],
vpx_image->stride[VPX_PLANE_Y],
vpx_image->d_h,
@@ -336,6 +383,17 @@ void VpxVideoDecoder::CopyVpxImageTo(
vpx_image->stride[VPX_PLANE_V],
vpx_image->d_h / 2,
*video_frame);
+ if (!vpx_codec_alpha_.get())
+ return;
+ if (!vpx_image_alpha) {
+ MakeOpaqueAPlane(vpx_image->stride[VPX_PLANE_Y], vpx_image->d_h,
+ *video_frame);
+ return;
+ }
+ CopyAPlane(vpx_image_alpha->planes[VPX_PLANE_Y],
+ vpx_image->stride[VPX_PLANE_Y],
+ vpx_image->d_h,
+ *video_frame);
}
} // namespace media
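A standalone restatement of the side-data split performed in Decode() above (illustrative helper, not part of the patch):

#include "base/sys_byteorder.h"

// Returns true and points |alpha_payload| at the alpha VP8 bitstream if
// |side_data| is a BlockAdditional blob with BlockAddID == 1.
static bool SplitAlphaSideData(const uint8* side_data, int side_data_size,
                               const uint8** alpha_payload,
                               int* alpha_size) {
  if (side_data_size < 8)
    return false;
  const uint64 side_data_id = base::NetToHost64(
      *reinterpret_cast<const uint64*>(side_data));
  if (side_data_id != 1)
    return false;
  *alpha_payload = side_data + 8;
  *alpha_size = side_data_size - 8;
  return true;
}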
diff --git a/media/filters/vpx_video_decoder.h b/media/filters/vpx_video_decoder.h
index 77578fd..b363d8b 100644
--- a/media/filters/vpx_video_decoder.h
+++ b/media/filters/vpx_video_decoder.h
@@ -9,6 +9,16 @@
#include "base/memory/ref_counted.h"
#include "media/base/demuxer_stream.h"
#include "media/base/video_decoder.h"
+#include "media/base/video_frame.h"
+
+// Include libvpx header files.
+// VPX_CODEC_DISABLE_COMPAT excludes parts of the libvpx API that provide
+// backwards compatibility for legacy applications using the library.
+#define VPX_CODEC_DISABLE_COMPAT 1
+extern "C" {
+#include "third_party/libvpx/source/libvpx/vpx/vpx_decoder.h"
+#include "third_party/libvpx/source/libvpx/vpx/vp8dx.h"
+}
struct vpx_codec_ctx;
struct vpx_image;
@@ -19,6 +29,15 @@ class MessageLoopProxy;
namespace media {
+struct VpxDeleter {
+ inline void operator()(vpx_codec_ctx* ptr) const {
+ if (ptr) {
+ vpx_codec_destroy(ptr);
+ delete ptr;
+ }
+ }
+};
+
class MEDIA_EXPORT VpxVideoDecoder : public VideoDecoder {
public:
explicit VpxVideoDecoder(
@@ -47,7 +66,6 @@ class MEDIA_EXPORT VpxVideoDecoder : public VideoDecoder {
// Returns true when initialization was successful.
bool ConfigureDecoder();
- void CloseDecoder();
void ReadFromDemuxerStream();
// Carries out the buffer processing operation scheduled by
@@ -62,7 +80,8 @@ class MEDIA_EXPORT VpxVideoDecoder : public VideoDecoder {
// Reset decoder and call |reset_cb_|.
void DoReset();
- void CopyVpxImageTo(const vpx_image* vpx_image,
+ void CopyVpxImageTo(const struct vpx_image* vpx_image,
+ const struct vpx_image* vpx_image_alpha,
scoped_refptr<VideoFrame>* video_frame);
scoped_refptr<base::MessageLoopProxy> message_loop_;
@@ -76,7 +95,8 @@ class MEDIA_EXPORT VpxVideoDecoder : public VideoDecoder {
// Pointer to the demuxer stream that will feed us compressed buffers.
scoped_refptr<DemuxerStream> demuxer_stream_;
- vpx_codec_ctx* vpx_codec_;
+ scoped_ptr<vpx_codec_ctx, VpxDeleter> vpx_codec_;
+ scoped_ptr<vpx_codec_ctx, VpxDeleter> vpx_codec_alpha_;
DISALLOW_COPY_AND_ASSIGN(VpxVideoDecoder);
};
diff --git a/media/media.gyp b/media/media.gyp
index 0f836bc..1e37b14 100644
--- a/media/media.gyp
+++ b/media/media.gyp
@@ -1169,6 +1169,8 @@
'base/simd/convert_yuv_to_rgb_mmx.inc',
'base/simd/convert_yuv_to_rgb_sse.asm',
'base/simd/convert_yuv_to_rgb_x86.cc',
+ 'base/simd/convert_yuva_to_argb_mmx.asm',
+ 'base/simd/convert_yuva_to_argb_mmx.inc',
'base/simd/empty_register_state_mmx.asm',
'base/simd/filter_yuv.h',
'base/simd/filter_yuv_c.cc',
diff --git a/webkit/media/filter_helpers.cc b/webkit/media/filter_helpers.cc
index c803f69..00cf121 100644
--- a/webkit/media/filter_helpers.cc
+++ b/webkit/media/filter_helpers.cc
@@ -36,20 +36,21 @@ static void AddDefaultDecodersToCollection(
const scoped_refptr<base::MessageLoopProxy>& message_loop,
media::FilterCollection* filter_collection) {
- scoped_refptr<media::FFmpegVideoDecoder> ffmpeg_video_decoder =
- new media::FFmpegVideoDecoder(message_loop);
- filter_collection->GetVideoDecoders()->push_back(ffmpeg_video_decoder);
-
// TODO(phajdan.jr): Remove ifdefs when libvpx with vp9 support is released
// (http://crbug.com/174287) .
#if !defined(MEDIA_DISABLE_LIBVPX)
const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
- if (cmd_line->HasSwitch(switches::kEnableVp9Playback)) {
+ if (cmd_line->HasSwitch(switches::kEnableVp9Playback) ||
+ cmd_line->HasSwitch(switches::kEnableVp8AlphaPlayback)) {
scoped_refptr<media::VpxVideoDecoder> vpx_video_decoder =
new media::VpxVideoDecoder(message_loop);
filter_collection->GetVideoDecoders()->push_back(vpx_video_decoder);
}
#endif // !defined(MEDIA_DISABLE_LIBVPX)
+
+ scoped_refptr<media::FFmpegVideoDecoder> ffmpeg_video_decoder =
+ new media::FFmpegVideoDecoder(message_loop);
+ filter_collection->GetVideoDecoders()->push_back(ffmpeg_video_decoder);
}
void BuildMediaSourceCollection(
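Note the ordering change in AddDefaultDecodersToCollection() above: the VpxVideoDecoder is now pushed onto the decoder list before the FFmpegVideoDecoder, so when either flag is set the libvpx decoder is offered VP8/VP9 streams first and FFmpeg remains the fallback for anything it declines.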