summaryrefslogtreecommitdiffstats
path: root/media
diff options
context:
space:
mode:
Diffstat (limited to 'media')
-rw-r--r--media/filters/vpx_video_decoder.cc442
-rw-r--r--media/filters/vpx_video_decoder.h26
-rw-r--r--media/renderers/default_renderer_factory.cc2
-rw-r--r--media/test/pipeline_integration_test_base.cc3
4 files changed, 231 insertions, 242 deletions
diff --git a/media/filters/vpx_video_decoder.cc b/media/filters/vpx_video_decoder.cc
index 0a14d3e..d006f6f 100644
--- a/media/filters/vpx_video_decoder.cc
+++ b/media/filters/vpx_video_decoder.cc
@@ -26,11 +26,7 @@
#include "base/trace_event/trace_event.h"
#include "media/base/bind_to_current_loop.h"
#include "media/base/decoder_buffer.h"
-#include "media/base/limits.h"
#include "media/base/media_switches.h"
-#include "media/base/pipeline.h"
-#include "media/base/timestamp_constants.h"
-#include "media/base/video_util.h"
// Include libvpx header files.
// VPX_CODEC_DISABLE_COMPAT excludes parts of the libvpx API that provide
@@ -42,6 +38,8 @@ extern "C" {
#include "third_party/libvpx_new/source/libvpx/vpx/vpx_frame_buffer.h"
}
+#include "third_party/libyuv/include/libyuv/convert.h"
+
namespace media {
// Always try to use three threads for video decoding. There is little reason
@@ -78,6 +76,29 @@ static int GetThreadCount(const VideoDecoderConfig& config) {
return decode_threads;
}
+static vpx_codec_ctx* InitializeVpxContext(vpx_codec_ctx* context,
+ const VideoDecoderConfig& config) {
+ context = new vpx_codec_ctx();
+ vpx_codec_dec_cfg_t vpx_config = {0};
+ vpx_config.w = config.coded_size().width();
+ vpx_config.h = config.coded_size().height();
+ vpx_config.threads = GetThreadCount(config);
+
+ vpx_codec_err_t status = vpx_codec_dec_init(
+ context,
+ config.codec() == kCodecVP9 ? vpx_codec_vp9_dx() : vpx_codec_vp8_dx(),
+ &vpx_config, 0 /* flags */);
+ if (status == VPX_CODEC_OK)
+ return context;
+
+ DLOG(ERROR) << "vpx_codec_dec_init() failed: " << vpx_codec_error(context);
+ delete context;
+ return nullptr;
+}
+
+// MemoryPool is a pool of simple CPU memory, allocated by hand and used by both
+// VP9 and any data consumers. This class needs to be ref-counted to hold on to
+// allocated memory via the memory-release callback of CreateFrameCallback().
class VpxVideoDecoder::MemoryPool
: public base::RefCountedThreadSafe<VpxVideoDecoder::MemoryPool>,
public base::trace_event::MemoryDumpProvider {
@@ -100,10 +121,10 @@ class VpxVideoDecoder::MemoryPool
static int32 ReleaseVP9FrameBuffer(void *user_priv,
vpx_codec_frame_buffer *fb);
- // Generates a "no_longer_needed" closure that holds a reference
- // to this pool.
+ // Generates a "no_longer_needed" closure that holds a reference to this pool.
base::Closure CreateFrameCallback(void* fb_priv_data);
+ // base::MemoryDumpProvider.
bool OnMemoryDump(const base::trace_event::MemoryDumpArgs& args,
base::trace_event::ProcessMemoryDump* pmd) override;
@@ -137,17 +158,23 @@ class VpxVideoDecoder::MemoryPool
int in_use_by_decoder_ = 0;
// Number of VP9FrameBuffer currently in use by the decoder and a video frame.
int in_use_by_decoder_and_video_frame_ = 0;
+
DISALLOW_COPY_AND_ASSIGN(MemoryPool);
};
-VpxVideoDecoder::MemoryPool::MemoryPool() {}
+VpxVideoDecoder::MemoryPool::MemoryPool() {
+ base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
+ this, "VpxVideoDecoder", base::ThreadTaskRunnerHandle::Get());
+}
VpxVideoDecoder::MemoryPool::~MemoryPool() {
STLDeleteElements(&frame_buffers_);
+ base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider(
+ this);
}
VpxVideoDecoder::MemoryPool::VP9FrameBuffer*
- VpxVideoDecoder::MemoryPool::GetFreeFrameBuffer(size_t min_size) {
+VpxVideoDecoder::MemoryPool::GetFreeFrameBuffer(size_t min_size) {
// Check if a free frame buffer exists.
size_t i = 0;
for (; i < frame_buffers_.size(); ++i) {
@@ -210,8 +237,7 @@ base::Closure VpxVideoDecoder::MemoryPool::CreateFrameCallback(
if (frame_buffer->ref_cnt > 1)
++in_use_by_decoder_and_video_frame_;
return BindToCurrentLoop(
- base::Bind(&MemoryPool::OnVideoFrameDestroyed, this,
- frame_buffer));
+ base::Bind(&MemoryPool::OnVideoFrameDestroyed, this, frame_buffer));
}
bool VpxVideoDecoder::MemoryPool::OnMemoryDump(
@@ -228,9 +254,8 @@ bool VpxVideoDecoder::MemoryPool::OnMemoryDump(
size_t bytes_used = 0;
size_t bytes_reserved = 0;
for (const VP9FrameBuffer* frame_buffer : frame_buffers_) {
- if (frame_buffer->ref_cnt) {
+ if (frame_buffer->ref_cnt)
bytes_used += frame_buffer->data.size();
- }
bytes_reserved += frame_buffer->data.size();
}
@@ -260,15 +285,13 @@ void VpxVideoDecoder::MemoryPool::OnVideoFrameDestroyed(
--in_use_by_decoder_and_video_frame_;
}
-VpxVideoDecoder::VpxVideoDecoder(
- const scoped_refptr<base::SingleThreadTaskRunner>& task_runner)
- : task_runner_(task_runner),
- state_(kUninitialized),
- vpx_codec_(NULL),
- vpx_codec_alpha_(NULL) {}
+VpxVideoDecoder::VpxVideoDecoder()
+ : state_(kUninitialized), vpx_codec_(nullptr), vpx_codec_alpha_(nullptr) {
+ thread_checker_.DetachFromThread();
+}
VpxVideoDecoder::~VpxVideoDecoder() {
- DCHECK(task_runner_->BelongsToCurrentThread());
+ DCHECK(thread_checker_.CalledOnValidThread());
CloseDecoder();
}
@@ -277,13 +300,12 @@ std::string VpxVideoDecoder::GetDisplayName() const {
}
void VpxVideoDecoder::Initialize(const VideoDecoderConfig& config,
- bool low_delay,
+ bool /* low_delay */,
const SetCdmReadyCB& /* set_cdm_ready_cb */,
const InitCB& init_cb,
const OutputCB& output_cb) {
- DCHECK(task_runner_->BelongsToCurrentThread());
+ DCHECK(thread_checker_.CalledOnValidThread());
DCHECK(config.IsValidConfig());
- DCHECK(decode_cb_.is_null());
InitCB bound_init_cb = BindToCurrentLoop(init_cb);
@@ -299,32 +321,64 @@ void VpxVideoDecoder::Initialize(const VideoDecoderConfig& config,
bound_init_cb.Run(true);
}
-static vpx_codec_ctx* InitializeVpxContext(vpx_codec_ctx* context,
- const VideoDecoderConfig& config) {
- context = new vpx_codec_ctx();
- vpx_codec_dec_cfg_t vpx_config = {0};
- vpx_config.w = config.coded_size().width();
- vpx_config.h = config.coded_size().height();
- vpx_config.threads = GetThreadCount(config);
+void VpxVideoDecoder::Decode(const scoped_refptr<DecoderBuffer>& buffer,
+ const DecodeCB& decode_cb) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(buffer.get());
+ DCHECK(!decode_cb.is_null());
+ DCHECK_NE(state_, kUninitialized)
+ << "Called Decode() before successful Initialize()";
+
+ DecodeCB bound_decode_cb = BindToCurrentLoop(decode_cb);
+
+ if (state_ == kError) {
+ bound_decode_cb.Run(kDecodeError);
+ return;
+ }
+ if (state_ == kDecodeFinished) {
+ bound_decode_cb.Run(kOk);
+ return;
+ }
+ if (state_ == kNormal && buffer->end_of_stream()) {
+ state_ = kDecodeFinished;
+ bound_decode_cb.Run(kOk);
+ return;
+ }
- vpx_codec_err_t status = vpx_codec_dec_init(context,
- config.codec() == kCodecVP9 ?
- vpx_codec_vp9_dx() :
- vpx_codec_vp8_dx(),
- &vpx_config,
- 0);
- if (status != VPX_CODEC_OK) {
- LOG(ERROR) << "vpx_codec_dec_init failed, status=" << status;
- delete context;
- return NULL;
+ scoped_refptr<VideoFrame> video_frame;
+ if (!VpxDecode(buffer, &video_frame)) {
+ state_ = kError;
+ bound_decode_cb.Run(kDecodeError);
+ return;
}
- return context;
+  // We might get a successful VpxDecode but not a frame if only a partial
+ // decode happened.
+ if (video_frame.get())
+ output_cb_.Run(video_frame);
+
+ // VideoDecoderShim expects |decode_cb| call after |output_cb_|.
+ bound_decode_cb.Run(kOk);
+}
+
+void VpxVideoDecoder::Reset(const base::Closure& closure) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ state_ = kNormal;
+  // PostTask() to avoid calling |closure| immediately.
+ base::ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, closure);
}
bool VpxVideoDecoder::ConfigureDecoder(const VideoDecoderConfig& config) {
if (config.codec() != kCodecVP8 && config.codec() != kCodecVP9)
return false;
+ // These are the combinations of codec-pixel format supported in principle.
+ // Note that VP9 does not support Alpha in the current implementation.
+ DCHECK(
+ (config.codec() == kCodecVP8 && config.format() == PIXEL_FORMAT_YV12) ||
+ (config.codec() == kCodecVP8 && config.format() == PIXEL_FORMAT_YV12A) ||
+ (config.codec() == kCodecVP9 && config.format() == PIXEL_FORMAT_YV12) ||
+ (config.codec() == kCodecVP9 && config.format() == PIXEL_FORMAT_YV24));
+
#if !defined(DISABLE_FFMPEG_VIDEO_DECODERS)
// When FFmpegVideoDecoder is available it handles VP8 that doesn't have
// alpha, and VpxVideoDecoder will handle VP8 with alpha.
@@ -338,105 +392,42 @@ bool VpxVideoDecoder::ConfigureDecoder(const VideoDecoderConfig& config) {
if (!vpx_codec_)
return false;
- // We use our own buffers for VP9 so that there is no need to copy data after
- // decoding.
+ // Configure VP9 to decode on our buffers to skip a data copy on decoding.
if (config.codec() == kCodecVP9) {
+ DCHECK_NE(PIXEL_FORMAT_YV12A, config.format());
+ DCHECK(vpx_codec_get_caps(vpx_codec_->iface) &
+ VPX_CODEC_CAP_EXTERNAL_FRAME_BUFFER);
+
memory_pool_ = new MemoryPool();
- base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
- memory_pool_.get(), "VpxVideoDecoder", task_runner_);
if (vpx_codec_set_frame_buffer_functions(vpx_codec_,
&MemoryPool::GetVP9FrameBuffer,
&MemoryPool::ReleaseVP9FrameBuffer,
memory_pool_.get())) {
- LOG(ERROR) << "Failed to configure external buffers.";
+ DLOG(ERROR) << "Failed to configure external buffers. "
+ << vpx_codec_error(vpx_codec_);
return false;
}
}
- if (config.format() == PIXEL_FORMAT_YV12A) {
- vpx_codec_alpha_ = InitializeVpxContext(vpx_codec_alpha_, config);
- if (!vpx_codec_alpha_)
- return false;
- }
+ if (config.format() != PIXEL_FORMAT_YV12A)
+ return true;
- return true;
+ vpx_codec_alpha_ = InitializeVpxContext(vpx_codec_alpha_, config);
+ return !!vpx_codec_alpha_;
}
void VpxVideoDecoder::CloseDecoder() {
if (vpx_codec_) {
vpx_codec_destroy(vpx_codec_);
delete vpx_codec_;
- vpx_codec_ = NULL;
- base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider(
- memory_pool_.get());
- memory_pool_ = NULL;
+ vpx_codec_ = nullptr;
+ memory_pool_ = nullptr;
}
if (vpx_codec_alpha_) {
vpx_codec_destroy(vpx_codec_alpha_);
delete vpx_codec_alpha_;
- vpx_codec_alpha_ = NULL;
- }
-}
-
-void VpxVideoDecoder::Decode(const scoped_refptr<DecoderBuffer>& buffer,
- const DecodeCB& decode_cb) {
- DCHECK(task_runner_->BelongsToCurrentThread());
- DCHECK(!decode_cb.is_null());
- CHECK_NE(state_, kUninitialized);
- CHECK(decode_cb_.is_null()) << "Overlapping decodes are not supported.";
-
- decode_cb_ = BindToCurrentLoop(decode_cb);
-
- if (state_ == kError) {
- base::ResetAndReturn(&decode_cb_).Run(kDecodeError);
- return;
+ vpx_codec_alpha_ = nullptr;
}
-
- // Return empty frames if decoding has finished.
- if (state_ == kDecodeFinished) {
- base::ResetAndReturn(&decode_cb_).Run(kOk);
- return;
- }
-
- DecodeBuffer(buffer);
-}
-
-void VpxVideoDecoder::Reset(const base::Closure& closure) {
- DCHECK(task_runner_->BelongsToCurrentThread());
- DCHECK(decode_cb_.is_null());
-
- state_ = kNormal;
- task_runner_->PostTask(FROM_HERE, closure);
-}
-
-void VpxVideoDecoder::DecodeBuffer(const scoped_refptr<DecoderBuffer>& buffer) {
- DCHECK(task_runner_->BelongsToCurrentThread());
- DCHECK_NE(state_, kUninitialized);
- DCHECK_NE(state_, kDecodeFinished);
- DCHECK_NE(state_, kError);
- DCHECK(!decode_cb_.is_null());
- DCHECK(buffer.get());
-
- // Transition to kDecodeFinished on the first end of stream buffer.
- if (state_ == kNormal && buffer->end_of_stream()) {
- state_ = kDecodeFinished;
- base::ResetAndReturn(&decode_cb_).Run(kOk);
- return;
- }
-
- scoped_refptr<VideoFrame> video_frame;
- if (!VpxDecode(buffer, &video_frame)) {
- state_ = kError;
- base::ResetAndReturn(&decode_cb_).Run(kDecodeError);
- return;
- }
-
- if (video_frame.get())
- output_cb_.Run(video_frame);
-
- // VideoDecoderShim expects that |decode_cb| is called only after
- // |output_cb_|.
- base::ResetAndReturn(&decode_cb_).Run(kOk);
}
bool VpxVideoDecoder::VpxDecode(const scoped_refptr<DecoderBuffer>& buffer,
@@ -444,19 +435,16 @@ bool VpxVideoDecoder::VpxDecode(const scoped_refptr<DecoderBuffer>& buffer,
DCHECK(video_frame);
DCHECK(!buffer->end_of_stream());
- // Pass |buffer| to libvpx.
int64 timestamp = buffer->timestamp().InMicroseconds();
void* user_priv = reinterpret_cast<void*>(&timestamp);
-
{
TRACE_EVENT1("video", "vpx_codec_decode", "timestamp", timestamp);
- vpx_codec_err_t status = vpx_codec_decode(vpx_codec_,
- buffer->data(),
- buffer->data_size(),
- user_priv,
- 0);
+ vpx_codec_err_t status =
+ vpx_codec_decode(vpx_codec_, buffer->data(), buffer->data_size(),
+ user_priv, 0 /* deadline */);
if (status != VPX_CODEC_OK) {
- LOG(ERROR) << "vpx_codec_decode() failed, status=" << status;
+ DLOG(ERROR) << "vpx_codec_decode() error: "
+ << vpx_codec_err_to_string(status);
return false;
}
}
@@ -465,93 +453,107 @@ bool VpxVideoDecoder::VpxDecode(const scoped_refptr<DecoderBuffer>& buffer,
vpx_codec_iter_t iter = NULL;
const vpx_image_t* vpx_image = vpx_codec_get_frame(vpx_codec_, &iter);
if (!vpx_image) {
- *video_frame = NULL;
+ *video_frame = nullptr;
return true;
}
- if (vpx_image->user_priv != reinterpret_cast<void*>(&timestamp)) {
- LOG(ERROR) << "Invalid output timestamp.";
+ if (vpx_image->user_priv != user_priv) {
+ DLOG(ERROR) << "Invalid output timestamp.";
return false;
}
- const vpx_image_t* vpx_image_alpha = NULL;
- if (vpx_codec_alpha_ && buffer->side_data_size() >= 8) {
- // Pass alpha data to libvpx.
- int64 timestamp_alpha = buffer->timestamp().InMicroseconds();
- void* user_priv_alpha = reinterpret_cast<void*>(&timestamp_alpha);
-
- // First 8 bytes of side data is side_data_id in big endian.
- const uint64 side_data_id = base::NetToHost64(
- *(reinterpret_cast<const uint64*>(buffer->side_data())));
- if (side_data_id == 1) {
- {
- TRACE_EVENT1("video", "vpx_codec_decode_alpha",
- "timestamp_alpha", timestamp_alpha);
- vpx_codec_err_t status = vpx_codec_decode(vpx_codec_alpha_,
- buffer->side_data() + 8,
- buffer->side_data_size() - 8,
- user_priv_alpha,
- 0);
- if (status != VPX_CODEC_OK) {
- LOG(ERROR) << "vpx_codec_decode() failed on alpha, status=" << status;
- return false;
- }
- }
-
- // Gets pointer to decoded data.
- vpx_codec_iter_t iter_alpha = NULL;
- vpx_image_alpha = vpx_codec_get_frame(vpx_codec_alpha_, &iter_alpha);
- if (!vpx_image_alpha) {
- *video_frame = NULL;
- return true;
- }
-
- if (vpx_image_alpha->user_priv !=
- reinterpret_cast<void*>(&timestamp_alpha)) {
- LOG(ERROR) << "Invalid output timestamp on alpha.";
- return false;
- }
-
- if (vpx_image_alpha->d_h != vpx_image->d_h ||
- vpx_image_alpha->d_w != vpx_image->d_w) {
- LOG(ERROR) << "The alpha plane dimensions are not the same as the "
- "image dimensions.";
- return false;
- }
+ CopyVpxImageToVideoFrame(vpx_image, video_frame);
+ (*video_frame)->set_timestamp(base::TimeDelta::FromMicroseconds(timestamp));
+
+  // Default to the color space from the config, but if the bitstream specifies
+ // one, prefer that instead.
+ ColorSpace color_space = config_.color_space();
+ if (vpx_image->cs == VPX_CS_BT_709)
+ color_space = COLOR_SPACE_HD_REC709;
+ else if (vpx_image->cs == VPX_CS_BT_601)
+ color_space = COLOR_SPACE_SD_REC601;
+ (*video_frame)
+ ->metadata()
+ ->SetInteger(VideoFrameMetadata::COLOR_SPACE, color_space);
+
+ if (!vpx_codec_alpha_)
+ return true;
+
+ if (buffer->side_data_size() < 8) {
+ // TODO(mcasas): Is this a warning or an error?
+ DLOG(WARNING) << "Making Alpha channel opaque due to missing input";
+ const uint32 kAlphaOpaqueValue = 255;
+ libyuv::SetPlane((*video_frame)->visible_data(VideoFrame::kAPlane),
+ (*video_frame)->stride(VideoFrame::kAPlane),
+ (*video_frame)->visible_rect().width(),
+ (*video_frame)->visible_rect().height(),
+ kAlphaOpaqueValue);
+ return true;
+ }
+
+ // First 8 bytes of side data is |side_data_id| in big endian.
+ const uint64 side_data_id = base::NetToHost64(
+ *(reinterpret_cast<const uint64*>(buffer->side_data())));
+ if (side_data_id != 1)
+ return true;
+
+ // Try and decode buffer->side_data() minus the first 8 bytes as a full frame.
+ int64 timestamp_alpha = buffer->timestamp().InMicroseconds();
+ void* user_priv_alpha = reinterpret_cast<void*>(&timestamp_alpha);
+ {
+ TRACE_EVENT1("video", "vpx_codec_decode_alpha", "timestamp_alpha",
+ timestamp_alpha);
+ vpx_codec_err_t status = vpx_codec_decode(
+ vpx_codec_alpha_, buffer->side_data() + 8, buffer->side_data_size() - 8,
+ user_priv_alpha, 0 /* deadline */);
+ if (status != VPX_CODEC_OK) {
+ DLOG(ERROR) << "vpx_codec_decode() failed for the alpha: "
+ << vpx_codec_error(vpx_codec_);
+ return false;
}
}
- CopyVpxImageTo(vpx_image, vpx_image_alpha, video_frame);
- (*video_frame)->set_timestamp(base::TimeDelta::FromMicroseconds(timestamp));
+ vpx_codec_iter_t iter_alpha = NULL;
+ const vpx_image_t* vpx_image_alpha =
+ vpx_codec_get_frame(vpx_codec_alpha_, &iter_alpha);
+ if (!vpx_image_alpha) {
+ *video_frame = nullptr;
+ return true;
+ }
+
+ if (vpx_image_alpha->user_priv != user_priv_alpha) {
+ DLOG(ERROR) << "Invalid output timestamp on alpha.";
+ return false;
+ }
+
+ if (vpx_image_alpha->d_h != vpx_image->d_h ||
+ vpx_image_alpha->d_w != vpx_image->d_w) {
+ DLOG(ERROR) << "The alpha plane dimensions are not the same as the "
+ "image dimensions.";
+ return false;
+ }
+
+ libyuv::CopyPlane(vpx_image_alpha->planes[VPX_PLANE_Y],
+ vpx_image_alpha->stride[VPX_PLANE_Y],
+ (*video_frame)->visible_data(VideoFrame::kAPlane),
+ (*video_frame)->stride(VideoFrame::kAPlane),
+ (*video_frame)->visible_rect().width(),
+ (*video_frame)->visible_rect().height());
return true;
}
-void VpxVideoDecoder::CopyVpxImageTo(const vpx_image* vpx_image,
- const struct vpx_image* vpx_image_alpha,
- scoped_refptr<VideoFrame>* video_frame) {
+void VpxVideoDecoder::CopyVpxImageToVideoFrame(
+ const struct vpx_image* vpx_image,
+ scoped_refptr<VideoFrame>* video_frame) {
CHECK(vpx_image);
CHECK(vpx_image->fmt == VPX_IMG_FMT_I420 ||
- vpx_image->fmt == VPX_IMG_FMT_YV12 ||
vpx_image->fmt == VPX_IMG_FMT_I444);
VideoPixelFormat codec_format = PIXEL_FORMAT_YV12;
- int uv_rows = (vpx_image->d_h + 1) / 2;
-
- if (vpx_image->fmt == VPX_IMG_FMT_I444) {
- CHECK(!vpx_codec_alpha_);
+ if (vpx_image->fmt == VPX_IMG_FMT_I444)
codec_format = PIXEL_FORMAT_YV24;
- uv_rows = vpx_image->d_h;
- } else if (vpx_codec_alpha_) {
+ else if (vpx_codec_alpha_)
codec_format = PIXEL_FORMAT_YV12A;
- }
-
- // Default to the color space from the config, but if the bistream specifies
- // one, prefer that instead.
- ColorSpace color_space = config_.color_space();
- if (vpx_image->cs == VPX_CS_BT_709)
- color_space = COLOR_SPACE_HD_REC709;
- else if (vpx_image->cs == VPX_CS_BT_601)
- color_space = COLOR_SPACE_SD_REC601;
// The mixed |w|/|d_h| in |coded_size| is intentional. Setting the correct
// coded width is necessary to allow coalesced memory access, which may avoid
@@ -560,7 +562,9 @@ void VpxVideoDecoder::CopyVpxImageTo(const vpx_image* vpx_image,
const gfx::Size coded_size(vpx_image->w, vpx_image->d_h);
const gfx::Size visible_size(vpx_image->d_w, vpx_image->d_h);
- if (!vpx_codec_alpha_ && memory_pool_.get()) {
+ if (memory_pool_.get()) {
+ DCHECK_EQ(kCodecVP9, config_.codec());
+ DCHECK(!vpx_codec_alpha_) << "Uh-oh, VP9 and Alpha shouldn't coexist.";
*video_frame = VideoFrame::WrapExternalYuvData(
codec_format,
coded_size, gfx::Rect(visible_size), config_.natural_size(),
@@ -573,8 +577,6 @@ void VpxVideoDecoder::CopyVpxImageTo(const vpx_image* vpx_image,
kNoTimestamp());
video_frame->get()->AddDestructionObserver(
memory_pool_->CreateFrameCallback(vpx_image->fb_priv));
- video_frame->get()->metadata()->SetInteger(VideoFrameMetadata::COLOR_SPACE,
- color_space);
UMA_HISTOGRAM_COUNTS("Media.Vpx.VideoDecoderBuffersInUseByDecoder",
memory_pool_->NumberOfFrameBuffersInUseByDecoder());
@@ -585,38 +587,24 @@ void VpxVideoDecoder::CopyVpxImageTo(const vpx_image* vpx_image,
return;
}
+ DCHECK(codec_format == PIXEL_FORMAT_YV12 ||
+ codec_format == PIXEL_FORMAT_YV12A);
+
*video_frame = frame_pool_.CreateFrame(
- codec_format,
- visible_size,
- gfx::Rect(visible_size),
- config_.natural_size(),
- kNoTimestamp());
- video_frame->get()->metadata()->SetInteger(VideoFrameMetadata::COLOR_SPACE,
- color_space);
-
- CopyYPlane(vpx_image->planes[VPX_PLANE_Y],
- vpx_image->stride[VPX_PLANE_Y],
- vpx_image->d_h,
- video_frame->get());
- CopyUPlane(vpx_image->planes[VPX_PLANE_U],
- vpx_image->stride[VPX_PLANE_U],
- uv_rows,
- video_frame->get());
- CopyVPlane(vpx_image->planes[VPX_PLANE_V],
- vpx_image->stride[VPX_PLANE_V],
- uv_rows,
- video_frame->get());
- if (!vpx_codec_alpha_)
- return;
- if (!vpx_image_alpha) {
- MakeOpaqueAPlane(
- vpx_image->stride[VPX_PLANE_Y], vpx_image->d_h, video_frame->get());
- return;
- }
- CopyAPlane(vpx_image_alpha->planes[VPX_PLANE_Y],
- vpx_image_alpha->stride[VPX_PLANE_Y],
- vpx_image_alpha->d_h,
- video_frame->get());
+ codec_format, visible_size, gfx::Rect(visible_size),
+ config_.natural_size(), kNoTimestamp());
+
+ libyuv::I420Copy(
+ vpx_image->planes[VPX_PLANE_Y], vpx_image->stride[VPX_PLANE_Y],
+ vpx_image->planes[VPX_PLANE_U], vpx_image->stride[VPX_PLANE_U],
+ vpx_image->planes[VPX_PLANE_V], vpx_image->stride[VPX_PLANE_V],
+ (*video_frame)->visible_data(VideoFrame::kYPlane),
+ (*video_frame)->stride(VideoFrame::kYPlane),
+ (*video_frame)->visible_data(VideoFrame::kUPlane),
+ (*video_frame)->stride(VideoFrame::kUPlane),
+ (*video_frame)->visible_data(VideoFrame::kVPlane),
+ (*video_frame)->stride(VideoFrame::kVPlane), coded_size.width(),
+ coded_size.height());
}
} // namespace media
diff --git a/media/filters/vpx_video_decoder.h b/media/filters/vpx_video_decoder.h
index 5bbc086..1938dca 100644
--- a/media/filters/vpx_video_decoder.h
+++ b/media/filters/vpx_video_decoder.h
@@ -6,6 +6,7 @@
#define MEDIA_FILTERS_VPX_VIDEO_DECODER_H_
#include "base/callback.h"
+#include "base/threading/thread_checker.h"
#include "media/base/demuxer_stream.h"
#include "media/base/video_decoder.h"
#include "media/base/video_decoder_config.h"
@@ -23,12 +24,14 @@ namespace media {
// Libvpx video decoder wrapper.
// Note: VpxVideoDecoder accepts only YV12A VP8 content or VP9 content. This is
-// done to avoid usurping FFmpeg for all vp8 decoding, because the FFmpeg VP8
+// done to avoid usurping FFmpeg for all VP8 decoding, because the FFmpeg VP8
// decoder is faster than the libvpx VP8 decoder.
+// The alpha channel, if any, is sent in the DecoderBuffer's side_data() as a
+// separate frame, of which only the Y channel is used [1].
+// [1] http://wiki.webmproject.org/alpha-channel
class MEDIA_EXPORT VpxVideoDecoder : public VideoDecoder {
public:
- explicit VpxVideoDecoder(
- const scoped_refptr<base::SingleThreadTaskRunner>& task_runner);
+ VpxVideoDecoder();
~VpxVideoDecoder() override;
// VideoDecoder implementation.
@@ -57,29 +60,28 @@ class MEDIA_EXPORT VpxVideoDecoder : public VideoDecoder {
void CloseDecoder();
- void DecodeBuffer(const scoped_refptr<DecoderBuffer>& buffer);
+ // Try to decode |buffer| into |video_frame|. Return true if all decoding
+ // succeeded. Note that decoding can succeed and still |video_frame| be
+ // nullptr if there has been a partial decoding.
bool VpxDecode(const scoped_refptr<DecoderBuffer>& buffer,
scoped_refptr<VideoFrame>* video_frame);
- void CopyVpxImageTo(const vpx_image* vpx_image,
- const struct vpx_image* vpx_image_alpha,
- scoped_refptr<VideoFrame>* video_frame);
+ void CopyVpxImageToVideoFrame(const struct vpx_image* vpx_image,
+ scoped_refptr<VideoFrame>* video_frame);
- scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
+ base::ThreadChecker thread_checker_;
DecoderState state_;
OutputCB output_cb_;
- // TODO(xhwang): Merge DecodeBuffer() into Decode() and remove this.
- DecodeCB decode_cb_;
-
VideoDecoderConfig config_;
vpx_codec_ctx* vpx_codec_;
vpx_codec_ctx* vpx_codec_alpha_;
- // Memory pool used for VP9 decoding.
+ // |memory_pool_| is a single-threaded memory pool used for VP9 decoding
+ // with no alpha. |frame_pool_| is used for all other cases.
class MemoryPool;
scoped_refptr<MemoryPool> memory_pool_;
diff --git a/media/renderers/default_renderer_factory.cc b/media/renderers/default_renderer_factory.cc
index 6ebc372..c1c2aaa 100644
--- a/media/renderers/default_renderer_factory.cc
+++ b/media/renderers/default_renderer_factory.cc
@@ -78,7 +78,7 @@ scoped_ptr<Renderer> DefaultRendererFactory::CreateRenderer(
video_decoders.push_back(new GpuVideoDecoder(gpu_factories_));
#if !defined(MEDIA_DISABLE_LIBVPX)
- video_decoders.push_back(new VpxVideoDecoder(media_task_runner));
+ video_decoders.push_back(new VpxVideoDecoder());
#endif
#if !defined(MEDIA_DISABLE_FFMPEG) && !defined(DISABLE_FFMPEG_VIDEO_DECODERS)
diff --git a/media/test/pipeline_integration_test_base.cc b/media/test/pipeline_integration_test_base.cc
index ca9dee9..9676b50 100644
--- a/media/test/pipeline_integration_test_base.cc
+++ b/media/test/pipeline_integration_test_base.cc
@@ -231,8 +231,7 @@ void PipelineIntegrationTestBase::CreateDemuxer(const std::string& filename) {
scoped_ptr<Renderer> PipelineIntegrationTestBase::CreateRenderer() {
ScopedVector<VideoDecoder> video_decoders;
#if !defined(MEDIA_DISABLE_LIBVPX)
- video_decoders.push_back(
- new VpxVideoDecoder(message_loop_.task_runner()));
+ video_decoders.push_back(new VpxVideoDecoder());
#endif // !defined(MEDIA_DISABLE_LIBVPX)
#if !defined(MEDIA_DISABLE_FFMPEG)