author     emircan <emircan@chromium.org>        2016-03-08 14:13:50 -0800
committer  Commit bot <commit-bot@chromium.org>  2016-03-08 22:15:06 +0000
commit     ec1e7ff3b75787d3290ba5a899db8ef4ca79d302 (patch)
tree       e7aa9389e05cb94f8f2240291fc3a68bfd4aa811
parent     bddee54521cae58e45f7ff92a2bf5819d7fe49de (diff)
Handle Alpha channel in Canvas capture
This CL changes the output frame format in canvas capture from I420 to
YV12A so that the alpha channel is preserved. The change was discussed
in [0]. As a result, each MediaStreamSink [1] becomes responsible for
handling YV12A frames; for sinks that cannot consume alpha yet,
media::WrapAsI420VideoFrame() is added to drop it.

[0] https://github.com/w3c/mediacapture-fromelement/issues/30
[1] https://code.google.com/p/chromium/codesearch#chromium/src/content/public/renderer/media_stream_sink.h&l=18&ct=xref_jump_to_def&cl=GROK&gsn=MediaStreamSink

BUG=524218
TEST=https://cdn.rawgit.com/uysalere/js-demos/master/canvascapture3.html
TBR=bbudge@chromium.org

Review URL: https://codereview.chromium.org/1737253002

Cr-Commit-Position: refs/heads/master@{#379932}
-rw-r--r--  chrome/renderer/media/cast_rtp_stream.cc                             14
-rw-r--r--  content/renderer/media/canvas_capture_handler.cc                     34
-rw-r--r--  content/renderer/media/canvas_capture_handler_unittest.cc            53
-rw-r--r--  content/renderer/media/media_stream_video_renderer_sink_unittest.cc  45
-rw-r--r--  content/renderer/media/media_stream_video_track.cc                    6
-rw-r--r--  content/renderer/media/video_track_adapter.cc                         4
-rw-r--r--  content/renderer/media/video_track_recorder.cc                       14
-rw-r--r--  content/renderer/media/webmediaplayer_ms.cc                           2
-rw-r--r--  content/renderer/media/webrtc/webrtc_video_capturer_adapter.cc       16
-rw-r--r--  content/renderer/pepper/pepper_media_stream_video_track_host.cc       9
-rw-r--r--  content/renderer/pepper/pepper_video_source_host.cc                  14
-rw-r--r--  media/base/mac/video_frame_mac_unittests.cc                           2
-rw-r--r--  media/base/video_frame.cc                                            41
-rw-r--r--  media/base/video_frame.h                                              1
-rw-r--r--  media/base/video_frame_pool.cc                                        2
-rw-r--r--  media/base/video_frame_unittest.cc                                    5
-rw-r--r--  media/base/video_util.cc                                             23
-rw-r--r--  media/base/video_util.h                                               5

18 files changed, 228 insertions(+), 62 deletions(-)
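Most of the sink-side churn above follows a single pattern: accept YV12A
alongside I420/YV12, and reinterpret the frame as I420 where alpha is not yet
supported. A minimal sketch of that pattern (illustrative only: OnVideoFrame
is a placeholder entry point, not an identifier from this CL; the media::
calls are the ones introduced in the diff below):

  void OnVideoFrame(const scoped_refptr<media::VideoFrame>& video_frame) {
    // Only planar YUV formats are expected from canvas capture.
    if (!(video_frame->format() == media::PIXEL_FORMAT_I420 ||
          video_frame->format() == media::PIXEL_FORMAT_YV12 ||
          video_frame->format() == media::PIXEL_FORMAT_YV12A)) {
      NOTREACHED();
      return;
    }
    scoped_refptr<media::VideoFrame> frame = video_frame;
    // Sinks that cannot consume alpha wrap the YV12A frame as I420. The
    // wrapper aliases the Y/U/V planes (no copy) and holds a reference
    // that keeps the original frame alive.
    if (frame->format() == media::PIXEL_FORMAT_YV12A)
      frame = media::WrapAsI420VideoFrame(video_frame);
    // ... encode/render |frame| as before ...
  }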
diff --git a/chrome/renderer/media/cast_rtp_stream.cc b/chrome/renderer/media/cast_rtp_stream.cc
index 3c7d101..fb5afff 100644
--- a/chrome/renderer/media/cast_rtp_stream.cc
+++ b/chrome/renderer/media/cast_rtp_stream.cc
@@ -30,6 +30,7 @@
#include "media/base/bind_to_current_loop.h"
#include "media/base/limits.h"
#include "media/base/video_frame.h"
+#include "media/base/video_util.h"
#include "media/cast/cast_config.h"
#include "media/cast/cast_sender.h"
#include "media/cast/net/cast_transport_config.h"
@@ -322,12 +323,23 @@ class CastVideoSink : public base::SupportsWeakPtr<CastVideoSink>,
const CastRtpStream::ErrorCallback& error_callback,
const scoped_refptr<media::cast::VideoFrameInput> frame_input,
// These parameters are passed for each frame.
- const scoped_refptr<media::VideoFrame>& frame,
+ const scoped_refptr<media::VideoFrame>& video_frame,
base::TimeTicks estimated_capture_time) {
const base::TimeTicks timestamp = estimated_capture_time.is_null()
? base::TimeTicks::Now()
: estimated_capture_time;
+ if (!(video_frame->format() == media::PIXEL_FORMAT_I420 ||
+ video_frame->format() == media::PIXEL_FORMAT_YV12 ||
+ video_frame->format() == media::PIXEL_FORMAT_YV12A)) {
+ NOTREACHED();
+ return;
+ }
+ scoped_refptr<media::VideoFrame> frame = video_frame;
+ // Drop alpha channel since we do not support it yet.
+ if (frame->format() == media::PIXEL_FORMAT_YV12A)
+ frame = media::WrapAsI420VideoFrame(video_frame);
+
// Used by chrome/browser/extension/api/cast_streaming/performance_test.cc
TRACE_EVENT_INSTANT2(
"cast_perf_test", "MediaStreamVideoSink::OnVideoFrame",
diff --git a/content/renderer/media/canvas_capture_handler.cc b/content/renderer/media/canvas_capture_handler.cc
index 634267e..4a44740 100644
--- a/content/renderer/media/canvas_capture_handler.cc
+++ b/content/renderer/media/canvas_capture_handler.cc
@@ -39,11 +39,13 @@ class CanvasCaptureHandler::VideoCapturerSource
double max_requested_frame_rate,
const VideoCaptureDeviceFormatsCB& callback) override {
const blink::WebSize& size = canvas_handler_->GetSourceSize();
- const media::VideoCaptureFormat format(gfx::Size(size.width, size.height),
- frame_rate_,
- media::PIXEL_FORMAT_I420);
media::VideoCaptureFormats formats;
- formats.push_back(format);
+ formats.push_back(
+ media::VideoCaptureFormat(gfx::Size(size.width, size.height),
+ frame_rate_, media::PIXEL_FORMAT_I420));
+ formats.push_back(
+ media::VideoCaptureFormat(gfx::Size(size.width, size.height),
+ frame_rate_, media::PIXEL_FORMAT_YV12A));
callback.Run(formats);
}
void StartCapture(const media::VideoCaptureParams& params,
@@ -166,8 +168,8 @@ void CanvasCaptureHandler::StopVideoCapture() {
void CanvasCaptureHandler::CreateNewFrame(const SkImage* image) {
DCHECK(thread_checker_.CalledOnValidThread());
-
DCHECK(image);
+
const gfx::Size size(image->width(), image->height());
if (size != last_size) {
temp_data_.resize(
@@ -176,16 +178,23 @@ void CanvasCaptureHandler::CreateNewFrame(const SkImage* image) {
media::VideoFrame::RowBytes(0, media::PIXEL_FORMAT_ARGB, size.width());
image_info_ =
SkImageInfo::Make(size.width(), size.height(), kBGRA_8888_SkColorType,
- kPremul_SkAlphaType);
+ kUnpremul_SkAlphaType);
last_size = size;
}
- image->readPixels(image_info_, &temp_data_[0], row_bytes_, 0, 0);
- scoped_refptr<media::VideoFrame> video_frame =
- frame_pool_.CreateFrame(media::PIXEL_FORMAT_I420, size, gfx::Rect(size),
- size, base::TimeTicks::Now() - base::TimeTicks());
+ if (!image->readPixels(image_info_, &temp_data_[0], row_bytes_, 0, 0)) {
+ DLOG(ERROR) << "Couldn't read SkImage pixels";
+ return;
+ }
+
+ const bool isOpaque = image->isOpaque();
+ scoped_refptr<media::VideoFrame> video_frame = frame_pool_.CreateFrame(
+ isOpaque ? media::PIXEL_FORMAT_I420 : media::PIXEL_FORMAT_YV12A, size,
+ gfx::Rect(size), size, base::TimeTicks::Now() - base::TimeTicks());
DCHECK(video_frame);
+ // TODO(emircan): Use https://code.google.com/p/libyuv/issues/detail?id=572
+ // when it becomes available.
libyuv::ARGBToI420(temp_data_.data(), row_bytes_,
video_frame->data(media::VideoFrame::kYPlane),
video_frame->stride(media::VideoFrame::kYPlane),
@@ -194,6 +203,11 @@ void CanvasCaptureHandler::CreateNewFrame(const SkImage* image) {
video_frame->data(media::VideoFrame::kVPlane),
video_frame->stride(media::VideoFrame::kVPlane),
size.width(), size.height());
+ if (!isOpaque) {
+ for (int p = 0; p < size.GetArea(); ++p)
+ video_frame->data(media::VideoFrame::kAPlane)[p] = temp_data_[p * 4 + 3];
+ }
+
io_task_runner_->PostTask(
FROM_HERE,
base::Bind(&CanvasCaptureHandler::CanvasCaptureHandlerDelegate::
diff --git a/content/renderer/media/canvas_capture_handler_unittest.cc b/content/renderer/media/canvas_capture_handler_unittest.cc
index 8519bd1..8c449a3 100644
--- a/content/renderer/media/canvas_capture_handler_unittest.cc
+++ b/content/renderer/media/canvas_capture_handler_unittest.cc
@@ -22,9 +22,12 @@ using ::testing::InSequence;
using ::testing::Mock;
using ::testing::SaveArg;
using ::testing::Test;
+using ::testing::TestWithParam;
namespace content {
+namespace {
+
static const int kTestCanvasCaptureWidth = 320;
static const int kTestCanvasCaptureHeight = 240;
static const double kTestCanvasCaptureFramesPerSecond = 55.5;
@@ -32,12 +35,15 @@ static const double kTestCanvasCaptureFramesPerSecond = 55.5;
static const int kTestCanvasCaptureFrameWidth = 2;
static const int kTestCanvasCaptureFrameHeight = 2;
static const int kTestCanvasCaptureFrameErrorTolerance = 2;
+static const int kTestAlphaValue = 175;
ACTION_P(RunClosure, closure) {
closure.Run();
}
-class CanvasCaptureHandlerTest : public Test {
+} // namespace
+
+class CanvasCaptureHandlerTest : public TestWithParam<bool> {
public:
CanvasCaptureHandlerTest() {}
@@ -76,27 +82,37 @@ class CanvasCaptureHandlerTest : public Test {
void OnRunning(bool state) { DoOnRunning(state); }
// Verify returned frames.
- static skia::RefPtr<SkImage> GenerateTestImage() {
+ static skia::RefPtr<SkImage> GenerateTestImage(bool opaque) {
+
SkBitmap testBitmap;
testBitmap.allocN32Pixels(kTestCanvasCaptureFrameWidth,
- kTestCanvasCaptureFrameHeight);
- testBitmap.eraseColor(SK_ColorBLUE);
+ kTestCanvasCaptureFrameHeight, opaque);
+ testBitmap.eraseARGB(kTestAlphaValue, 30, 60, 200);
return skia::AdoptRef(SkImage::NewFromBitmap(testBitmap));
}
void OnVerifyDeliveredFrame(
+ bool opaque,
const scoped_refptr<media::VideoFrame>& video_frame,
base::TimeTicks estimated_capture_time) {
- EXPECT_EQ(media::PIXEL_FORMAT_I420, video_frame->format());
+ if (opaque)
+ EXPECT_EQ(media::PIXEL_FORMAT_I420, video_frame->format());
+ else
+ EXPECT_EQ(media::PIXEL_FORMAT_YV12A, video_frame->format());
+
const gfx::Size& size = video_frame->coded_size();
EXPECT_EQ(kTestCanvasCaptureFrameWidth, size.width());
EXPECT_EQ(kTestCanvasCaptureFrameHeight, size.height());
- const uint8_t* y_plane = video_frame->data(0);
- EXPECT_NEAR(41, y_plane[0], kTestCanvasCaptureFrameErrorTolerance);
- const uint8_t* u_plane = video_frame->data(1);
- EXPECT_NEAR(239, u_plane[0], kTestCanvasCaptureFrameErrorTolerance);
- const uint8_t* v_plane = video_frame->data(2);
- EXPECT_NEAR(110, v_plane[0], kTestCanvasCaptureFrameErrorTolerance);
+ const uint8_t* y_plane = video_frame->data(media::VideoFrame::kYPlane);
+ EXPECT_NEAR(74, y_plane[0], kTestCanvasCaptureFrameErrorTolerance);
+ const uint8_t* u_plane = video_frame->data(media::VideoFrame::kUPlane);
+ EXPECT_NEAR(193, u_plane[0], kTestCanvasCaptureFrameErrorTolerance);
+ const uint8_t* v_plane = video_frame->data(media::VideoFrame::kVPlane);
+ EXPECT_NEAR(105, v_plane[0], kTestCanvasCaptureFrameErrorTolerance);
+ if (!opaque) {
+ const uint8_t* a_plane = video_frame->data(media::VideoFrame::kAPlane);
+ EXPECT_EQ(kTestAlphaValue, a_plane[0]);
+ }
}
blink::WebMediaStreamTrack track_;
@@ -125,7 +141,7 @@ TEST_F(CanvasCaptureHandlerTest, ConstructAndDestruct) {
}
// Checks that VideoCapturerSource call sequence works fine.
-TEST_F(CanvasCaptureHandlerTest, GetFormatsStartAndStop) {
+TEST_P(CanvasCaptureHandlerTest, GetFormatsStartAndStop) {
InSequence s;
const blink::WebMediaStreamSource& web_media_stream_source = track_.source();
EXPECT_FALSE(web_media_stream_source.isNull());
@@ -146,7 +162,7 @@ TEST_F(CanvasCaptureHandlerTest, GetFormatsStartAndStop) {
media::limits::kMaxFramesPerSecond /* max_requested_frame_rate */,
base::Bind(&CanvasCaptureHandlerTest::OnVideoCaptureDeviceFormats,
base::Unretained(this)));
- ASSERT_EQ(1u, formats.size());
+ ASSERT_EQ(2u, formats.size());
EXPECT_EQ(kTestCanvasCaptureWidth, formats[0].frame_size.width());
EXPECT_EQ(kTestCanvasCaptureHeight, formats[0].frame_size.height());
media::VideoCaptureParams params;
@@ -162,14 +178,15 @@ TEST_F(CanvasCaptureHandlerTest, GetFormatsStartAndStop) {
params, base::Bind(&CanvasCaptureHandlerTest::OnDeliverFrame,
base::Unretained(this)),
base::Bind(&CanvasCaptureHandlerTest::OnRunning, base::Unretained(this)));
- canvas_capture_handler_->sendNewFrame(GenerateTestImage().get());
+ canvas_capture_handler_->sendNewFrame(GenerateTestImage(GetParam()).get());
run_loop.Run();
source->StopCapture();
}
// Verifies that SkImage is processed and produces VideoFrame as expected.
-TEST_F(CanvasCaptureHandlerTest, VerifyFrame) {
+TEST_P(CanvasCaptureHandlerTest, VerifyOpaqueFrame) {
+ const bool isOpaque = GetParam();
InSequence s;
media::VideoCapturerSource* const source =
GetVideoCapturerSource(static_cast<MediaStreamVideoCapturerSource*>(
@@ -181,9 +198,9 @@ TEST_F(CanvasCaptureHandlerTest, VerifyFrame) {
media::VideoCaptureParams params;
source->StartCapture(
params, base::Bind(&CanvasCaptureHandlerTest::OnVerifyDeliveredFrame,
- base::Unretained(this)),
+ base::Unretained(this), isOpaque),
base::Bind(&CanvasCaptureHandlerTest::OnRunning, base::Unretained(this)));
- canvas_capture_handler_->sendNewFrame(GenerateTestImage().get());
+ canvas_capture_handler_->sendNewFrame(GenerateTestImage(isOpaque).get());
run_loop.RunUntilIdle();
}
@@ -199,4 +216,6 @@ TEST_F(CanvasCaptureHandlerTest, CheckNeedsNewFrame) {
EXPECT_FALSE(canvas_capture_handler_->needsNewFrame());
}
+INSTANTIATE_TEST_CASE_P(, CanvasCaptureHandlerTest, ::testing::Bool());
+
} // namespace content
diff --git a/content/renderer/media/media_stream_video_renderer_sink_unittest.cc b/content/renderer/media/media_stream_video_renderer_sink_unittest.cc
index 32b23e3..77edaf8 100644
--- a/content/renderer/media/media_stream_video_renderer_sink_unittest.cc
+++ b/content/renderer/media/media_stream_video_renderer_sink_unittest.cc
@@ -39,12 +39,11 @@ class MediaStreamVideoRendererSinkTest : public testing::Test {
registry_.AddVideoTrack(kTestVideoTrackId);
// Extract the Blink Video Track for the MSVRSink.
- blink::WebVector<blink::WebMediaStreamTrack> video_tracks;
- registry_.test_stream().videoTracks(video_tracks);
- EXPECT_EQ(1u, video_tracks.size());
+ registry_.test_stream().videoTracks(video_tracks_);
+ EXPECT_EQ(1u, video_tracks_.size());
media_stream_video_renderer_sink_ = new MediaStreamVideoRendererSink(
- video_tracks[0],
+ video_tracks_[0],
base::Bind(&MediaStreamVideoRendererSinkTest::ErrorCallback,
base::Unretained(this)),
base::Bind(&MediaStreamVideoRendererSinkTest::RepaintCallback,
@@ -91,6 +90,8 @@ class MediaStreamVideoRendererSinkTest : public testing::Test {
// and Sources in |registry_| into believing they are on the right threads.
base::MessageLoopForUI message_loop_;
const ChildProcess child_process_;
+
+ blink::WebVector<blink::WebMediaStreamTrack> video_tracks_;
MockMediaStreamRegistry registry_;
private:
@@ -158,4 +159,40 @@ TEST_F(MediaStreamVideoRendererSinkAsyncAddFrameReadyTest,
media_stream_video_renderer_sink_->Stop();
}
+class MediaStreamVideoRendererSinkTransparencyTest
+ : public MediaStreamVideoRendererSinkTest {
+ public:
+ MediaStreamVideoRendererSinkTransparencyTest() {
+ media_stream_video_renderer_sink_ = new MediaStreamVideoRendererSink(
+ video_tracks_[0],
+ base::Bind(&MediaStreamVideoRendererSinkTest::ErrorCallback,
+ base::Unretained(this)),
+ base::Bind(&MediaStreamVideoRendererSinkTransparencyTest::
+ VerifyTransparentFrame,
+ base::Unretained(this)),
+ message_loop_.task_runner(), message_loop_.task_runner().get(),
+ nullptr /* gpu_factories */);
+ }
+
+ void VerifyTransparentFrame(const scoped_refptr<media::VideoFrame>& frame) {
+ EXPECT_EQ(media::PIXEL_FORMAT_YV12A, frame->format());
+ }
+};
+
+TEST_F(MediaStreamVideoRendererSinkTransparencyTest,
+ SendTransparentFrame) {
+ media_stream_video_renderer_sink_->Start();
+
+ InSequence s;
+ const gfx::Size kSize(10, 10);
+ const base::TimeDelta kTimestamp = base::TimeDelta();
+ const scoped_refptr<media::VideoFrame> video_frame =
+ media::VideoFrame::CreateFrame(media::PIXEL_FORMAT_YV12A, kSize,
+ gfx::Rect(kSize), kSize, kTimestamp);
+ OnVideoFrame(video_frame);
+ message_loop_.RunUntilIdle();
+
+ media_stream_video_renderer_sink_->Stop();
+}
+
} // namespace content
diff --git a/content/renderer/media/media_stream_video_track.cc b/content/renderer/media/media_stream_video_track.cc
index 6ff4688..e9cc8e0 100644
--- a/content/renderer/media/media_stream_video_track.cc
+++ b/content/renderer/media/media_stream_video_track.cc
@@ -175,9 +175,9 @@ MediaStreamVideoTrack::FrameDeliverer::GetBlackFrame(
// Wrap |black_frame_| so we get a fresh timestamp we can modify. Frames
// returned from this function may still be in use.
scoped_refptr<media::VideoFrame> wrapped_black_frame =
- media::VideoFrame::WrapVideoFrame(
- black_frame_, black_frame_->visible_rect(),
- black_frame_->natural_size());
+ media::VideoFrame::WrapVideoFrame(black_frame_, black_frame_->format(),
+ black_frame_->visible_rect(),
+ black_frame_->natural_size());
if (!wrapped_black_frame)
return nullptr;
wrapped_black_frame->AddDestructionObserver(
diff --git a/content/renderer/media/video_track_adapter.cc b/content/renderer/media/video_track_adapter.cc
index f4ca1d2..bf737bd 100644
--- a/content/renderer/media/video_track_adapter.cc
+++ b/content/renderer/media/video_track_adapter.cc
@@ -266,8 +266,8 @@ void VideoTrackAdapter::VideoFrameResolutionAdapter::DeliverFrame(
const gfx::Rect region_in_frame =
media::ComputeLetterboxRegion(frame->visible_rect(), desired_size);
- video_frame =
- media::VideoFrame::WrapVideoFrame(frame, region_in_frame, desired_size);
+ video_frame = media::VideoFrame::WrapVideoFrame(
+ frame, frame->format(), region_in_frame, desired_size);
if (!video_frame)
return;
video_frame->AddDestructionObserver(
diff --git a/content/renderer/media/video_track_recorder.cc b/content/renderer/media/video_track_recorder.cc
index 1dc73b7..390a0ff 100644
--- a/content/renderer/media/video_track_recorder.cc
+++ b/content/renderer/media/video_track_recorder.cc
@@ -14,6 +14,7 @@
#include "base/time/time.h"
#include "base/trace_event/trace_event.h"
#include "media/base/video_frame.h"
+#include "media/base/video_util.h"
#include "ui/gfx/geometry/size.h"
extern "C" {
@@ -186,12 +187,23 @@ void VideoTrackRecorder::VpxEncoder::StartFrameEncode(
}
void VideoTrackRecorder::VpxEncoder::EncodeOnEncodingThread(
- const scoped_refptr<VideoFrame>& frame,
+ const scoped_refptr<VideoFrame>& video_frame,
base::TimeTicks capture_timestamp) {
TRACE_EVENT0("video",
"VideoTrackRecorder::VpxEncoder::EncodeOnEncodingThread");
DCHECK(encoding_thread_->task_runner()->BelongsToCurrentThread());
+ if (!(video_frame->format() == media::PIXEL_FORMAT_I420 ||
+ video_frame->format() == media::PIXEL_FORMAT_YV12 ||
+ video_frame->format() == media::PIXEL_FORMAT_YV12A)) {
+ NOTREACHED();
+ return;
+ }
+ scoped_refptr<media::VideoFrame> frame = video_frame;
+ // Drop alpha channel since we do not support it yet.
+ if (frame->format() == media::PIXEL_FORMAT_YV12A)
+ frame = media::WrapAsI420VideoFrame(video_frame);
+
const gfx::Size frame_size = frame->visible_rect().size();
if (!IsInitialized() ||
gfx::Size(codec_config_.g_w, codec_config_.g_h) != frame_size) {
diff --git a/content/renderer/media/webmediaplayer_ms.cc b/content/renderer/media/webmediaplayer_ms.cc
index 9f59a76..4134b84 100644
--- a/content/renderer/media/webmediaplayer_ms.cc
+++ b/content/renderer/media/webmediaplayer_ms.cc
@@ -495,7 +495,7 @@ void WebMediaPlayerMS::OnFrameAvailable(
video_weblayer_.reset(new cc_blink::WebLayerImpl(
cc::VideoLayer::Create(cc::LayerSettings(),
compositor_.get(), media::VIDEO_ROTATION_0)));
- video_weblayer_->layer()->SetContentsOpaque(true);
+ video_weblayer_->layer()->SetContentsOpaque(false);
video_weblayer_->SetContentsOpaqueIsFixed(true);
get_client()->setWebLayer(video_weblayer_.get());
}
diff --git a/content/renderer/media/webrtc/webrtc_video_capturer_adapter.cc b/content/renderer/media/webrtc/webrtc_video_capturer_adapter.cc
index 962164c..10ad857 100644
--- a/content/renderer/media/webrtc/webrtc_video_capturer_adapter.cc
+++ b/content/renderer/media/webrtc/webrtc_video_capturer_adapter.cc
@@ -11,6 +11,7 @@
#include "media/base/timestamp_constants.h"
#include "media/base/video_frame.h"
#include "media/base/video_frame_pool.h"
+#include "media/base/video_util.h"
#include "third_party/libyuv/include/libyuv/convert_from.h"
#include "third_party/libyuv/include/libyuv/scale.h"
#include "third_party/webrtc/common_video/include/video_frame_buffer.h"
@@ -94,7 +95,8 @@ class WebRtcVideoCapturerAdapter::MediaVideoFrameFactory
const gfx::Size output_size(output_width, output_height);
scoped_refptr<media::VideoFrame> video_frame =
- media::VideoFrame::WrapVideoFrame(frame_, visible_rect, output_size);
+ media::VideoFrame::WrapVideoFrame(frame_, frame_->format(),
+ visible_rect, output_size);
if (!video_frame)
return nullptr;
video_frame->AddDestructionObserver(
@@ -216,16 +218,22 @@ bool WebRtcVideoCapturerAdapter::GetBestCaptureFormat(
}
void WebRtcVideoCapturerAdapter::OnFrameCaptured(
- const scoped_refptr<media::VideoFrame>& frame) {
+ const scoped_refptr<media::VideoFrame>& video_frame) {
DCHECK(thread_checker_.CalledOnValidThread());
TRACE_EVENT0("video", "WebRtcVideoCapturerAdapter::OnFrameCaptured");
- if (!(frame->IsMappable() && (frame->format() == media::PIXEL_FORMAT_I420 ||
- frame->format() == media::PIXEL_FORMAT_YV12))) {
+ if (!(video_frame->IsMappable() &&
+ (video_frame->format() == media::PIXEL_FORMAT_I420 ||
+ video_frame->format() == media::PIXEL_FORMAT_YV12 ||
+ video_frame->format() == media::PIXEL_FORMAT_YV12A))) {
// Since connecting sources and sinks do not check the format, we need to
// just ignore formats that we can not handle.
NOTREACHED();
return;
}
+ scoped_refptr<media::VideoFrame> frame = video_frame;
+ // Drop alpha channel since we do not support it yet.
+ if (frame->format() == media::PIXEL_FORMAT_YV12A)
+ frame = media::WrapAsI420VideoFrame(video_frame);
// Inject the frame via the VideoFrameFactory of base class.
MediaVideoFrameFactory* media_video_frame_factory =
diff --git a/content/renderer/pepper/pepper_media_stream_video_track_host.cc b/content/renderer/pepper/pepper_media_stream_video_track_host.cc
index d0e47c2..e834b10 100644
--- a/content/renderer/pepper/pepper_media_stream_video_track_host.cc
+++ b/content/renderer/pepper/pepper_media_stream_video_track_host.cc
@@ -13,6 +13,7 @@
#include "base/strings/utf_string_conversions.h"
#include "content/renderer/media/media_stream_video_track.h"
#include "media/base/bind_to_current_loop.h"
+#include "media/base/video_util.h"
#include "media/base/yuv_convert.h"
#include "ppapi/c/pp_errors.h"
#include "ppapi/c/ppb_media_stream_video_track.h"
@@ -370,10 +371,14 @@ int32_t PepperMediaStreamVideoTrackHost::SendFrameToTrack(int32_t index) {
}
void PepperMediaStreamVideoTrackHost::OnVideoFrame(
- const scoped_refptr<VideoFrame>& frame,
+ const scoped_refptr<VideoFrame>& video_frame,
base::TimeTicks estimated_capture_time) {
- DCHECK(frame.get());
+ DCHECK(video_frame.get());
// TODO(penghuang): Check |frame->end_of_stream()| and close the track.
+ scoped_refptr<media::VideoFrame> frame = video_frame;
+ // Drop alpha channel since we do not support it yet.
+ if (frame->format() == media::PIXEL_FORMAT_YV12A)
+ frame = media::WrapAsI420VideoFrame(video_frame);
PP_VideoFrame_Format ppformat = ToPpapiFormat(frame->format());
if (ppformat == PP_VIDEOFRAME_FORMAT_UNKNOWN)
return;
diff --git a/content/renderer/pepper/pepper_video_source_host.cc b/content/renderer/pepper/pepper_video_source_host.cc
index db20126..cac83eb 100644
--- a/content/renderer/pepper/pepper_video_source_host.cc
+++ b/content/renderer/pepper/pepper_video_source_host.cc
@@ -12,6 +12,7 @@
#include "content/renderer/media/video_track_to_pepper_adapter.h"
#include "content/renderer/pepper/ppb_image_data_impl.h"
#include "content/renderer/render_thread_impl.h"
+#include "media/base/video_util.h"
#include "ppapi/c/pp_errors.h"
#include "ppapi/host/dispatch_host_message.h"
#include "ppapi/host/ppapi_host.h"
@@ -55,10 +56,21 @@ PepperVideoSourceHost::FrameReceiver::FrameReceiver(
PepperVideoSourceHost::FrameReceiver::~FrameReceiver() {}
void PepperVideoSourceHost::FrameReceiver::GotFrame(
- const scoped_refptr<media::VideoFrame>& frame) {
+ const scoped_refptr<media::VideoFrame>& video_frame) {
DCHECK(thread_checker_.CalledOnValidThread());
if (!host_)
return;
+
+ if (!(video_frame->format() == media::PIXEL_FORMAT_I420 ||
+ video_frame->format() == media::PIXEL_FORMAT_YV12A)) {
+ NOTREACHED();
+ return;
+ }
+ scoped_refptr<media::VideoFrame> frame = video_frame;
+ // Drop alpha channel since we do not support it yet.
+ if (frame->format() == media::PIXEL_FORMAT_YV12A)
+ frame = media::WrapAsI420VideoFrame(video_frame);
+
// Hold a reference to the new frame and release the previous.
host_->last_frame_ = frame;
if (host_->get_frame_pending_)
diff --git a/media/base/mac/video_frame_mac_unittests.cc b/media/base/mac/video_frame_mac_unittests.cc
index 475d9aa..c0d8109 100644
--- a/media/base/mac/video_frame_mac_unittests.cc
+++ b/media/base/mac/video_frame_mac_unittests.cc
@@ -94,7 +94,7 @@ TEST(VideoFrameMac, CheckLifetime) {
int instances_destroyed = 0;
auto wrapper_frame = VideoFrame::WrapVideoFrame(
- frame, frame->visible_rect(), frame->natural_size());
+ frame, frame->format(), frame->visible_rect(), frame->natural_size());
wrapper_frame->AddDestructionObserver(
base::Bind(&Increment, &instances_destroyed));
ASSERT_TRUE(wrapper_frame.get());
diff --git a/media/base/video_frame.cc b/media/base/video_frame.cc
index ea7370b..b4a8392f 100644
--- a/media/base/video_frame.cc
+++ b/media/base/video_frame.cc
@@ -83,6 +83,18 @@ static bool IsStorageTypeMappable(VideoFrame::StorageType storage_type) {
storage_type == VideoFrame::STORAGE_MOJO_SHARED_BUFFER);
}
+// Checks if |source_format| can be wrapped into a |target_format| frame.
+static bool AreValidPixelFormatsForWrap(VideoPixelFormat source_format,
+ VideoPixelFormat target_format) {
+ if (source_format == target_format)
+ return true;
+
+ // Other planar-to-planar format conversions can be added here if a
+ // use case arises.
+ return source_format == PIXEL_FORMAT_YV12A &&
+ target_format == PIXEL_FORMAT_I420;
+}
+
// static
bool VideoFrame::IsValidConfig(VideoPixelFormat format,
StorageType storage_type,
@@ -378,32 +390,39 @@ scoped_refptr<VideoFrame> VideoFrame::WrapCVPixelBuffer(
// static
scoped_refptr<VideoFrame> VideoFrame::WrapVideoFrame(
- const scoped_refptr<VideoFrame>& frame,
- const gfx::Rect& visible_rect,
- const gfx::Size& natural_size) {
+ const scoped_refptr<VideoFrame>& frame,
+ VideoPixelFormat format,
+ const gfx::Rect& visible_rect,
+ const gfx::Size& natural_size) {
// Frames with textures need mailbox info propagated, and there's no support
// for that here yet, see http://crbug/362521.
CHECK(!frame->HasTextures());
-
DCHECK(frame->visible_rect().Contains(visible_rect));
- if (!IsValidConfig(frame->format(), frame->storage_type(),
- frame->coded_size(), visible_rect, natural_size)) {
+ if (!AreValidPixelFormatsForWrap(frame->format(), format)) {
+ LOG(DFATAL) << __FUNCTION__ << " Invalid format conversion."
+ << VideoPixelFormatToString(frame->format()) << " to "
+ << VideoPixelFormatToString(format);
+ return nullptr;
+ }
+
+ if (!IsValidConfig(format, frame->storage_type(), frame->coded_size(),
+ visible_rect, natural_size)) {
LOG(DFATAL) << __FUNCTION__ << " Invalid config."
- << ConfigToString(frame->format(), frame->storage_type(),
+ << ConfigToString(format, frame->storage_type(),
frame->coded_size(), visible_rect,
natural_size);
return nullptr;
}
- scoped_refptr<VideoFrame> wrapping_frame(new VideoFrame(
- frame->format(), frame->storage_type(), frame->coded_size(), visible_rect,
- natural_size, frame->timestamp()));
+ scoped_refptr<VideoFrame> wrapping_frame(
+ new VideoFrame(format, frame->storage_type(), frame->coded_size(),
+ visible_rect, natural_size, frame->timestamp()));
// Copy all metadata to the wrapped frame.
wrapping_frame->metadata()->MergeMetadataFrom(frame->metadata());
- for (size_t i = 0; i < NumPlanes(frame->format()); ++i) {
+ for (size_t i = 0; i < NumPlanes(format); ++i) {
wrapping_frame->strides_[i] = frame->stride(i);
wrapping_frame->data_[i] = frame->data(i);
}
diff --git a/media/base/video_frame.h b/media/base/video_frame.h
index 9538d7a..944a040 100644
--- a/media/base/video_frame.h
+++ b/media/base/video_frame.h
@@ -245,6 +245,7 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
// frame->visible_rect().
static scoped_refptr<VideoFrame> WrapVideoFrame(
const scoped_refptr<VideoFrame>& frame,
+ VideoPixelFormat format,
const gfx::Rect& visible_rect,
const gfx::Size& natural_size);
diff --git a/media/base/video_frame_pool.cc b/media/base/video_frame_pool.cc
index 40611ee..2bc7ff6 100644
--- a/media/base/video_frame_pool.cc
+++ b/media/base/video_frame_pool.cc
@@ -86,7 +86,7 @@ scoped_refptr<VideoFrame> VideoFramePool::PoolImpl::CreateFrame(
}
scoped_refptr<VideoFrame> wrapped_frame = VideoFrame::WrapVideoFrame(
- frame, frame->visible_rect(), frame->natural_size());
+ frame, frame->format(), frame->visible_rect(), frame->natural_size());
wrapped_frame->AddDestructionObserver(
base::Bind(&VideoFramePool::PoolImpl::FrameReleased, this, frame));
return wrapped_frame;
diff --git a/media/base/video_frame_unittest.cc b/media/base/video_frame_unittest.cc
index 1b45ea8..79652bb 100644
--- a/media/base/video_frame_unittest.cc
+++ b/media/base/video_frame_unittest.cc
@@ -242,11 +242,10 @@ TEST(VideoFrame, WrapVideoFrame) {
gfx::Size natural_size = visible_rect.size();
wrapped_frame->metadata()->SetTimeDelta(
media::VideoFrameMetadata::FRAME_DURATION, kFrameDuration);
- frame = media::VideoFrame::WrapVideoFrame(wrapped_frame, visible_rect,
- natural_size);
+ frame = media::VideoFrame::WrapVideoFrame(
+ wrapped_frame, wrapped_frame->format(), visible_rect, natural_size);
frame->AddDestructionObserver(base::Bind(
&FrameNoLongerNeededCallback, wrapped_frame, &done_callback_was_run));
-
EXPECT_EQ(wrapped_frame->coded_size(), frame->coded_size());
EXPECT_EQ(wrapped_frame->data(media::VideoFrame::kYPlane),
frame->data(media::VideoFrame::kYPlane));
diff --git a/media/base/video_util.cc b/media/base/video_util.cc
index c628673..d9954f8 100644
--- a/media/base/video_util.cc
+++ b/media/base/video_util.cc
@@ -14,6 +14,13 @@
namespace media {
+namespace {
+
+// Empty method used for keeping a reference to the original media::VideoFrame.
+void ReleaseOriginalFrame(const scoped_refptr<media::VideoFrame>& frame) {}
+
+} // namespace
+
gfx::Size GetNaturalSize(const gfx::Size& visible_size,
int aspect_ratio_numerator,
int aspect_ratio_denominator) {
@@ -316,4 +323,20 @@ void CopyRGBToVideoFrame(const uint8_t* source,
uv_stride);
}
+scoped_refptr<VideoFrame> WrapAsI420VideoFrame(
+ const scoped_refptr<VideoFrame>& frame) {
+ DCHECK_EQ(VideoFrame::STORAGE_OWNED_MEMORY, frame->storage_type());
+ DCHECK_EQ(PIXEL_FORMAT_YV12A, frame->format());
+
+ scoped_refptr<media::VideoFrame> wrapped_frame =
+ media::VideoFrame::WrapVideoFrame(frame, PIXEL_FORMAT_I420,
+ frame->visible_rect(),
+ frame->natural_size());
+ if (!wrapped_frame)
+ return nullptr;
+ wrapped_frame->AddDestructionObserver(
+ base::Bind(&ReleaseOriginalFrame, frame));
+ return wrapped_frame;
+}
+
} // namespace media
diff --git a/media/base/video_util.h b/media/base/video_util.h
index e513433..2dec7e9 100644
--- a/media/base/video_util.h
+++ b/media/base/video_util.h
@@ -7,6 +7,7 @@
#include <stdint.h>
+#include "base/memory/ref_counted.h"
#include "media/base/media_export.h"
#include "ui/gfx/geometry/rect.h"
#include "ui/gfx/geometry/size.h"
@@ -92,6 +93,10 @@ MEDIA_EXPORT void CopyRGBToVideoFrame(const uint8_t* source,
const gfx::Rect& region_in_frame,
VideoFrame* frame);
+// Wraps a YV12A frame as I420 by dropping the alpha channel; no pixel data
+// is copied.
+MEDIA_EXPORT scoped_refptr<VideoFrame> WrapAsI420VideoFrame(
+ const scoped_refptr<VideoFrame>& frame);
+
} // namespace media
#endif // MEDIA_BASE_VIDEO_UTIL_H_
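Taken together, the video_frame.cc and video_util.cc changes make dropping
alpha a zero-copy operation: WrapVideoFrame() now takes an explicit target
format, and AreValidPixelFormatsForWrap() whitelists the YV12A-to-I420 case.
A minimal sketch of that contract, assuming a YV12A |source| frame in owned
memory (ReleaseOriginalFrame is the helper added above):

  // YV12A is a 4:2:0 YUV layout plus a fourth (alpha) plane, and VideoFrame
  // indexes the Y/U/V planes identically for both formats, so the wrapper
  // can simply alias the first three planes and ignore the alpha plane.
  scoped_refptr<media::VideoFrame> i420 = media::VideoFrame::WrapVideoFrame(
      source, media::PIXEL_FORMAT_I420, source->visible_rect(),
      source->natural_size());
  if (i420) {
    // The wrapper aliases |source|'s memory; the destruction observer keeps
    // |source| alive until the wrapper itself is destroyed.
    i420->AddDestructionObserver(base::Bind(&ReleaseOriginalFrame, source));
  }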