summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--  cc/resources/video_resource_updater.cc    4
-rw-r--r--  media/base/video_frame.cc                 9
-rw-r--r--  media/base/video_frame.h                  1
-rw-r--r--  media/base/video_util.cc                  1
-rw-r--r--  media/ffmpeg/ffmpeg_common.cc             8
-rw-r--r--  media/filters/ffmpeg_video_decoder.cc     3
-rw-r--r--  media/filters/skcanvas_video_renderer.cc 11
7 files changed, 30 insertions(+), 7 deletions(-)
diff --git a/cc/resources/video_resource_updater.cc b/cc/resources/video_resource_updater.cc
index 8130fd58..ddb2277 100644
--- a/cc/resources/video_resource_updater.cc
+++ b/cc/resources/video_resource_updater.cc
@@ -69,6 +69,7 @@ bool VideoResourceUpdater::VerifyFrame(
case media::VideoFrame::YV12:
case media::VideoFrame::YV12A:
case media::VideoFrame::YV16:
+ case media::VideoFrame::YV12J:
case media::VideoFrame::NATIVE_TEXTURE:
#if defined(GOOGLE_TV)
case media::VideoFrame::HOLE:
@@ -99,6 +100,7 @@ static gfx::Size SoftwarePlaneDimension(
switch (input_frame_format) {
case media::VideoFrame::YV12:
case media::VideoFrame::YV12A:
+ case media::VideoFrame::YV12J:
return gfx::ToFlooredSize(gfx::ScaleSize(coded_size, 0.5f, 0.5f));
case media::VideoFrame::YV16:
return gfx::ToFlooredSize(gfx::ScaleSize(coded_size, 0.5f, 1.f));
@@ -133,9 +135,11 @@ VideoFrameExternalResources VideoResourceUpdater::CreateForSoftwarePlanes(
// Only YUV software video frames are supported.
DCHECK(input_frame_format == media::VideoFrame::YV12 ||
input_frame_format == media::VideoFrame::YV12A ||
+ input_frame_format == media::VideoFrame::YV12J ||
input_frame_format == media::VideoFrame::YV16);
if (input_frame_format != media::VideoFrame::YV12 &&
input_frame_format != media::VideoFrame::YV12A &&
+ input_frame_format != media::VideoFrame::YV12J &&
input_frame_format != media::VideoFrame::YV16)
return VideoFrameExternalResources();
diff --git a/media/base/video_frame.cc b/media/base/video_frame.cc
index 3541e6e..fcf777a 100644
--- a/media/base/video_frame.cc
+++ b/media/base/video_frame.cc
@@ -32,6 +32,7 @@ scoped_refptr<VideoFrame> VideoFrame::CreateFrame(
case VideoFrame::YV12A:
case VideoFrame::YV16:
case VideoFrame::I420:
+ case VideoFrame::YV12J:
frame->AllocateYUV();
break;
default:
@@ -59,6 +60,8 @@ std::string VideoFrame::FormatToString(VideoFrame::Format format) {
#endif
case VideoFrame::YV12A:
return "YV12A";
+ case VideoFrame::YV12J:
+ return "YV12J";
case VideoFrame::HISTOGRAM_MAX:
return "HISTOGRAM_MAX";
}
@@ -236,6 +239,7 @@ size_t VideoFrame::NumPlanes(Format format) {
case VideoFrame::YV12:
case VideoFrame::YV16:
case VideoFrame::I420:
+ case VideoFrame::YV12J:
return 3;
case VideoFrame::YV12A:
return 4;
@@ -269,6 +273,7 @@ size_t VideoFrame::PlaneAllocationSize(Format format,
RoundUp(coded_size.width(), 2) * RoundUp(coded_size.height(), 2);
switch (format) {
case VideoFrame::YV12:
+ case VideoFrame::YV12J:
case VideoFrame::I420: {
switch (plane) {
case VideoFrame::kYPlane:
@@ -324,7 +329,8 @@ static void ReleaseData(uint8* data) {
void VideoFrame::AllocateYUV() {
DCHECK(format_ == VideoFrame::YV12 || format_ == VideoFrame::YV16 ||
- format_ == VideoFrame::YV12A || format_ == VideoFrame::I420);
+ format_ == VideoFrame::YV12A || format_ == VideoFrame::I420 ||
+ format_ == VideoFrame::YV12J);
// Align Y rows at least at 16 byte boundaries. The stride for both
// YV12 and YV16 is 1/2 of the stride of Y. For YV12, every row of bytes for
// U and V applies to two rows of Y (one byte of UV for 4 bytes of Y), so in
@@ -419,6 +425,7 @@ int VideoFrame::row_bytes(size_t plane) const {
case YV12:
case YV16:
case I420:
+ case YV12J:
if (plane == kYPlane)
return width;
return RoundUp(width, 2) / 2;
diff --git a/media/base/video_frame.h b/media/base/video_frame.h
index ed554a1..3c44009 100644
--- a/media/base/video_frame.h
+++ b/media/base/video_frame.h
@@ -48,6 +48,7 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
HOLE = 5, // Hole frame.
#endif
NATIVE_TEXTURE = 6, // Native texture. Pixel-format agnostic.
+ YV12J = 7, // JPEG color range version of YV12
HISTOGRAM_MAX, // Must always be greatest.
};
diff --git a/media/base/video_util.cc b/media/base/video_util.cc
index fda758e..09f37b8 100644
--- a/media/base/video_util.cc
+++ b/media/base/video_util.cc
@@ -144,6 +144,7 @@ void LetterboxYUV(VideoFrame* frame, const gfx::Rect& view_area) {
DCHECK(!(view_area.width() & 1));
DCHECK(!(view_area.height() & 1));
DCHECK(frame->format() == VideoFrame::YV12 ||
+ frame->format() == VideoFrame::YV12J ||
frame->format() == VideoFrame::I420);
LetterboxPlane(frame, VideoFrame::kYPlane, view_area, 0x00);
gfx::Rect half_view_area(view_area.x() / 2,
diff --git a/media/ffmpeg/ffmpeg_common.cc b/media/ffmpeg/ffmpeg_common.cc
index 905e8d9..40696c6 100644
--- a/media/ffmpeg/ffmpeg_common.cc
+++ b/media/ffmpeg/ffmpeg_common.cc
@@ -513,12 +513,10 @@ VideoFrame::Format PixelFormatToVideoFormat(PixelFormat pixel_format) {
switch (pixel_format) {
case PIX_FMT_YUV422P:
return VideoFrame::YV16;
- // TODO(scherkus): We should be paying attention to the color range of each
- // format and scaling as appropriate when rendering. Regular YUV has a range
- // of 16-239 where as YUVJ has a range of 0-255.
case PIX_FMT_YUV420P:
- case PIX_FMT_YUVJ420P:
return VideoFrame::YV12;
+ case PIX_FMT_YUVJ420P:
+ return VideoFrame::YV12J;
case PIX_FMT_YUVA420P:
return VideoFrame::YV12A;
default:
@@ -533,6 +531,8 @@ PixelFormat VideoFormatToPixelFormat(VideoFrame::Format video_format) {
return PIX_FMT_YUV422P;
case VideoFrame::YV12:
return PIX_FMT_YUV420P;
+ case VideoFrame::YV12J:
+ return PIX_FMT_YUVJ420P;
case VideoFrame::YV12A:
return PIX_FMT_YUVA420P;
default:
diff --git a/media/filters/ffmpeg_video_decoder.cc b/media/filters/ffmpeg_video_decoder.cc
index eccfee2..75289ce 100644
--- a/media/filters/ffmpeg_video_decoder.cc
+++ b/media/filters/ffmpeg_video_decoder.cc
@@ -71,7 +71,8 @@ int FFmpegVideoDecoder::GetVideoBuffer(AVCodecContext* codec_context,
VideoFrame::Format format = PixelFormatToVideoFormat(codec_context->pix_fmt);
if (format == VideoFrame::UNKNOWN)
return AVERROR(EINVAL);
- DCHECK(format == VideoFrame::YV12 || format == VideoFrame::YV16);
+ DCHECK(format == VideoFrame::YV12 || format == VideoFrame::YV16 ||
+ format == VideoFrame::YV12J);
gfx::Size size(codec_context->width, codec_context->height);
int ret;
diff --git a/media/filters/skcanvas_video_renderer.cc b/media/filters/skcanvas_video_renderer.cc
index f0bf13d..ec3e92f 100644
--- a/media/filters/skcanvas_video_renderer.cc
+++ b/media/filters/skcanvas_video_renderer.cc
@@ -13,7 +13,9 @@
namespace media {
static bool IsEitherYV12OrYV16(media::VideoFrame::Format format) {
- return format == media::VideoFrame::YV12 || format == media::VideoFrame::YV16;
+ return format == media::VideoFrame::YV12 ||
+ format == media::VideoFrame::YV16 ||
+ format == media::VideoFrame::YV12J;
}
static bool IsEitherYV12OrYV16OrNative(media::VideoFrame::Format format) {
@@ -87,6 +89,11 @@ static void FastPaint(
y_shift = 1;
}
+ if (video_frame->format() == media::VideoFrame::YV12J) {
+ yuv_type = media::YV12;
+ y_shift = 1;
+ }
+
// Transform the destination rectangle to local coordinates.
const SkMatrix& local_matrix = canvas->getTotalMatrix();
SkRect local_dest_rect;
@@ -217,8 +224,10 @@ static void ConvertVideoFrameToBitmap(
(video_frame->visible_rect().y() >> y_shift)) +
(video_frame->visible_rect().x() >> 1);
}
+
switch (video_frame->format()) {
case media::VideoFrame::YV12:
+ case media::VideoFrame::YV12J:
media::ConvertYUVToRGB32(
video_frame->data(media::VideoFrame::kYPlane) + y_offset,
video_frame->data(media::VideoFrame::kUPlane) + uv_offset,