summary | refs | log | tree | commit | diff | stats
path: root/media
diff options
context:
space:
mode:
author    posciak <posciak@chromium.org>          2015-01-06 22:37:59 -0800
committer Commit bot <commit-bot@chromium.org>    2015-01-07 06:38:41 +0000
commit3b87d51779440abedbf5fd0578b7ea02b988d876 (patch)
tree31272766cd5b178762729800fb1a2004100afbd5 /media
parent5eea335f462d8e8ae82dae916e426a63b070cb48 (diff)
downloadchromium_src-3b87d51779440abedbf5fd0578b7ea02b988d876.zip
chromium_src-3b87d51779440abedbf5fd0578b7ea02b988d876.tar.gz
chromium_src-3b87d51779440abedbf5fd0578b7ea02b988d876.tar.bz2
V4L2VDA: Generalize EGLImage import and query driver for output formats.
Generalize CreateEGLImage() from being NV12M-specific to support any format.
Add a method to query whether the current platform supports importing a given
V4L2/DRM format into EGLImages and use it in V4L2VDA. Dynamically query the
driver for supported V4L2 output formats and choose one we can use for
EGLImage import.

TEST=Video playback for affected platforms
BUG=chromium:430638,chrome-os-partner:33728,chromium:350197

Review URL: https://codereview.chromium.org/839523002

Cr-Commit-Position: refs/heads/master@{#310241}
Diffstat (limited to 'media')
-rw-r--r--  media/base/video_frame.cc                     58
-rw-r--r--  media/base/video_frame.h                       5
-rw-r--r--  media/blink/video_frame_compositor.cc          1
-rw-r--r--  media/filters/skcanvas_video_renderer.cc       5
-rw-r--r--  media/mojo/interfaces/media_types.mojom        3
-rw-r--r--  media/mojo/services/media_type_converters.cc   1
6 files changed, 56 insertions, 17 deletions
diff --git a/media/base/video_frame.cc b/media/base/video_frame.cc
index 7c19df2..32c203c 100644
--- a/media/base/video_frame.cc
+++ b/media/base/video_frame.cc
@@ -67,6 +67,7 @@ static gfx::Size SampleSize(VideoFrame::Format format, size_t plane) {
case VideoFrame::HOLE:
#endif // defined(VIDEO_HOLE)
case VideoFrame::NATIVE_TEXTURE:
+ case VideoFrame::ARGB:
break;
}
}
@@ -91,7 +92,13 @@ static gfx::Size CommonAlignment(VideoFrame::Format format) {
// 2 for the UV plane in NV12.
static int BytesPerElement(VideoFrame::Format format, size_t plane) {
DCHECK(VideoFrame::IsValidPlane(plane, format));
- return (format == VideoFrame::NV12 && plane == VideoFrame::kUVPlane) ? 2 : 1;
+ if (format == VideoFrame::ARGB)
+ return 4;
+
+ if (format == VideoFrame::NV12 && plane == VideoFrame::kUVPlane)
+ return 2;
+
+ return 1;
}
// Rounds up |coded_size| if necessary for |format|.
@@ -109,12 +116,25 @@ scoped_refptr<VideoFrame> VideoFrame::CreateFrame(
const gfx::Rect& visible_rect,
const gfx::Size& natural_size,
base::TimeDelta timestamp) {
- DCHECK(format != VideoFrame::UNKNOWN &&
- format != VideoFrame::NV12 &&
- format != VideoFrame::NATIVE_TEXTURE);
+ switch (format) {
+ case VideoFrame::YV12:
+ case VideoFrame::YV16:
+ case VideoFrame::I420:
+ case VideoFrame::YV12A:
+ case VideoFrame::YV12J:
+ case VideoFrame::YV24:
+ break;
+
+ case VideoFrame::UNKNOWN:
+ case VideoFrame::NV12:
+ case VideoFrame::NATIVE_TEXTURE:
#if defined(VIDEO_HOLE)
- DCHECK(format != VideoFrame::HOLE);
+ case VideoFrame::HOLE:
#endif // defined(VIDEO_HOLE)
+ case VideoFrame::ARGB:
+ NOTIMPLEMENTED();
+ return nullptr;
+ }
// Since we're creating a new YUV frame (and allocating memory for it
// ourselves), we can pad the requested |coded_size| if necessary if the
@@ -159,6 +179,8 @@ std::string VideoFrame::FormatToString(VideoFrame::Format format) {
return "NV12";
case VideoFrame::YV24:
return "YV24";
+ case VideoFrame::ARGB:
+ return "ARGB";
}
NOTREACHED() << "Invalid videoframe format provided: " << format;
return "";
@@ -202,6 +224,7 @@ bool VideoFrame::IsValidConfig(VideoFrame::Format format,
case VideoFrame::YV12A:
case VideoFrame::NV12:
case VideoFrame::YV16:
+ case VideoFrame::ARGB:
// Check that software-allocated buffer formats are aligned correctly and
// not empty.
const gfx::Size alignment = CommonAlignment(format);
@@ -304,6 +327,8 @@ scoped_refptr<VideoFrame> VideoFrame::WrapExternalDmabufs(
if (!IsValidConfig(format, coded_size, visible_rect, natural_size))
return NULL;
+ // TODO(posciak): This is not exactly correct, it's possible for one
+ // buffer to contain more than one plane.
if (dmabuf_fds.size() != NumPlanes(format)) {
LOG(FATAL) << "Not enough dmabuf fds provided!";
return NULL;
@@ -522,6 +547,8 @@ size_t VideoFrame::NumPlanes(Format format) {
case VideoFrame::HOLE:
#endif // defined(VIDEO_HOLE)
return 0;
+ case VideoFrame::ARGB:
+ return 1;
case VideoFrame::NV12:
return 2;
case VideoFrame::YV12:
@@ -554,11 +581,15 @@ gfx::Size VideoFrame::PlaneSize(Format format,
const gfx::Size& coded_size) {
DCHECK(IsValidPlane(plane, format));
- // Align to multiple-of-two size overall. This ensures that non-subsampled
- // planes can be addressed by pixel with the same scaling as the subsampled
- // planes.
- const int width = RoundUp(coded_size.width(), 2);
- const int height = RoundUp(coded_size.height(), 2);
+ int width = coded_size.width();
+ int height = coded_size.height();
+ if (format != VideoFrame::ARGB) {
+ // Align to multiple-of-two size overall. This ensures that non-subsampled
+ // planes can be addressed by pixel with the same scaling as the subsampled
+ // planes.
+ width = RoundUp(width, 2);
+ height = RoundUp(height, 2);
+ }
const gfx::Size subsample = SampleSize(format, plane);
DCHECK(width % subsample.width() == 0);
@@ -570,7 +601,6 @@ gfx::Size VideoFrame::PlaneSize(Format format,
size_t VideoFrame::PlaneAllocationSize(Format format,
size_t plane,
const gfx::Size& coded_size) {
- // VideoFrame formats are (so far) all YUV and 1 byte per sample.
return PlaneSize(format, plane, coded_size).GetArea();
}
@@ -578,9 +608,9 @@ size_t VideoFrame::PlaneAllocationSize(Format format,
int VideoFrame::PlaneHorizontalBitsPerPixel(Format format, size_t plane) {
DCHECK(IsValidPlane(plane, format));
const int bits_per_element = 8 * BytesPerElement(format, plane);
- const int pixels_per_element = SampleSize(format, plane).width();
- DCHECK(bits_per_element % pixels_per_element == 0);
- return bits_per_element / pixels_per_element;
+ const int horiz_pixels_per_element = SampleSize(format, plane).width();
+ DCHECK_EQ(bits_per_element % horiz_pixels_per_element, 0);
+ return bits_per_element / horiz_pixels_per_element;
}
// static
diff --git a/media/base/video_frame.h b/media/base/video_frame.h
index 54da038..0ffd058 100644
--- a/media/base/video_frame.h
+++ b/media/base/video_frame.h
@@ -40,6 +40,7 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
kMaxPlanes = 4,
kYPlane = 0,
+ kARGBPlane = kYPlane,
kUPlane = 1,
kUVPlane = kUPlane,
kVPlane = 2,
@@ -63,7 +64,9 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
YV12J = 7, // JPEG color range version of YV12
NV12 = 8, // 12bpp 1x1 Y plane followed by an interleaved 2x2 UV plane.
YV24 = 9, // 24bpp YUV planar, no subsampling.
- FORMAT_MAX = YV24, // Must always be equal to largest entry logged.
+ ARGB = 10, // 32bpp ARGB, 1 plane.
+ // Please update UMA histogram enumeration when adding new formats here.
+ FORMAT_MAX = ARGB, // Must always be equal to largest entry logged.
};
// Returns the name of a Format as a string.
diff --git a/media/blink/video_frame_compositor.cc b/media/blink/video_frame_compositor.cc
index 7f254d4..f57ff75 100644
--- a/media/blink/video_frame_compositor.cc
+++ b/media/blink/video_frame_compositor.cc
@@ -24,6 +24,7 @@ static bool IsOpaque(const scoped_refptr<VideoFrame>& frame) {
case VideoFrame::HOLE:
#endif // defined(VIDEO_HOLE)
case VideoFrame::NATIVE_TEXTURE:
+ case VideoFrame::ARGB:
break;
}
return false;
diff --git a/media/filters/skcanvas_video_renderer.cc b/media/filters/skcanvas_video_renderer.cc
index 2243445..51af5f2 100644
--- a/media/filters/skcanvas_video_renderer.cc
+++ b/media/filters/skcanvas_video_renderer.cc
@@ -52,6 +52,7 @@ bool IsYUV(media::VideoFrame::Format format) {
#if defined(VIDEO_HOLE)
case VideoFrame::HOLE:
#endif // defined(VIDEO_HOLE)
+ case VideoFrame::ARGB:
return false;
}
NOTREACHED() << "Invalid videoframe format provided: " << format;
@@ -73,6 +74,7 @@ bool IsJPEGColorSpace(media::VideoFrame::Format format) {
#if defined(VIDEO_HOLE)
case VideoFrame::HOLE:
#endif // defined(VIDEO_HOLE)
+ case VideoFrame::ARGB:
return false;
}
NOTREACHED() << "Invalid videoframe format provided: " << format;
@@ -198,7 +200,6 @@ void ConvertVideoFrameToRGBPixels(
break;
case media::VideoFrame::NATIVE_TEXTURE: {
- DCHECK_EQ(video_frame->format(), media::VideoFrame::NATIVE_TEXTURE);
SkBitmap tmp;
tmp.installPixels(
SkImageInfo::MakeN32Premul(video_frame->visible_rect().width(),
@@ -208,6 +209,8 @@ void ConvertVideoFrameToRGBPixels(
video_frame->ReadPixelsFromNativeTexture(tmp);
break;
}
+
+ case media::VideoFrame::ARGB:
default:
NOTREACHED();
break;
diff --git a/media/mojo/interfaces/media_types.mojom b/media/mojo/interfaces/media_types.mojom
index 417bf97..c14a80e 100644
--- a/media/mojo/interfaces/media_types.mojom
+++ b/media/mojo/interfaces/media_types.mojom
@@ -100,7 +100,8 @@ enum VideoFormat {
YV12J,
NV12,
YV24,
- FORMAT_MAX = YV24,
+ ARGB,
+ FORMAT_MAX = ARGB,
};
// See media/base/video_decoder_config.h for descriptions.
diff --git a/media/mojo/services/media_type_converters.cc b/media/mojo/services/media_type_converters.cc
index b037f98..2735004 100644
--- a/media/mojo/services/media_type_converters.cc
+++ b/media/mojo/services/media_type_converters.cc
@@ -141,6 +141,7 @@ ASSERT_ENUM_EQ_RAW(VideoFrame::Format,
ASSERT_ENUM_EQ_RAW(VideoFrame::Format, VideoFrame::YV12J, VIDEO_FORMAT_YV12J);
ASSERT_ENUM_EQ_RAW(VideoFrame::Format, VideoFrame::NV12, VIDEO_FORMAT_NV12);
ASSERT_ENUM_EQ_RAW(VideoFrame::Format, VideoFrame::YV24, VIDEO_FORMAT_YV24);
+ASSERT_ENUM_EQ_RAW(VideoFrame::Format, VideoFrame::ARGB, VIDEO_FORMAT_ARGB);
ASSERT_ENUM_EQ_RAW(VideoFrame::Format,
VideoFrame::FORMAT_MAX,
VIDEO_FORMAT_FORMAT_MAX);