author     scherkus@chromium.org <scherkus@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>   2010-03-03 21:39:11 +0000
committer  scherkus@chromium.org <scherkus@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>   2010-03-03 21:39:11 +0000
commit     cfbe8e44e66cdac1d9fd259b66bb0e70dcca84d9 (patch)
tree       7faf929bd42948a604916ffb1b62b2b10387a027
parent     9d55c84026a0c4ad34ac7446e6219b8e00d9fc8a (diff)

Basic GPU accelerated video rendering.
Fragment shader is used for YUV to RGB conversion. No actual layering yet, so the video is still rendered on top of everything else.
BUG=33329
TEST=Watch some <video> with --enable-video-layering --enable-gpu-rendering
Review URL: http://codereview.chromium.org/661430
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@40538 0039d316-1c4b-4281-b951-d872f2087c98
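
The conversion named in the message happens entirely on the GPU: the patch's fragment shader samples one texel from each of the Y, U and V planes, re-centers U and V around zero, and multiplies by a 3x3 matrix of the familiar full-range BT.601-style coefficients. Below is a rough CPU-side sketch of that same per-pixel arithmetic, for reference only; RGB and YuvPixelToRgb are made-up names, not code from this change.

// A rough CPU-side sketch of the per-pixel math the new fragment shader
// performs (illustration only; RGB and YuvPixelToRgb are hypothetical names).
#include <algorithm>

struct RGB { float r, g, b; };

// Same row-major matrix as kYUV2RGB in the patch; the shader receives it via
// glUniformMatrix3fv(..., GL_TRUE, ...) so that yuv2rgb * vec3(y, u, v)
// computes exactly these three dot products.
static const float kYUV2RGB[9] = {
  1.f,  0.f,    1.403f,
  1.f, -.344f, -.714f,
  1.f,  1.772f, 0.f,
};

// y, u, v are plane samples normalized to [0, 1]; u and v are re-centered
// around zero first, mirroring the "- .5" in the shader.
RGB YuvPixelToRgb(float y, float u, float v) {
  u -= 0.5f;
  v -= 0.5f;
  RGB out;
  out.r = kYUV2RGB[0] * y + kYUV2RGB[1] * u + kYUV2RGB[2] * v;
  out.g = kYUV2RGB[3] * y + kYUV2RGB[4] * u + kYUV2RGB[5] * v;
  out.b = kYUV2RGB[6] * y + kYUV2RGB[7] * u + kYUV2RGB[8] * v;
  // Clamp to the displayable range, roughly what writing gl_FragColor to a
  // fixed-point framebuffer does implicitly.
  out.r = std::min(std::max(out.r, 0.f), 1.f);
  out.g = std::min(std::max(out.g, 0.f), 1.f);
  out.b = std::min(std::max(out.b, 0.f), 1.f);
  return out;
}

Moving that multiply into the shader is the point of the change; the CPU side only uploads the raw YV12 planes as luminance textures.
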
 chrome/gpu/gpu_video_layer_glx.cc | 261
 chrome/gpu/gpu_video_layer_glx.h  |  33
 chrome/gpu/gpu_view_x.cc          |   5
 3 files changed, 294 insertions(+), 5 deletions(-)

diff --git a/chrome/gpu/gpu_video_layer_glx.cc b/chrome/gpu/gpu_video_layer_glx.cc
index 99b4ac6..b0b9cec 100644
--- a/chrome/gpu/gpu_video_layer_glx.cc
+++ b/chrome/gpu/gpu_video_layer_glx.cc
@@ -4,8 +4,65 @@
 #include "chrome/gpu/gpu_video_layer_glx.h"

+#include <GL/glew.h>
+
 #include "chrome/common/gpu_messages.h"
 #include "chrome/gpu/gpu_thread.h"
+#include "chrome/gpu/gpu_view_x.h"
+
+// Handy constants for addressing YV12 data.
+static const int kYUVPlanes = 3;
+static const int kYPlane = 0;
+static const int kUPlane = 1;
+static const int kVPlane = 2;
+
+// Buffer size for shader compile errors.
+static const unsigned int kErrorSize = 4096;
+
+// Matrix used for the YUV to RGB conversion.
+static const float kYUV2RGB[9] = {
+  1.f, 0.f, 1.403f,
+  1.f, -.344f, -.714f,
+  1.f, 1.772f, 0.f,
+};
+
+// Texture coordinates mapping the entire texture.
+static const float kTextureCoords[8] = {
+  0, 0,
+  0, 1,
+  1, 0,
+  1, 1,
+};
+
+// Pass-through vertex shader.
+static const char kVertexShader[] =
+    "varying vec2 interp_tc;\n"
+    "\n"
+    "attribute vec4 in_pos;\n"
+    "attribute vec2 in_tc;\n"
+    "\n"
+    "void main() {\n"
+    "  interp_tc = in_tc;\n"
+    "  gl_Position = in_pos;\n"
+    "}\n";
+
+// YUV to RGB pixel shader. Loads a pixel from each plane and pass through the
+// matrix.
+static const char kFragmentShader[] =
+    "varying vec2 interp_tc;\n"
+    "\n"
+    "uniform sampler2D y_tex;\n"
+    "uniform sampler2D u_tex;\n"
+    "uniform sampler2D v_tex;\n"
+    "uniform mat3 yuv2rgb;\n"
+    "\n"
+    "void main() {\n"
+    "  float y = texture2D(y_tex, interp_tc).x;\n"
+    "  float u = texture2D(u_tex, interp_tc).r - .5;\n"
+    "  float v = texture2D(v_tex, interp_tc).r - .5;\n"
+    "  vec3 rgb = yuv2rgb * vec3(y, u, v);\n"
+    "  gl_FragColor = vec4(rgb, 1);\n"
+    "}\n";

 GpuVideoLayerGLX::GpuVideoLayerGLX(GpuViewX* view,
                                    GpuThread* gpu_thread,
@@ -14,14 +71,145 @@ GpuVideoLayerGLX::GpuVideoLayerGLX(GpuViewX* view,
     : view_(view),
       gpu_thread_(gpu_thread),
       routing_id_(routing_id),
-      size_(size) {
+      native_size_(size),
+      program_(0) {
+  memset(textures_, 0, sizeof(textures_));
+
+  // Load identity vertices.
+  gfx::Rect identity(0, 0, 1, 1);
+  CalculateVertices(identity.size(), identity, target_vertices_);
+
   gpu_thread_->AddRoute(routing_id_, this);
+
+  view_->BindContext();  // Must do this before issuing OpenGl.
+
+  glMatrixMode(GL_MODELVIEW);
+
+  // Create 3 textures, one for each plane, and bind them to different
+  // texture units.
+  glGenTextures(kYUVPlanes, textures_);
+
+  glBindTexture(GL_TEXTURE_2D, textures_[kYPlane]);
+  glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
+  glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+
+  glBindTexture(GL_TEXTURE_2D, textures_[kUPlane]);
+  glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
+  glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+
+  glBindTexture(GL_TEXTURE_2D, textures_[kVPlane]);
+  glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
+  glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+
+  // Create our YUV->RGB shader.
+  program_ = glCreateProgram();
+  GLuint vertex_shader = glCreateShader(GL_VERTEX_SHADER);
+  const char* vs_source = kVertexShader;
+  int vs_size = sizeof(kVertexShader);
+  glShaderSource(vertex_shader, 1, &vs_source, &vs_size);
+  glCompileShader(vertex_shader);
+  int result = GL_FALSE;
+  glGetShaderiv(vertex_shader, GL_COMPILE_STATUS, &result);
+  if (!result) {
+    char log[kErrorSize];
+    int len;
+    glGetShaderInfoLog(vertex_shader, kErrorSize - 1, &len, log);
+    log[kErrorSize - 1] = 0;
+    LOG(FATAL) << log;
+  }
+  glAttachShader(program_, vertex_shader);
+  glDeleteShader(vertex_shader);
+
+  GLuint fragment_shader = glCreateShader(GL_FRAGMENT_SHADER);
+  const char* ps_source = kFragmentShader;
+  int ps_size = sizeof(kFragmentShader);
+  glShaderSource(fragment_shader, 1, &ps_source, &ps_size);
+  glCompileShader(fragment_shader);
+  result = GL_FALSE;
+  glGetShaderiv(fragment_shader, GL_COMPILE_STATUS, &result);
+  if (!result) {
+    char log[kErrorSize];
+    int len;
+    glGetShaderInfoLog(fragment_shader, kErrorSize - 1, &len, log);
+    log[kErrorSize - 1] = 0;
+    LOG(FATAL) << log;
+  }
+  glAttachShader(program_, fragment_shader);
+  glDeleteShader(fragment_shader);
+
+  glLinkProgram(program_);
+  result = GL_FALSE;
+  glGetProgramiv(program_, GL_LINK_STATUS, &result);
+  if (!result) {
+    char log[kErrorSize];
+    int len;
+    glGetProgramInfoLog(program_, kErrorSize - 1, &len, log);
+    log[kErrorSize - 1] = 0;
+    LOG(FATAL) << log;
+  }
 }

 GpuVideoLayerGLX::~GpuVideoLayerGLX() {
+  // TODO(scherkus): this seems like a bad idea.. we might be better off with
+  // separate Initialize()/Teardown() calls instead.
+  view_->BindContext();
+  if (program_) {
+    glDeleteProgram(program_);
+  }
+
   gpu_thread_->RemoveRoute(routing_id_);
 }

+void GpuVideoLayerGLX::Render(const gfx::Size& viewport_size) {
+  // Nothing to do if we're not visible or have no YUV data.
+  if (target_rect_.IsEmpty()) {
+    return;
+  }
+
+  // Calculate the position of our quad.
+  CalculateVertices(viewport_size, target_rect_, target_vertices_);
+
+  // Bind Y, U and V textures to texture units.
+  glActiveTexture(GL_TEXTURE0);
+  glBindTexture(GL_TEXTURE_2D, textures_[kYPlane]);
+  glActiveTexture(GL_TEXTURE1);
+  glBindTexture(GL_TEXTURE_2D, textures_[kUPlane]);
+  glActiveTexture(GL_TEXTURE2);
+  glBindTexture(GL_TEXTURE_2D, textures_[kVPlane]);
+
+  // Bind vertex/fragment shader program.
+  glUseProgram(program_);
+
+  // Bind parameters.
+  glUniform1i(glGetUniformLocation(program_, "y_tex"), 0);
+  glUniform1i(glGetUniformLocation(program_, "u_tex"), 1);
+  glUniform1i(glGetUniformLocation(program_, "v_tex"), 2);
+
+  int yuv2rgb_location = glGetUniformLocation(program_, "yuv2rgb");
+  glUniformMatrix3fv(yuv2rgb_location, 1, GL_TRUE, kYUV2RGB);
+
+  // TODO(scherkus): instead of calculating and loading a geometry each time,
+  // we should store a constant geometry in a VBO and use a vertex shader.
+  int pos_location = glGetAttribLocation(program_, "in_pos");
+  glEnableVertexAttribArray(pos_location);
+  glVertexAttribPointer(pos_location, 2, GL_FLOAT, GL_FALSE, 0,
+                        target_vertices_);
+
+  int tc_location = glGetAttribLocation(program_, "in_tc");
+  glEnableVertexAttribArray(tc_location);
+  glVertexAttribPointer(tc_location, 2, GL_FLOAT, GL_FALSE, 0,
+                        kTextureCoords);
+
+  // Render!
+  glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
+
+  // Reset back to original state.
+  glDisableVertexAttribArray(pos_location);
+  glDisableVertexAttribArray(tc_location);
+  glActiveTexture(GL_TEXTURE0);
+  glUseProgram(0);
+}
+
 void GpuVideoLayerGLX::OnMessageReceived(const IPC::Message& msg) {
   IPC_BEGIN_MESSAGE_MAP(GpuVideoLayerGLX, msg)
     IPC_MESSAGE_HANDLER(GpuMsg_PaintToVideoLayer, OnPaintToVideoLayer)
@@ -40,9 +228,76 @@ void GpuVideoLayerGLX::OnChannelError() {
 void GpuVideoLayerGLX::OnPaintToVideoLayer(base::ProcessId source_process_id,
                                            TransportDIB::Id id,
                                            const gfx::Rect& bitmap_rect) {
-  // TODO(scherkus): implement GPU video layer.
-  NOTIMPLEMENTED();
+  // Assume that somewhere along the line, someone will do width * height * 4
+  // with signed numbers. If the maximum value is 2**31, then 2**31 / 4 =
+  // 2**29 and floor(sqrt(2**29)) = 23170.
+  //
+  // TODO(scherkus): |native_size_| is set in constructor, so perhaps this check
+  // should be a DCHECK().
+  const int width = native_size_.width();
+  const int height = native_size_.height();
+  const int stride = width;
+  if (width > 23170 || height > 23170)
+    return;
+
+  TransportDIB* dib = TransportDIB::Map(id);
+  if (!dib)
+    return;
+
+  // Everything looks good, update our target position and size.
+  target_rect_ = bitmap_rect;
+
+  // Perform colour space conversion.
+  uint8* planes[kYUVPlanes];
+  planes[kYPlane] = reinterpret_cast<uint8*>(dib->memory());
+  planes[kUPlane] = planes[kYPlane] + width * height;
+  planes[kVPlane] = planes[kUPlane] + ((width * height) >> 2);
+
+  view_->BindContext();  // Must do this before issuing OpenGl.
+
+  // Assume YV12 format.
+  for (int i = 0; i < kYUVPlanes; ++i) {
+    int plane_width = (i == kYPlane ? width : width / 2);
+    int plane_height = (i == kYPlane ? height : height / 2);
+    int plane_stride = (i == kYPlane ? stride : stride / 2);
+
+    glActiveTexture(GL_TEXTURE0 + i);
+    glBindTexture(GL_TEXTURE_2D, textures_[i]);
+    glPixelStorei(GL_UNPACK_ROW_LENGTH, plane_stride);
+    glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, plane_width, plane_height, 0,
+                 GL_LUMINANCE, GL_UNSIGNED_BYTE, planes[i]);
+  }
+
+  // Reset back to original state.
+  glActiveTexture(GL_TEXTURE0);
+  glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);
+  glFlush();

   // TODO(scherkus): we may not need to ACK video layer updates at all.
   gpu_thread_->Send(new GpuHostMsg_PaintToVideoLayer_ACK(routing_id_));
 }
+
+// static
+void GpuVideoLayerGLX::CalculateVertices(const gfx::Size& world,
+                                         const gfx::Rect& object,
+                                         float* vertices) {
+  // Don't forget GL has a flipped Y-axis!
+  float width = world.width();
+  float height = world.height();
+
+  // Top left.
+  vertices[0] = 2.0f * (object.x() / width) - 1.0f;
+  vertices[1] = -2.0f * (object.y() / height) + 1.0f;
+
+  // Bottom left.
+  vertices[2] = 2.0f * (object.x() / width) - 1.0f;
+  vertices[3] = -2.0f * (object.bottom() / height) + 1.0f;
+
+  // Top right.
+  vertices[4] = 2.0f * (object.right() / width) - 1.0f;
+  vertices[5] = -2.0f * (object.y() / height) + 1.0f;
+
+  // Bottom right.
+  vertices[6] = 2.0f * (object.right() / width) - 1.0f;
+  vertices[7] = -2.0f * (object.bottom() / height) + 1.0f;
+}
diff --git a/chrome/gpu/gpu_video_layer_glx.h b/chrome/gpu/gpu_video_layer_glx.h
index 772ff6f..bcfb42f4 100644
--- a/chrome/gpu/gpu_video_layer_glx.h
+++ b/chrome/gpu/gpu_video_layer_glx.h
@@ -22,7 +22,12 @@ class GpuVideoLayerGLX : public IPC::Channel::Listener {
                    const gfx::Size& size);
   virtual ~GpuVideoLayerGLX();

-  const gfx::Size& size() const { return size_; }
+  // Renders the video layer using the current GL context with respect to the
+  // given |viewport_size|.
+  //
+  // TODO(scherkus): we also need scrolling information to determine where
+  // exactly to place our quad.
+  void Render(const gfx::Size& viewport_size);

   // IPC::Channel::Listener implementation.
   virtual void OnMessageReceived(const IPC::Message& message);
@@ -35,10 +40,34 @@ class GpuVideoLayerGLX : public IPC::Channel::Listener {
                              TransportDIB::Id id,
                              const gfx::Rect& bitmap_rect);

+  // Calculates vertices for |object| relative to |world|, where |world| is
+  // assumed to represent a full-screen quad. |vertices| should be an array of
+  // 8 floats.
+  //
+  // TODO(scherkus): not sure how to describe what this does.
+  static void CalculateVertices(const gfx::Size& world,
+                                const gfx::Rect& object,
+                                float* vertices);
+
+  // GPU process related.
   GpuViewX* view_;
   GpuThread* gpu_thread_;
   int32 routing_id_;
-  gfx::Size size_;
+
+  // The native size of the incoming YUV frames.
+  gfx::Size native_size_;
+
+  // The target absolute position and size of the RGB frames.
+  gfx::Rect target_rect_;
+
+  // The target absolute position and size expressed as quad vertices.
+  float target_vertices_[8];
+
+  // 3 textures, one for each plane.
+  unsigned int textures_[3];
+
+  // Shader program for YUV->RGB conversion.
+  unsigned int program_;

   DISALLOW_COPY_AND_ASSIGN(GpuVideoLayerGLX);
 };
diff --git a/chrome/gpu/gpu_view_x.cc b/chrome/gpu/gpu_view_x.cc
index 3f698f4..b4c0f10 100644
--- a/chrome/gpu/gpu_view_x.cc
+++ b/chrome/gpu/gpu_view_x.cc
@@ -87,6 +87,11 @@ void GpuViewX::Repaint() {
   glEnd();

   DCHECK(glGetError() == GL_NO_ERROR);

+  if (video_layer_.get()) {
+    video_layer_->Render(backing_store_->size());
+    DCHECK(glGetError() == GL_NO_ERROR);
+  }
+
   // TODO(brettw) when we no longer stretch non-fitting bitmaps, we should
   // paint white over any unpainted area here.
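
For reference, the quad placement in the patch comes from CalculateVertices, which maps a rectangle given in viewport pixel coordinates (origin at the top left, Y pointing down) into GL clip space (Y pointing up, both axes spanning -1 to +1). Here is a small standalone sketch of the same mapping under those assumptions, with made-up names and without the gfx:: types; it is an illustration, not code from the patch.

// Standalone sketch of the pixel-rect -> clip-space mapping performed by
// GpuVideoLayerGLX::CalculateVertices (hypothetical names, illustration only).
#include <cstdio>

// Maps a rect of size rect_w x rect_h at (x, y) inside a w x h viewport to
// clip-space coordinates. Output order matches the patch's triangle strip:
// top-left, bottom-left, top-right, bottom-right.
void RectToClipSpace(float w, float h,
                     float x, float y, float rect_w, float rect_h,
                     float vertices[8]) {
  const float left = 2.0f * (x / w) - 1.0f;
  const float right = 2.0f * ((x + rect_w) / w) - 1.0f;
  const float top = -2.0f * (y / h) + 1.0f;      // Y axis is flipped in GL.
  const float bottom = -2.0f * ((y + rect_h) / h) + 1.0f;
  vertices[0] = left;  vertices[1] = top;     // top left
  vertices[2] = left;  vertices[3] = bottom;  // bottom left
  vertices[4] = right; vertices[5] = top;     // top right
  vertices[6] = right; vertices[7] = bottom;  // bottom right
}

int main() {
  float v[8];
  // A video rect covering the whole 800x600 viewport maps to the
  // full-screen quad (-1,1) (-1,-1) (1,1) (1,-1).
  RectToClipSpace(800, 600, 0, 0, 800, 600, v);
  for (int i = 0; i < 8; i += 2)
    std::printf("(%.2f, %.2f)\n", v[i], v[i + 1]);
  return 0;
}

A rect covering the entire viewport comes out as the full-screen quad; the constructor's 1x1 identity rect produces exactly those values for target_vertices_, and Render() recomputes them from target_rect_ once frames start arriving.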