author     sheu@chromium.org <sheu@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>   2013-11-14 12:41:39 +0000
committer  sheu@chromium.org <sheu@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>   2013-11-14 12:41:39 +0000
commit     85bbf7241198fb58559786fc447c507dc47406f7 (patch)
tree       5c7fdaf3acc70cc6d539e684fe5ec7c8670ccbf5 /content
parent     a41f7a7f81b338f2185a796ad35c92fffa703d7d (diff)
Remove GSC usage from ExynosVideoDecodeAccelerator.
With support for compositing from GL_TEXTURE_EXTERNAL_OES images (and support
from the Exynos GL stack for creating those textures), the video decoder does
not have to perform an explicit YUV->RGB color conversion step. +27% throughput
on birds0.h264 through vda_unittest.

BUG=167417
TEST=local build, run on CrOS snow, vda_unittest
TEST=local build, run on Android/clank

Review URL: https://codereview.chromium.org/23526070

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@235128 0039d316-1c4b-4281-b951-d872f2087c98
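
[Editor's note] In outline, the patch replaces the GSC YUV->RGB pass with a zero-copy path: each MFC NV12M output buffer is exported as per-plane dmabuf fds (VIDIOC_EXPBUF), those fds are wrapped in an EGLImage, and the image is sampled directly as an external texture. The sketch below shows the export step in isolation; it is not part of the patch, and the helper name, device fd, and buffer index are illustrative placeholders.

// Illustrative sketch (not from this patch): export the two planes of an
// NV12M V4L2 capture buffer as dmabuf fds, the mechanism the reworked
// CreateMfcOutputBuffers() relies on. The buffer must already have been
// allocated with VIDIOC_REQBUFS using V4L2_MEMORY_MMAP.
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

bool ExportNV12MPlanes(int device_fd, unsigned int buffer_index, int fds[2]) {
  for (unsigned int plane = 0; plane < 2; ++plane) {
    struct v4l2_exportbuffer expbuf;
    memset(&expbuf, 0, sizeof(expbuf));
    expbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
    expbuf.index = buffer_index;  // Which capture buffer to export.
    expbuf.plane = plane;         // NV12M: plane 0 = Y, plane 1 = interleaved CbCr.
    expbuf.flags = O_CLOEXEC;
    if (ioctl(device_fd, VIDIOC_EXPBUF, &expbuf) != 0)
      return false;               // Driver must support dmabuf export.
    fds[plane] = expbuf.fd;       // Caller owns the fds and must close() them.
  }
  return true;
}
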
Diffstat (limited to 'content')
-rw-r--r--  content/common/gpu/media/exynos_video_decode_accelerator.cc    885
-rw-r--r--  content/common/gpu/media/exynos_video_decode_accelerator.h     105
-rw-r--r--  content/common/gpu/media/rendering_helper.cc                     78
-rw-r--r--  content/common/gpu/media/rendering_helper.h                       4
-rw-r--r--  content/common/gpu/media/video_decode_accelerator_unittest.cc    12
-rw-r--r--  content/gpu/gpu_main.cc                                           4
6 files changed, 266 insertions, 822 deletions
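
[Editor's note] For reference, a minimal sketch of the consuming side of the same path, assuming EGL_EXT_image_dma_buf_import and GL_OES_EGL_image_external are available and that both NV12 plane pitches equal the frame width (as the patch assumes). The function name and parameters are illustrative, not from the patch; in a standalone program the extension entry points must be exposed (EGL_EGLEXT_PROTOTYPES / GL_GLEXT_PROTOTYPES) or resolved through eglGetProcAddress.

// Illustrative sketch (not from this patch): wrap the two NV12 dmabuf planes
// in an EGLImage via EGL_LINUX_DMA_BUF_EXT and bind it to an external
// texture, mirroring what AssignPictureBuffers() now does.
#include <EGL/egl.h>
#include <EGL/eglext.h>
#include <GLES2/gl2.h>
#include <GLES2/gl2ext.h>
#include <libdrm/drm_fourcc.h>

EGLImageKHR ImportNV12AsExternalTexture(EGLDisplay display,
                                        EGLint width, EGLint height,
                                        int y_fd, int uv_fd,
                                        GLuint texture_id) {
  const EGLint attrs[] = {
    EGL_WIDTH, width,
    EGL_HEIGHT, height,
    EGL_LINUX_DRM_FOURCC_EXT, DRM_FORMAT_NV12,
    EGL_DMA_BUF_PLANE0_FD_EXT, y_fd,
    EGL_DMA_BUF_PLANE0_OFFSET_EXT, 0,
    EGL_DMA_BUF_PLANE0_PITCH_EXT, width,   // Y plane: 1 byte per pixel.
    EGL_DMA_BUF_PLANE1_FD_EXT, uv_fd,
    EGL_DMA_BUF_PLANE1_OFFSET_EXT, 0,
    EGL_DMA_BUF_PLANE1_PITCH_EXT, width,   // CbCr plane: 2 bytes per 2x1 block.
    EGL_NONE,
  };
  EGLImageKHR image = eglCreateImageKHR(display, EGL_NO_CONTEXT,
                                        EGL_LINUX_DMA_BUF_EXT, NULL, attrs);
  if (image == EGL_NO_IMAGE_KHR)
    return EGL_NO_IMAGE_KHR;
  // Sample it in GLSL through a samplerExternalOES uniform
  // (#extension GL_OES_EGL_image_external), as rendering_helper.cc now does.
  glBindTexture(GL_TEXTURE_EXTERNAL_OES, texture_id);
  glEGLImageTargetTexture2DOES(GL_TEXTURE_EXTERNAL_OES, image);
  return image;  // Destroy with eglDestroyImageKHR() once no longer needed.
}
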
diff --git a/content/common/gpu/media/exynos_video_decode_accelerator.cc b/content/common/gpu/media/exynos_video_decode_accelerator.cc
index d6646d1..9f0f57a 100644
--- a/content/common/gpu/media/exynos_video_decode_accelerator.cc
+++ b/content/common/gpu/media/exynos_video_decode_accelerator.cc
@@ -5,6 +5,7 @@
#include <dlfcn.h>
#include <errno.h>
#include <fcntl.h>
+#include <libdrm/drm_fourcc.h>
#include <linux/videodev2.h>
#include <poll.h>
#include <sys/eventfd.h>
@@ -56,15 +57,7 @@ namespace {
#endif
const char kExynosMfcDevice[] = "/dev/mfc-dec";
-const char kExynosGscDevice[] = "/dev/gsc1";
-const char kMaliDriver[] = "libmali.so";
-typedef EGLBoolean (*MaliEglImageGetBufferExtPhandleFunc)(EGLImageKHR, EGLint*,
- void*);
-
-void* libmali_handle = NULL;
-MaliEglImageGetBufferExtPhandleFunc
- mali_egl_image_get_buffer_ext_phandle = NULL;
} // anonymous namespace
struct ExynosVideoDecodeAccelerator::BitstreamBufferRef {
@@ -84,13 +77,14 @@ struct ExynosVideoDecodeAccelerator::BitstreamBufferRef {
};
struct ExynosVideoDecodeAccelerator::PictureBufferArrayRef {
- PictureBufferArrayRef(EGLDisplay egl_display, size_t count);
+ PictureBufferArrayRef(EGLDisplay egl_display);
~PictureBufferArrayRef();
struct PictureBufferRef {
+ PictureBufferRef(EGLImageKHR egl_image, int32 picture_id)
+ : egl_image(egl_image), picture_id(picture_id) {}
EGLImageKHR egl_image;
- int egl_image_fd;
- int32 client_id;
+ int32 picture_id;
};
EGLDisplay const egl_display;
@@ -131,24 +125,14 @@ ExynosVideoDecodeAccelerator::BitstreamBufferRef::~BitstreamBufferRef() {
}
ExynosVideoDecodeAccelerator::PictureBufferArrayRef::PictureBufferArrayRef(
- EGLDisplay egl_display, size_t count)
- : egl_display(egl_display),
- picture_buffers(count) {
- for (size_t i = 0; i < picture_buffers.size(); ++i) {
- PictureBufferRef& buffer = picture_buffers[i];
- buffer.egl_image = EGL_NO_IMAGE_KHR;
- buffer.egl_image_fd = -1;
- buffer.client_id = -1;
- }
-}
+ EGLDisplay egl_display)
+ : egl_display(egl_display) {}
ExynosVideoDecodeAccelerator::PictureBufferArrayRef::~PictureBufferArrayRef() {
for (size_t i = 0; i < picture_buffers.size(); ++i) {
- PictureBufferRef& buffer = picture_buffers[i];
- if (buffer.egl_image != EGL_NO_IMAGE_KHR)
- eglDestroyImageKHR(egl_display, buffer.egl_image);
- if (buffer.egl_image_fd != -1)
- HANDLE_EINTR(close(buffer.egl_image_fd));
+ EGLImageKHR egl_image = picture_buffers[i].egl_image;
+ if (egl_image != EGL_NO_IMAGE_KHR)
+ eglDestroyImageKHR(egl_display, egl_image);
}
}
@@ -176,38 +160,17 @@ ExynosVideoDecodeAccelerator::MfcInputRecord::~MfcInputRecord() {
ExynosVideoDecodeAccelerator::MfcOutputRecord::MfcOutputRecord()
: at_device(false),
- input_id(-1) {
- bytes_used[0] = 0;
- bytes_used[1] = 0;
- address[0] = NULL;
- address[1] = NULL;
- length[0] = 0;
- length[1] = 0;
-}
-
-ExynosVideoDecodeAccelerator::MfcOutputRecord::~MfcOutputRecord() {
-}
-
-ExynosVideoDecodeAccelerator::GscInputRecord::GscInputRecord()
- : at_device(false),
- mfc_output(-1) {
-}
-
-ExynosVideoDecodeAccelerator::GscInputRecord::~GscInputRecord() {
-}
-
-ExynosVideoDecodeAccelerator::GscOutputRecord::GscOutputRecord()
- : at_device(false),
at_client(false),
- fd(-1),
egl_image(EGL_NO_IMAGE_KHR),
egl_sync(EGL_NO_SYNC_KHR),
picture_id(-1),
- cleared(false) {}
-
-ExynosVideoDecodeAccelerator::GscOutputRecord::~GscOutputRecord() {
+ cleared(false) {
+ for (size_t i = 0; i < arraysize(fds); ++i)
+ fds[i] = -1;
}
+ExynosVideoDecodeAccelerator::MfcOutputRecord::~MfcOutputRecord() {}
+
ExynosVideoDecodeAccelerator::PictureRecord::PictureRecord(
bool cleared,
const media::Picture& picture)
@@ -245,11 +208,6 @@ ExynosVideoDecodeAccelerator::ExynosVideoDecodeAccelerator(
mfc_output_buffer_queued_count_(0),
mfc_output_buffer_pixelformat_(0),
mfc_output_dpb_size_(0),
- gsc_fd_(-1),
- gsc_input_streamon_(false),
- gsc_input_buffer_queued_count_(0),
- gsc_output_streamon_(false),
- gsc_output_buffer_queued_count_(0),
picture_clearing_count_(0),
device_poll_thread_("ExynosDevicePollThread"),
device_poll_interrupt_fd_(-1),
@@ -266,12 +224,6 @@ ExynosVideoDecodeAccelerator::~ExynosVideoDecodeAccelerator() {
HANDLE_EINTR(close(device_poll_interrupt_fd_));
device_poll_interrupt_fd_ = -1;
}
- if (gsc_fd_ != -1) {
- DestroyGscInputBuffers();
- DestroyGscOutputBuffers();
- HANDLE_EINTR(close(gsc_fd_));
- gsc_fd_ = -1;
- }
if (mfc_fd_ != -1) {
DestroyMfcInputBuffers();
DestroyMfcOutputBuffers();
@@ -283,8 +235,6 @@ ExynosVideoDecodeAccelerator::~ExynosVideoDecodeAccelerator() {
// descriptors, mmap() segments, etc.
DCHECK(mfc_input_buffer_map_.empty());
DCHECK(mfc_output_buffer_map_.empty());
- DCHECK(gsc_input_buffer_map_.empty());
- DCHECK(gsc_output_buffer_map_.empty());
}
bool ExynosVideoDecodeAccelerator::Initialize(
@@ -312,13 +262,6 @@ bool ExynosVideoDecodeAccelerator::Initialize(
};
video_profile_ = profile;
- static bool sandbox_initialized = PostSandboxInitialization();
- if (!sandbox_initialized) {
- DLOG(ERROR) << "Initialize(): PostSandboxInitialization() failed";
- NOTIFY_ERROR(PLATFORM_FAILURE);
- return false;
- }
-
if (egl_display_ == EGL_NO_DISPLAY) {
DLOG(ERROR) << "Initialize(): could not get EGLDisplay";
NOTIFY_ERROR(PLATFORM_FAILURE);
@@ -354,15 +297,6 @@ bool ExynosVideoDecodeAccelerator::Initialize(
NOTIFY_ERROR(PLATFORM_FAILURE);
return false;
}
- DVLOG(2) << "Initialize(): opening GSC device: " << kExynosGscDevice;
- gsc_fd_ = HANDLE_EINTR(open(kExynosGscDevice,
- O_RDWR | O_NONBLOCK | O_CLOEXEC));
- if (gsc_fd_ == -1) {
- DPLOG(ERROR) << "Initialize(): could not open GSC device: "
- << kExynosGscDevice;
- NOTIFY_ERROR(PLATFORM_FAILURE);
- return false;
- }
// Create the interrupt fd.
DCHECK_EQ(device_poll_interrupt_fd_, -1);
@@ -386,13 +320,6 @@ bool ExynosVideoDecodeAccelerator::Initialize(
NOTIFY_ERROR(PLATFORM_FAILURE);
return false;
}
- IOCTL_OR_ERROR_RETURN_FALSE(gsc_fd_, VIDIOC_QUERYCAP, &caps);
- if ((caps.capabilities & kCapsRequired) != kCapsRequired) {
- DLOG(ERROR) << "Initialize(): ioctl() failed: VIDIOC_QUERYCAP"
- ", caps check failed: 0x" << std::hex << caps.capabilities;
- NOTIFY_ERROR(PLATFORM_FAILURE);
- return false;
- }
if (!CreateMfcInputBuffers())
return false;
@@ -401,7 +328,7 @@ bool ExynosVideoDecodeAccelerator::Initialize(
struct v4l2_format format;
memset(&format, 0, sizeof(format));
format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
- format.fmt.pix_mp.pixelformat = V4L2_PIX_FMT_NV12MT_16X16;
+ format.fmt.pix_mp.pixelformat = V4L2_PIX_FMT_NV12M;
IOCTL_OR_ERROR_RETURN_FALSE(mfc_fd_, VIDIOC_S_FMT, &format);
// Subscribe to the resolution change event.
@@ -446,10 +373,10 @@ void ExynosVideoDecodeAccelerator::AssignPictureBuffers(
DVLOG(3) << "AssignPictureBuffers(): buffer_count=" << buffers.size();
DCHECK(child_message_loop_proxy_->BelongsToCurrentThread());
- if (buffers.size() != gsc_output_buffer_map_.size()) {
+ if (buffers.size() != mfc_output_buffer_map_.size()) {
DLOG(ERROR) << "AssignPictureBuffers(): Failed to provide requested picture"
- " buffers. (Got " << buffers.size() << ", requested " <<
- gsc_output_buffer_map_.size() << ")";
+ " buffers. (Got " << buffers.size()
+ << ", requested " << mfc_output_buffer_map_.size() << ")";
NOTIFY_ERROR(INVALID_ARGUMENT);
return;
}
@@ -460,59 +387,45 @@ void ExynosVideoDecodeAccelerator::AssignPictureBuffers(
return;
}
- scoped_ptr<PictureBufferArrayRef> pic_buffers_ref(
- new PictureBufferArrayRef(egl_display_, buffers.size()));
-
- const static EGLint kImageAttrs[] = {
- EGL_IMAGE_PRESERVED_KHR, 0,
- EGL_NONE,
- };
- Display* x_display = base::MessagePumpForUI::GetDefaultXDisplay();
- gfx::ScopedTextureBinder bind_restore(GL_TEXTURE_2D, 0);
- for (size_t i = 0; i < pic_buffers_ref->picture_buffers.size(); ++i) {
+ scoped_ptr<PictureBufferArrayRef> picture_buffers_ref(
+ new PictureBufferArrayRef(egl_display_));
+ gfx::ScopedTextureBinder bind_restore(GL_TEXTURE_EXTERNAL_OES, 0);
+ EGLint attrs[] = {
+ EGL_WIDTH, 0, EGL_HEIGHT, 0,
+ EGL_LINUX_DRM_FOURCC_EXT, 0, EGL_DMA_BUF_PLANE0_FD_EXT, 0,
+ EGL_DMA_BUF_PLANE0_OFFSET_EXT, 0, EGL_DMA_BUF_PLANE0_PITCH_EXT, 0,
+ EGL_DMA_BUF_PLANE1_FD_EXT, 0, EGL_DMA_BUF_PLANE1_OFFSET_EXT, 0,
+ EGL_DMA_BUF_PLANE1_PITCH_EXT, 0, EGL_NONE, };
+ attrs[1] = frame_buffer_size_.width();
+ attrs[3] = frame_buffer_size_.height();
+ attrs[5] = DRM_FORMAT_NV12;
+ for (size_t i = 0; i < mfc_output_buffer_map_.size(); ++i) {
DCHECK(buffers[i].size() == frame_buffer_size_);
- PictureBufferArrayRef::PictureBufferRef& buffer =
- pic_buffers_ref->picture_buffers[i];
- // Create the X pixmap and then create an EGLImageKHR from it, so we can
- // get dma_buf backing.
- Pixmap pixmap = XCreatePixmap(x_display,
- RootWindow(x_display, 0),
- frame_buffer_size_.width(),
- frame_buffer_size_.height(),
- 32);
- if (!pixmap) {
- DLOG(ERROR) << "AssignPictureBuffers(): could not create X pixmap";
- NOTIFY_ERROR(PLATFORM_FAILURE);
- return;
- }
- glBindTexture(GL_TEXTURE_2D, buffers[i].texture_id());
+ MfcOutputRecord& output_record = mfc_output_buffer_map_[i];
+ attrs[7] = output_record.fds[0];
+ attrs[9] = 0;
+ attrs[11] = frame_buffer_size_.width();
+ attrs[13] = output_record.fds[1];
+ attrs[15] = 0;
+ attrs[17] = frame_buffer_size_.width();
EGLImageKHR egl_image = eglCreateImageKHR(
- egl_display_, EGL_NO_CONTEXT, EGL_NATIVE_PIXMAP_KHR,
- (EGLClientBuffer)pixmap, kImageAttrs);
- // We can free the X pixmap immediately -- according to the
- // EGL_KHR_image_base spec, the backing storage does not go away until the
- // last referencing EGLImage is destroyed.
- XFreePixmap(x_display, pixmap);
+ egl_display_, EGL_NO_CONTEXT, EGL_LINUX_DMA_BUF_EXT, NULL, attrs);
if (egl_image == EGL_NO_IMAGE_KHR) {
DLOG(ERROR) << "AssignPictureBuffers(): could not create EGLImageKHR";
NOTIFY_ERROR(PLATFORM_FAILURE);
return;
}
- buffer.egl_image = egl_image;
- int fd;
- if (!mali_egl_image_get_buffer_ext_phandle(buffer.egl_image, NULL, &fd)) {
- DLOG(ERROR) << "AssignPictureBuffers(): "
- << "could not get EGLImageKHR dmabuf fd";
- NOTIFY_ERROR(PLATFORM_FAILURE);
- return;
- }
- buffer.egl_image_fd = fd;
- glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, egl_image);
- buffer.client_id = buffers[i].id();
+
+ glBindTexture(GL_TEXTURE_EXTERNAL_OES, buffers[i].texture_id());
+ glEGLImageTargetTexture2DOES(GL_TEXTURE_EXTERNAL_OES, egl_image);
+ picture_buffers_ref->picture_buffers.push_back(
+ PictureBufferArrayRef::PictureBufferRef(egl_image, buffers[i].id()));
}
- decoder_thread_.message_loop()->PostTask(FROM_HERE, base::Bind(
- &ExynosVideoDecodeAccelerator::AssignPictureBuffersTask,
- base::Unretained(this), base::Passed(&pic_buffers_ref)));
+ decoder_thread_.message_loop()->PostTask(
+ FROM_HERE,
+ base::Bind(&ExynosVideoDecodeAccelerator::AssignPictureBuffersTask,
+ base::Unretained(this),
+ base::Passed(&picture_buffers_ref)));
}
void ExynosVideoDecodeAccelerator::ReusePictureBuffer(int32 picture_buffer_id) {
@@ -581,39 +494,6 @@ void ExynosVideoDecodeAccelerator::Destroy() {
bool ExynosVideoDecodeAccelerator::CanDecodeOnIOThread() { return true; }
-// static
-void ExynosVideoDecodeAccelerator::PreSandboxInitialization() {
- DVLOG(3) << "PreSandboxInitialization()";
- dlerror();
-
- libmali_handle = dlopen(kMaliDriver, RTLD_LAZY | RTLD_LOCAL);
- if (libmali_handle == NULL) {
- DPLOG(ERROR) << "failed to dlopen() " << kMaliDriver << ": " << dlerror();
- }
-}
-
-// static
-bool ExynosVideoDecodeAccelerator::PostSandboxInitialization() {
- DVLOG(3) << "PostSandboxInitialization()";
- if (libmali_handle == NULL) {
- DLOG(ERROR) << "PostSandboxInitialization(): no " << kMaliDriver
- << " driver handle";
- return false;
- }
-
- dlerror();
- mali_egl_image_get_buffer_ext_phandle =
- reinterpret_cast<MaliEglImageGetBufferExtPhandleFunc>(
- dlsym(libmali_handle, "mali_egl_image_get_buffer_ext_phandle"));
- if (mali_egl_image_get_buffer_ext_phandle == NULL) {
- DPLOG(ERROR) << "PostSandboxInitialization(): failed to dlsym() "
- << "mali_egl_image_get_buffer_ext_phandle: " << dlerror();
- return false;
- }
-
- return true;
-}
-
void ExynosVideoDecodeAccelerator::DecodeTask(
const media::BitstreamBuffer& bitstream_buffer) {
DVLOG(3) << "DecodeTask(): input_id=" << bitstream_buffer.id();
@@ -646,7 +526,7 @@ void ExynosVideoDecodeAccelerator::DecodeTask(
return;
}
- decoder_input_queue_.push_back(
+ decoder_input_queue_.push(
linked_ptr<BitstreamBufferRef>(bitstream_record.release()));
decoder_decode_buffer_tasks_scheduled_++;
DecodeBufferTask();
@@ -684,7 +564,7 @@ void ExynosVideoDecodeAccelerator::DecodeBufferTask() {
// Setup to use the next buffer.
decoder_current_bitstream_buffer_.reset(buffer_ref.release());
- decoder_input_queue_.pop_front();
+ decoder_input_queue_.pop();
DVLOG(3) << "DecodeBufferTask(): reading input_id="
<< decoder_current_bitstream_buffer_->input_id
<< ", addr=" << (decoder_current_bitstream_buffer_->shm ?
@@ -1036,7 +916,7 @@ bool ExynosVideoDecodeAccelerator::FlushInputFrame() {
}
// Queue it to MFC.
- mfc_input_ready_queue_.push_back(decoder_current_input_buffer_);
+ mfc_input_ready_queue_.push(decoder_current_input_buffer_);
decoder_current_input_buffer_ = -1;
DVLOG(3) << "FlushInputFrame(): submitting input_id="
<< input_record.input_id;
@@ -1059,30 +939,28 @@ void ExynosVideoDecodeAccelerator::AssignPictureBuffersTask(
return;
}
- DCHECK_EQ(pic_buffers->picture_buffers.size(), gsc_output_buffer_map_.size());
- for (size_t i = 0; i < gsc_output_buffer_map_.size(); ++i) {
+ DCHECK_EQ(pic_buffers->picture_buffers.size(), mfc_output_buffer_map_.size());
+ for (size_t i = 0; i < mfc_output_buffer_map_.size(); ++i) {
+ MfcOutputRecord& output_record = mfc_output_buffer_map_[i];
+ PictureBufferArrayRef::PictureBufferRef& buffer_ref =
+ pic_buffers->picture_buffers[i];
// We should be blank right now.
- GscOutputRecord& output_record = gsc_output_buffer_map_[i];
- DCHECK_EQ(output_record.fd, -1);
+ DCHECK(!output_record.at_device);
+ DCHECK(!output_record.at_client);
DCHECK_EQ(output_record.egl_image, EGL_NO_IMAGE_KHR);
DCHECK_EQ(output_record.egl_sync, EGL_NO_SYNC_KHR);
DCHECK_EQ(output_record.picture_id, -1);
DCHECK_EQ(output_record.cleared, false);
- PictureBufferArrayRef::PictureBufferRef& buffer =
- pic_buffers->picture_buffers[i];
- output_record.fd = buffer.egl_image_fd;
- output_record.egl_image = buffer.egl_image;
- output_record.picture_id = buffer.client_id;
-
- // Take ownership of the EGLImage and fd.
- buffer.egl_image = EGL_NO_IMAGE_KHR;
- buffer.egl_image_fd = -1;
- // And add this buffer to the free list.
- gsc_free_output_buffers_.push_back(i);
+ output_record.egl_image = buffer_ref.egl_image;
+ output_record.picture_id = buffer_ref.picture_id;
+ mfc_free_output_buffers_.push(i);
+ DVLOG(3) << "AssignPictureBuffersTask(): buffer[" << i
+ << "]: picture_id=" << buffer_ref.picture_id;
}
+ pic_buffers->picture_buffers.clear();
- // We got buffers! Kick the GSC.
- EnqueueGsc();
+ // We got buffers! Kick the MFC.
+ EnqueueMfc();
if (decoder_state_ == kChangingResolution)
ResumeAfterResolutionChange();
@@ -1110,9 +988,7 @@ void ExynosVideoDecodeAccelerator::ServiceDeviceTask(bool mfc_event_pending) {
if (mfc_event_pending)
DequeueMfcEvents();
DequeueMfc();
- DequeueGsc();
EnqueueMfc();
- EnqueueGsc();
// Clear the interrupt fd.
if (!ClearDevicePollInterrupt())
@@ -1123,10 +999,6 @@ void ExynosVideoDecodeAccelerator::ServiceDeviceTask(bool mfc_event_pending) {
// MFC can be polled as soon as either input or output buffers are queued.
if (mfc_input_buffer_queued_count_ + mfc_output_buffer_queued_count_ > 0)
poll_fds |= kPollMfc;
- // Add GSC fd, if we should poll on it.
- // GSC has to wait until both input and output buffers are queued.
- if (gsc_input_buffer_queued_count_ > 0 && gsc_output_buffer_queued_count_ > 0)
- poll_fds |= kPollGsc;
// ServiceDeviceTask() should only ever be scheduled from DevicePollTask(),
// so either:
@@ -1149,14 +1021,7 @@ void ExynosVideoDecodeAccelerator::ServiceDeviceTask(bool mfc_event_pending) {
<< mfc_input_buffer_map_.size() << "->"
<< mfc_free_output_buffers_.size() << "+"
<< mfc_output_buffer_queued_count_ << "/"
- << mfc_output_buffer_map_.size() << "] => "
- << mfc_output_gsc_input_queue_.size() << " => GSC["
- << gsc_free_input_buffers_.size() << "+"
- << gsc_input_buffer_queued_count_ << "/"
- << gsc_input_buffer_map_.size() << "->"
- << gsc_free_output_buffers_.size() << "+"
- << gsc_output_buffer_queued_count_ << "/"
- << gsc_output_buffer_map_.size() << "] => VDA["
+ << mfc_output_buffer_map_.size() << "] => VDA["
<< decoder_frames_at_client_ << "]";
ScheduleDecodeBufferTaskIfNeeded();
@@ -1285,24 +1150,25 @@ void ExynosVideoDecodeAccelerator::DequeueMfc() {
}
MfcOutputRecord& output_record = mfc_output_buffer_map_[dqbuf.index];
DCHECK(output_record.at_device);
+ DCHECK(!output_record.at_client);
+ DCHECK_NE(output_record.egl_image, EGL_NO_IMAGE_KHR);
+ DCHECK_NE(output_record.picture_id, -1);
output_record.at_device = false;
- output_record.bytes_used[0] = dqbuf.m.planes[0].bytesused;
- output_record.bytes_used[1] = dqbuf.m.planes[1].bytesused;
- if (output_record.bytes_used[0] + output_record.bytes_used[1] == 0) {
+ if (dqbuf.m.planes[0].bytesused + dqbuf.m.planes[1].bytesused == 0) {
// This is an empty output buffer returned as part of a flush.
- mfc_free_output_buffers_.push_back(dqbuf.index);
- output_record.input_id = -1;
+ mfc_free_output_buffers_.push(dqbuf.index);
} else {
- // This is an output buffer with contents to pass down the pipe.
- mfc_output_gsc_input_queue_.push_back(dqbuf.index);
- output_record.input_id = dqbuf.timestamp.tv_sec;
- DCHECK(output_record.input_id >= 0);
- DVLOG(3) << "DequeueMfc(): dequeued input_id=" << output_record.input_id;
- // We don't count this output buffer dequeued yet, or add it to the free
- // list, as it has data GSC needs to process.
-
- // We have new frames in mfc_output_gsc_input_queue_. Kick the pipe.
- SetDevicePollInterrupt();
+ DCHECK_GE(dqbuf.timestamp.tv_sec, 0);
+ output_record.at_client = true;
+ DVLOG(3) << "DequeueMfc(): returning input_id=" << dqbuf.timestamp.tv_sec
+ << " as picture_id=" << output_record.picture_id;
+ const media::Picture& picture =
+ media::Picture(output_record.picture_id, dqbuf.timestamp.tv_sec);
+ pending_picture_ready_.push(
+ PictureRecord(output_record.cleared, picture));
+ SendPictureReady();
+ output_record.cleared = true;
+ decoder_frames_at_client_++;
}
mfc_output_buffer_queued_count_--;
}
@@ -1310,144 +1176,12 @@ void ExynosVideoDecodeAccelerator::DequeueMfc() {
NotifyFlushDoneIfNeeded();
}
-void ExynosVideoDecodeAccelerator::EnqueueGsc() {
- DVLOG(3) << "EnqueueGsc()";
- DCHECK_EQ(decoder_thread_.message_loop(), base::MessageLoop::current());
- DCHECK_NE(decoder_state_, kUninitialized);
- DCHECK_NE(decoder_state_, kInitialized);
- TRACE_EVENT0("Video Decoder", "EVDA::EnqueueGsc");
-
- // Drain the pipe of completed MFC output buffers.
- const int old_gsc_inputs_queued = gsc_input_buffer_queued_count_;
- while (!mfc_output_gsc_input_queue_.empty() &&
- !gsc_free_input_buffers_.empty()) {
- if (!EnqueueGscInputRecord())
- return;
- }
- if (old_gsc_inputs_queued == 0 && gsc_input_buffer_queued_count_ != 0) {
- // We just started up a previously empty queue.
- // Queue state changed; signal interrupt.
- if (!SetDevicePollInterrupt())
- return;
- // Start VIDIOC_STREAMON if we haven't yet.
- if (!gsc_input_streamon_) {
- __u32 type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
- IOCTL_OR_ERROR_RETURN(gsc_fd_, VIDIOC_STREAMON, &type);
- gsc_input_streamon_ = true;
- }
- }
-
- if (gsc_input_buffer_queued_count_ != 0 &&
- gsc_output_buffer_queued_count_ == 0 &&
- !gsc_free_output_buffers_.empty()) {
- const int old_gsc_outputs_queued = gsc_output_buffer_queued_count_;
- if (!EnqueueGscOutputRecord())
- return;
- if (old_gsc_outputs_queued == 0 && gsc_output_buffer_queued_count_ != 0) {
- // We just started up a previously empty queue.
- // Queue state changed; signal interrupt.
- if (!SetDevicePollInterrupt())
- return;
- // Start VIDIOC_STREAMON if we haven't yet.
- if (!gsc_output_streamon_) {
- __u32 type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
- IOCTL_OR_ERROR_RETURN(gsc_fd_, VIDIOC_STREAMON, &type);
- gsc_output_streamon_ = true;
- }
- }
- }
- // Bug check: GSC is liable to race conditions if more than one buffer is
- // simultaneously queued.
- DCHECK_GE(1, gsc_output_buffer_queued_count_);
-}
-
-void ExynosVideoDecodeAccelerator::DequeueGsc() {
- DVLOG(3) << "DequeueGsc()";
- DCHECK_EQ(decoder_thread_.message_loop(), base::MessageLoop::current());
- DCHECK_NE(decoder_state_, kUninitialized);
- DCHECK_NE(decoder_state_, kInitialized);
- DCHECK_NE(decoder_state_, kAfterReset);
- TRACE_EVENT0("Video Decoder", "EVDA::DequeueGsc");
-
- // Dequeue completed GSC input (VIDEO_OUTPUT) buffers, and recycle to the free
- // list. Also recycle the corresponding MFC output buffers at this time.
- struct v4l2_buffer dqbuf;
- struct v4l2_plane planes[2];
- while (gsc_input_buffer_queued_count_ > 0) {
- DCHECK(gsc_input_streamon_);
- memset(&dqbuf, 0, sizeof(dqbuf));
- memset(planes, 0, sizeof(planes));
- dqbuf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
- dqbuf.memory = V4L2_MEMORY_DMABUF;
- dqbuf.m.planes = planes;
- dqbuf.length = 2;
- if (ioctl(gsc_fd_, VIDIOC_DQBUF, &dqbuf) != 0) {
- if (errno == EAGAIN) {
- // EAGAIN if we're just out of buffers to dequeue.
- break;
- }
- DPLOG(ERROR) << "DequeueGsc(): ioctl() failed: VIDIOC_DQBUF";
- NOTIFY_ERROR(PLATFORM_FAILURE);
- return;
- }
- GscInputRecord& input_record = gsc_input_buffer_map_[dqbuf.index];
- MfcOutputRecord& output_record =
- mfc_output_buffer_map_[input_record.mfc_output];
- DCHECK(input_record.at_device);
- gsc_free_input_buffers_.push_back(dqbuf.index);
- mfc_free_output_buffers_.push_back(input_record.mfc_output);
- input_record.at_device = false;
- input_record.mfc_output = -1;
- output_record.input_id = -1;
- gsc_input_buffer_queued_count_--;
- }
-
- // Dequeue completed GSC output (VIDEO_CAPTURE) buffers, and send them off to
- // the client. Don't recycle to its free list yet -- we can't do that until
- // ReusePictureBuffer() returns it to us.
- while (gsc_output_buffer_queued_count_ > 0) {
- DCHECK(gsc_output_streamon_);
- memset(&dqbuf, 0, sizeof(dqbuf));
- memset(planes, 0, sizeof(planes));
- dqbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
- dqbuf.memory = V4L2_MEMORY_DMABUF;
- dqbuf.m.planes = planes;
- dqbuf.length = 1;
- if (ioctl(gsc_fd_, VIDIOC_DQBUF, &dqbuf) != 0) {
- if (errno == EAGAIN) {
- // EAGAIN if we're just out of buffers to dequeue.
- break;
- }
- DPLOG(ERROR) << "DequeueGsc(): ioctl() failed: VIDIOC_DQBUF";
- NOTIFY_ERROR(PLATFORM_FAILURE);
- return;
- }
- GscOutputRecord& output_record = gsc_output_buffer_map_[dqbuf.index];
- DCHECK(output_record.at_device);
- DCHECK(!output_record.at_client);
- DCHECK_EQ(output_record.egl_sync, EGL_NO_SYNC_KHR);
- output_record.at_device = false;
- output_record.at_client = true;
- gsc_output_buffer_queued_count_--;
- DVLOG(3) << "DequeueGsc(): returning input_id=" << dqbuf.timestamp.tv_sec
- << " as picture_id=" << output_record.picture_id;
- const media::Picture& picture =
- media::Picture(output_record.picture_id, dqbuf.timestamp.tv_sec);
- pending_picture_ready_.push(PictureRecord(output_record.cleared, picture));
- SendPictureReady();
- output_record.cleared = true;
- decoder_frames_at_client_++;
- }
-
- NotifyFlushDoneIfNeeded();
-}
-
bool ExynosVideoDecodeAccelerator::EnqueueMfcInputRecord() {
DVLOG(3) << "EnqueueMfcInputRecord()";
DCHECK(!mfc_input_ready_queue_.empty());
// Enqueue a MFC input (VIDEO_OUTPUT) buffer.
- const int buffer = mfc_input_ready_queue_.back();
+ const int buffer = mfc_input_ready_queue_.front();
MfcInputRecord& input_record = mfc_input_buffer_map_[buffer];
DCHECK(!input_record.at_device);
struct v4l2_buffer qbuf;
@@ -1462,7 +1196,7 @@ bool ExynosVideoDecodeAccelerator::EnqueueMfcInputRecord() {
qbuf.m.planes[0].bytesused = input_record.bytes_used;
qbuf.length = 1;
IOCTL_OR_ERROR_RETURN_FALSE(mfc_fd_, VIDIOC_QBUF, &qbuf);
- mfc_input_ready_queue_.pop_back();
+ mfc_input_ready_queue_.pop();
input_record.at_device = true;
mfc_input_buffer_queued_count_++;
DVLOG(3) << "EnqueueMfcInputRecord(): enqueued input_id="
@@ -1475,83 +1209,17 @@ bool ExynosVideoDecodeAccelerator::EnqueueMfcOutputRecord() {
DCHECK(!mfc_free_output_buffers_.empty());
// Enqueue a MFC output (VIDEO_CAPTURE) buffer.
- const int buffer = mfc_free_output_buffers_.back();
+ const int buffer = mfc_free_output_buffers_.front();
MfcOutputRecord& output_record = mfc_output_buffer_map_[buffer];
DCHECK(!output_record.at_device);
- DCHECK_EQ(output_record.input_id, -1);
- struct v4l2_buffer qbuf;
- struct v4l2_plane qbuf_planes[2];
- memset(&qbuf, 0, sizeof(qbuf));
- memset(qbuf_planes, 0, sizeof(qbuf_planes));
- qbuf.index = buffer;
- qbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
- qbuf.memory = V4L2_MEMORY_MMAP;
- qbuf.m.planes = qbuf_planes;
- qbuf.length = 2;
- IOCTL_OR_ERROR_RETURN_FALSE(mfc_fd_, VIDIOC_QBUF, &qbuf);
- mfc_free_output_buffers_.pop_back();
- output_record.at_device = true;
- mfc_output_buffer_queued_count_++;
- return true;
-}
-
-bool ExynosVideoDecodeAccelerator::EnqueueGscInputRecord() {
- DVLOG(3) << "EnqueueGscInputRecord()";
- DCHECK(!gsc_free_input_buffers_.empty());
-
- // Enqueue a GSC input (VIDEO_OUTPUT) buffer for a complete MFC output
- // (VIDEO_CAPTURE) buffer.
- const int mfc_buffer = mfc_output_gsc_input_queue_.front();
- const int gsc_buffer = gsc_free_input_buffers_.back();
- MfcOutputRecord& output_record = mfc_output_buffer_map_[mfc_buffer];
- DCHECK(!output_record.at_device);
- GscInputRecord& input_record = gsc_input_buffer_map_[gsc_buffer];
- DCHECK(!input_record.at_device);
- DCHECK_EQ(input_record.mfc_output, -1);
- struct v4l2_buffer qbuf;
- struct v4l2_plane qbuf_planes[2];
- memset(&qbuf, 0, sizeof(qbuf));
- memset(qbuf_planes, 0, sizeof(qbuf_planes));
- qbuf.index = gsc_buffer;
- qbuf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
- qbuf.timestamp.tv_sec = output_record.input_id;
- qbuf.memory = V4L2_MEMORY_USERPTR;
- qbuf.m.planes = qbuf_planes;
- qbuf.m.planes[0].bytesused = output_record.bytes_used[0];
- qbuf.m.planes[0].length = mfc_output_buffer_size_[0];
- qbuf.m.planes[0].m.userptr = (unsigned long)output_record.address[0];
- qbuf.m.planes[1].bytesused = output_record.bytes_used[1];
- qbuf.m.planes[1].length = mfc_output_buffer_size_[1];
- qbuf.m.planes[1].m.userptr = (unsigned long)output_record.address[1];
- qbuf.length = 2;
- IOCTL_OR_ERROR_RETURN_FALSE(gsc_fd_, VIDIOC_QBUF, &qbuf);
- mfc_output_gsc_input_queue_.pop_front();
- gsc_free_input_buffers_.pop_back();
- input_record.at_device = true;
- input_record.mfc_output = mfc_buffer;
- output_record.bytes_used[0] = 0;
- output_record.bytes_used[1] = 0;
- gsc_input_buffer_queued_count_++;
- DVLOG(3) << "EnqueueGscInputRecord(): enqueued input_id="
- << output_record.input_id;
- return true;
-}
-
-bool ExynosVideoDecodeAccelerator::EnqueueGscOutputRecord() {
- DVLOG(3) << "EnqueueGscOutputRecord()";
- DCHECK(!gsc_free_output_buffers_.empty());
-
- // Enqueue a GSC output (VIDEO_CAPTURE) buffer.
- const int buffer = gsc_free_output_buffers_.front();
- GscOutputRecord& output_record = gsc_output_buffer_map_[buffer];
- DCHECK(!output_record.at_device);
DCHECK(!output_record.at_client);
+ DCHECK_NE(output_record.egl_image, EGL_NO_IMAGE_KHR);
+ DCHECK_NE(output_record.picture_id, -1);
if (output_record.egl_sync != EGL_NO_SYNC_KHR) {
- TRACE_EVENT0(
- "Video Decoder",
- "EVDA::EnqueueGscOutputRecord: eglClientWaitSyncKHR");
+ TRACE_EVENT0("Video Decoder",
+ "EVDA::EnqueueMfcOutputRecord: eglClientWaitSyncKHR");
// If we have to wait for completion, wait. Note that
- // gsc_free_output_buffers_ is a FIFO queue, so we always wait on the
+ // mfc_free_output_buffers_ is a FIFO queue, so we always wait on the
// buffer that has been in the queue the longest.
eglClientWaitSyncKHR(egl_display_, output_record.egl_sync, 0,
EGL_FOREVER_KHR);
@@ -1559,19 +1227,18 @@ bool ExynosVideoDecodeAccelerator::EnqueueGscOutputRecord() {
output_record.egl_sync = EGL_NO_SYNC_KHR;
}
struct v4l2_buffer qbuf;
- struct v4l2_plane qbuf_plane;
+ struct v4l2_plane qbuf_planes[arraysize(output_record.fds)];
memset(&qbuf, 0, sizeof(qbuf));
- memset(&qbuf_plane, 0, sizeof(qbuf_plane));
- qbuf.index = buffer;
- qbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
- qbuf.memory = V4L2_MEMORY_DMABUF;
- qbuf.m.planes = &qbuf_plane;
- qbuf.m.planes[0].m.fd = output_record.fd;
- qbuf.length = 1;
- IOCTL_OR_ERROR_RETURN_FALSE(gsc_fd_, VIDIOC_QBUF, &qbuf);
- gsc_free_output_buffers_.pop_front();
+ memset(qbuf_planes, 0, sizeof(qbuf_planes));
+ qbuf.index = buffer;
+ qbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ qbuf.memory = V4L2_MEMORY_MMAP;
+ qbuf.m.planes = qbuf_planes;
+ qbuf.length = arraysize(output_record.fds);
+ IOCTL_OR_ERROR_RETURN_FALSE(mfc_fd_, VIDIOC_QBUF, &qbuf);
+ mfc_free_output_buffers_.pop();
output_record.at_device = true;
- gsc_output_buffer_queued_count_++;
+ mfc_output_buffer_queued_count_++;
return true;
}
@@ -1594,17 +1261,17 @@ void ExynosVideoDecodeAccelerator::ReusePictureBufferTask(
}
size_t index;
- for (index = 0; index < gsc_output_buffer_map_.size(); ++index)
- if (gsc_output_buffer_map_[index].picture_id == picture_buffer_id)
+ for (index = 0; index < mfc_output_buffer_map_.size(); ++index)
+ if (mfc_output_buffer_map_[index].picture_id == picture_buffer_id)
break;
- if (index >= gsc_output_buffer_map_.size()) {
+ if (index >= mfc_output_buffer_map_.size()) {
DLOG(ERROR) << "ReusePictureBufferTask(): picture_buffer_id not found";
NOTIFY_ERROR(INVALID_ARGUMENT);
return;
}
- GscOutputRecord& output_record = gsc_output_buffer_map_[index];
+ MfcOutputRecord& output_record = mfc_output_buffer_map_[index];
if (output_record.at_device || !output_record.at_client) {
DLOG(ERROR) << "ReusePictureBufferTask(): picture_buffer_id not reusable";
NOTIFY_ERROR(INVALID_ARGUMENT);
@@ -1614,12 +1281,12 @@ void ExynosVideoDecodeAccelerator::ReusePictureBufferTask(
DCHECK_EQ(output_record.egl_sync, EGL_NO_SYNC_KHR);
output_record.at_client = false;
output_record.egl_sync = egl_sync_ref->egl_sync;
- gsc_free_output_buffers_.push_back(index);
+ mfc_free_output_buffers_.push(index);
decoder_frames_at_client_--;
// Take ownership of the EGLSync.
egl_sync_ref->egl_sync = EGL_NO_SYNC_KHR;
- // We got a buffer back, so kick the GSC.
- EnqueueGsc();
+ // We got a buffer back, so kick the MFC.
+ EnqueueMfc();
}
void ExynosVideoDecodeAccelerator::FlushTask() {
@@ -1643,9 +1310,9 @@ void ExynosVideoDecodeAccelerator::FlushTask() {
DCHECK(!decoder_flushing_);
// Queue up an empty buffer -- this triggers the flush.
- decoder_input_queue_.push_back(linked_ptr<BitstreamBufferRef>(
- new BitstreamBufferRef(io_client_, io_message_loop_proxy_, NULL, 0,
- kFlushBufferId)));
+ decoder_input_queue_.push(
+ linked_ptr<BitstreamBufferRef>(new BitstreamBufferRef(
+ io_client_, io_message_loop_proxy_, NULL, 0, kFlushBufferId)));
decoder_flushing_ = true;
SendPictureReady(); // Send all pending PictureReady.
@@ -1661,8 +1328,6 @@ void ExynosVideoDecodeAccelerator::NotifyFlushDoneIfNeeded() {
// * There is no currently filling input buffer.
// * MFC input holding queue is empty.
// * All MFC input (VIDEO_OUTPUT) buffers are returned.
- // * MFC -> GSC holding queue is empty.
- // * All GSC input (VIDEO_OUTPUT) buffers are returned.
if (!decoder_input_queue_.empty()) {
if (decoder_input_queue_.front()->input_id !=
decoder_delay_bitstream_buffer_id_)
@@ -1670,9 +1335,7 @@ void ExynosVideoDecodeAccelerator::NotifyFlushDoneIfNeeded() {
}
if (decoder_current_input_buffer_ != -1)
return;
- if ((mfc_input_ready_queue_.size() +
- mfc_input_buffer_queued_count_ + mfc_output_gsc_input_queue_.size() +
- gsc_input_buffer_queued_count_ + gsc_output_buffer_queued_count_ ) != 0)
+ if ((mfc_input_ready_queue_.size() + mfc_input_buffer_queued_count_) != 0)
return;
// TODO(posciak): crbug.com/270039. MFC requires a streamoff-streamon
@@ -1730,7 +1393,8 @@ void ExynosVideoDecodeAccelerator::ResetTask() {
resolution_change_pending_ = false;
decoder_current_bitstream_buffer_.reset();
- decoder_input_queue_.clear();
+ while (!decoder_input_queue_.empty())
+ decoder_input_queue_.pop();
decoder_current_input_buffer_ = -1;
@@ -1787,7 +1451,8 @@ void ExynosVideoDecodeAccelerator::DestroyTask() {
decoder_current_input_buffer_ = -1;
decoder_decode_buffer_tasks_scheduled_ = 0;
decoder_frames_at_client_ = 0;
- decoder_input_queue_.clear();
+ while (!decoder_input_queue_.empty())
+ decoder_input_queue_.pop();
decoder_flushing_ = false;
// Set our state to kError. Just in case.
@@ -1838,20 +1503,11 @@ bool ExynosVideoDecodeAccelerator::StopDevicePoll(bool keep_mfc_input_state) {
IOCTL_OR_ERROR_RETURN_FALSE(mfc_fd_, VIDIOC_STREAMOFF, &type);
}
mfc_output_streamon_ = false;
- if (gsc_input_streamon_) {
- __u32 type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
- IOCTL_OR_ERROR_RETURN_FALSE(gsc_fd_, VIDIOC_STREAMOFF, &type);
- }
- gsc_input_streamon_ = false;
- if (gsc_output_streamon_) {
- __u32 type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
- IOCTL_OR_ERROR_RETURN_FALSE(gsc_fd_, VIDIOC_STREAMOFF, &type);
- }
- gsc_output_streamon_ = false;
// Reset all our accounting info.
if (!keep_mfc_input_state) {
- mfc_input_ready_queue_.clear();
+ while (!mfc_input_ready_queue_.empty())
+ mfc_input_ready_queue_.pop();
mfc_free_input_buffers_.clear();
for (size_t i = 0; i < mfc_input_buffer_map_.size(); ++i) {
mfc_free_input_buffers_.push_back(i);
@@ -1861,30 +1517,18 @@ bool ExynosVideoDecodeAccelerator::StopDevicePoll(bool keep_mfc_input_state) {
}
mfc_input_buffer_queued_count_ = 0;
}
- mfc_free_output_buffers_.clear();
+ while (!mfc_free_output_buffers_.empty())
+ mfc_free_output_buffers_.pop();
for (size_t i = 0; i < mfc_output_buffer_map_.size(); ++i) {
- mfc_free_output_buffers_.push_back(i);
- mfc_output_buffer_map_[i].at_device = false;
- mfc_output_buffer_map_[i].input_id = -1;
- }
- mfc_output_buffer_queued_count_ = 0;
- mfc_output_gsc_input_queue_.clear();
- gsc_free_input_buffers_.clear();
- for (size_t i = 0; i < gsc_input_buffer_map_.size(); ++i) {
- gsc_free_input_buffers_.push_back(i);
- gsc_input_buffer_map_[i].at_device = false;
- gsc_input_buffer_map_[i].mfc_output = -1;
- }
- gsc_input_buffer_queued_count_ = 0;
- gsc_free_output_buffers_.clear();
- for (size_t i = 0; i < gsc_output_buffer_map_.size(); ++i) {
- // Only mark those free that aren't being held by the VDA.
- if (!gsc_output_buffer_map_[i].at_client) {
- gsc_free_output_buffers_.push_back(i);
- gsc_output_buffer_map_[i].at_device = false;
+ MfcOutputRecord& output_record = mfc_output_buffer_map_[i];
+ // Only mark those free that aren't being held by the VDA client.
+ if (!output_record.at_client) {
+ DCHECK_EQ(output_record.egl_sync, EGL_NO_SYNC_KHR);
+ mfc_free_output_buffers_.push(i);
+ mfc_output_buffer_map_[i].at_device = false;
}
}
- gsc_output_buffer_queued_count_ = 0;
+ mfc_output_buffer_queued_count_ = 0;
DVLOG(3) << "StopDevicePoll(): device poll stopped";
return true;
@@ -1928,13 +1572,7 @@ void ExynosVideoDecodeAccelerator::StartResolutionChangeIfNeeded() {
if (!resolution_change_pending_)
return;
- if (!mfc_output_gsc_input_queue_.empty() ||
- gsc_input_buffer_queued_count_ + gsc_output_buffer_queued_count_ > 0) {
- DVLOG(3) << "StartResolutionChangeIfNeeded(): waiting for GSC to finish.";
- return;
- }
-
- DVLOG(3) << "No more work for GSC, initiate resolution change";
+ DVLOG(3) << "No more work, initiate resolution change";
// Keep MFC input queue.
if (!StopDevicePoll(true))
@@ -1995,7 +1633,6 @@ void ExynosVideoDecodeAccelerator::ResumeAfterResolutionChange() {
return;
EnqueueMfc();
- // Gsc will get enqueued in AssignPictureBuffersTask().
ScheduleDecodeBufferTaskIfNeeded();
}
@@ -2024,14 +1661,6 @@ void ExynosVideoDecodeAccelerator::DevicePollTask(unsigned int poll_fds) {
mfc_pollfd = nfds;
nfds++;
}
- // Add GSC fd, if we should poll on it.
- // GSC has to wait until both input and output buffers are queued.
- if (poll_fds & kPollGsc) {
- DVLOG(3) << "DevicePollTask(): adding GSC to poll() set";
- pollfds[nfds].fd = gsc_fd_;
- pollfds[nfds].events = POLLIN | POLLOUT | POLLERR;
- nfds++;
- }
// Poll it!
if (HANDLE_EINTR(poll(pollfds, nfds, -1)) == -1) {
@@ -2108,15 +1737,12 @@ bool ExynosVideoDecodeAccelerator::CreateBuffersForFormat(
CHECK_EQ(format.fmt.pix_mp.num_planes, 2);
frame_buffer_size_.SetSize(
format.fmt.pix_mp.width, format.fmt.pix_mp.height);
- mfc_output_buffer_size_[0] = format.fmt.pix_mp.plane_fmt[0].sizeimage;
- mfc_output_buffer_size_[1] = format.fmt.pix_mp.plane_fmt[1].sizeimage;
mfc_output_buffer_pixelformat_ = format.fmt.pix_mp.pixelformat;
- DCHECK_EQ(mfc_output_buffer_pixelformat_, V4L2_PIX_FMT_NV12MT_16X16);
+ DCHECK_EQ(mfc_output_buffer_pixelformat_, V4L2_PIX_FMT_NV12M);
DVLOG(3) << "CreateBuffersForFormat(): new resolution: "
<< frame_buffer_size_.ToString();
- if (!CreateMfcOutputBuffers() || !CreateGscInputBuffers() ||
- !CreateGscOutputBuffers())
+ if (!CreateMfcOutputBuffers())
return false;
return true;
@@ -2207,144 +1833,33 @@ bool ExynosVideoDecodeAccelerator::CreateMfcOutputBuffers() {
reqbufs.memory = V4L2_MEMORY_MMAP;
IOCTL_OR_ERROR_RETURN_FALSE(mfc_fd_, VIDIOC_REQBUFS, &reqbufs);
- // Fill our free-buffers list, and create DMABUFs from them.
+ // Create DMABUFs from output buffers.
mfc_output_buffer_map_.resize(reqbufs.count);
for (size_t i = 0; i < mfc_output_buffer_map_.size(); ++i) {
- mfc_free_output_buffers_.push_back(i);
-
- // Query for the MEMORY_MMAP pointer.
- struct v4l2_plane planes[2];
- struct v4l2_buffer buffer;
- memset(&buffer, 0, sizeof(buffer));
- memset(planes, 0, sizeof(planes));
- buffer.index = i;
- buffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
- buffer.memory = V4L2_MEMORY_MMAP;
- buffer.m.planes = planes;
- buffer.length = 2;
- IOCTL_OR_ERROR_RETURN_FALSE(mfc_fd_, VIDIOC_QUERYBUF, &buffer);
-
- // Get their user memory for GSC input.
- for (int j = 0; j < 2; ++j) {
- void* address = mmap(NULL, buffer.m.planes[j].length,
- PROT_READ | PROT_WRITE, MAP_SHARED, mfc_fd_,
- buffer.m.planes[j].m.mem_offset);
- if (address == MAP_FAILED) {
- DPLOG(ERROR) << "CreateMfcInputBuffers(): mmap() failed";
- return false;
- }
- mfc_output_buffer_map_[i].address[j] = address;
- mfc_output_buffer_map_[i].length[j] = buffer.m.planes[j].length;
+ MfcOutputRecord& output_record = mfc_output_buffer_map_[i];
+ for (size_t j = 0; j < arraysize(output_record.fds); ++j) {
+ // Export the DMABUF fd so we can export it as a texture.
+ struct v4l2_exportbuffer expbuf;
+ memset(&expbuf, 0, sizeof(expbuf));
+ expbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ expbuf.index = i;
+ expbuf.plane = j;
+ expbuf.flags = O_CLOEXEC;
+ IOCTL_OR_ERROR_RETURN_FALSE(mfc_fd_, VIDIOC_EXPBUF, &expbuf);
+ output_record.fds[j] = expbuf.fd;
}
}
- return true;
-}
-
-bool ExynosVideoDecodeAccelerator::CreateGscInputBuffers() {
- DVLOG(3) << "CreateGscInputBuffers()";
- DCHECK(decoder_state_ == kInitialized ||
- decoder_state_ == kChangingResolution);
- DCHECK(!gsc_input_streamon_);
- DCHECK(gsc_input_buffer_map_.empty());
-
- struct v4l2_format format;
- memset(&format, 0, sizeof(format));
- format.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
- format.fmt.pix_mp.width = frame_buffer_size_.width();
- format.fmt.pix_mp.height = frame_buffer_size_.height();
- format.fmt.pix_mp.pixelformat = mfc_output_buffer_pixelformat_;
- format.fmt.pix_mp.plane_fmt[0].sizeimage = mfc_output_buffer_size_[0];
- format.fmt.pix_mp.plane_fmt[1].sizeimage = mfc_output_buffer_size_[1];
- // NV12MT_16X16 is a tiled format for which bytesperline doesn't make too much
- // sense. Convention seems to be to assume 8bpp for these tiled formats.
- format.fmt.pix_mp.plane_fmt[0].bytesperline = frame_buffer_size_.width();
- format.fmt.pix_mp.plane_fmt[1].bytesperline = frame_buffer_size_.width();
- format.fmt.pix_mp.num_planes = 2;
- IOCTL_OR_ERROR_RETURN_FALSE(gsc_fd_, VIDIOC_S_FMT, &format);
-
- struct v4l2_control control;
- memset(&control, 0, sizeof(control));
- control.id = V4L2_CID_ROTATE;
- control.value = 0;
- IOCTL_OR_ERROR_RETURN_FALSE(gsc_fd_, VIDIOC_S_CTRL, &control);
-
- memset(&control, 0, sizeof(control));
- control.id = V4L2_CID_HFLIP;
- control.value = 0;
- IOCTL_OR_ERROR_RETURN_FALSE(gsc_fd_, VIDIOC_S_CTRL, &control);
-
- memset(&control, 0, sizeof(control));
- control.id = V4L2_CID_VFLIP;
- control.value = 0;
- IOCTL_OR_ERROR_RETURN_FALSE(gsc_fd_, VIDIOC_S_CTRL, &control);
-
- memset(&control, 0, sizeof(control));
- control.id = V4L2_CID_GLOBAL_ALPHA;
- control.value = 255;
- if (HANDLE_EINTR(ioctl(gsc_fd_, VIDIOC_S_CTRL, &control)) != 0) {
- memset(&control, 0, sizeof(control));
- control.id = V4L2_CID_ALPHA_COMPONENT;
- control.value = 255;
- IOCTL_OR_ERROR_RETURN_FALSE(gsc_fd_, VIDIOC_S_CTRL, &control);
- }
-
- struct v4l2_requestbuffers reqbufs;
- memset(&reqbufs, 0, sizeof(reqbufs));
- reqbufs.count = kGscInputBufferCount;
- reqbufs.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
- reqbufs.memory = V4L2_MEMORY_USERPTR;
- IOCTL_OR_ERROR_RETURN_FALSE(gsc_fd_, VIDIOC_REQBUFS, &reqbufs);
-
- gsc_input_buffer_map_.resize(reqbufs.count);
- for (size_t i = 0; i < gsc_input_buffer_map_.size(); ++i) {
- gsc_free_input_buffers_.push_back(i);
- gsc_input_buffer_map_[i].mfc_output = -1;
- }
-
- return true;
-}
-
-bool ExynosVideoDecodeAccelerator::CreateGscOutputBuffers() {
- DVLOG(3) << "CreateGscOutputBuffers()";
- DCHECK(decoder_state_ == kInitialized ||
- decoder_state_ == kChangingResolution);
- DCHECK(!gsc_output_streamon_);
- DCHECK(gsc_output_buffer_map_.empty());
-
- // GSC outputs into the EGLImages we create from the textures we are
- // assigned. Assume RGBA8888 format.
- struct v4l2_format format;
- memset(&format, 0, sizeof(format));
- format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
- format.fmt.pix_mp.width = frame_buffer_size_.width();
- format.fmt.pix_mp.height = frame_buffer_size_.height();
- format.fmt.pix_mp.pixelformat = V4L2_PIX_FMT_RGB32;
- format.fmt.pix_mp.plane_fmt[0].sizeimage =
- frame_buffer_size_.width() * frame_buffer_size_.height() * 4;
- format.fmt.pix_mp.plane_fmt[0].bytesperline = frame_buffer_size_.width() * 4;
- format.fmt.pix_mp.num_planes = 1;
- IOCTL_OR_ERROR_RETURN_FALSE(gsc_fd_, VIDIOC_S_FMT, &format);
-
- struct v4l2_requestbuffers reqbufs;
- memset(&reqbufs, 0, sizeof(reqbufs));
- reqbufs.count = mfc_output_dpb_size_ + kDpbOutputBufferExtraCount;
- reqbufs.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
- reqbufs.memory = V4L2_MEMORY_DMABUF;
- IOCTL_OR_ERROR_RETURN_FALSE(gsc_fd_, VIDIOC_REQBUFS, &reqbufs);
-
- // We don't actually fill in the freelist or the map here. That happens once
- // we have actual usable buffers, after AssignPictureBuffers();
- gsc_output_buffer_map_.resize(reqbufs.count);
-
- DVLOG(3) << "CreateGscOutputBuffers(): ProvidePictureBuffers(): "
- << "buffer_count=" << gsc_output_buffer_map_.size()
+ DVLOG(3) << "CreateMfcOutputBuffers(): ProvidePictureBuffers(): "
+ << "buffer_count=" << mfc_output_buffer_map_.size()
<< ", width=" << frame_buffer_size_.width()
<< ", height=" << frame_buffer_size_.height();
- child_message_loop_proxy_->PostTask(FROM_HERE, base::Bind(
- &Client::ProvidePictureBuffers, client_, gsc_output_buffer_map_.size(),
- gfx::Size(frame_buffer_size_.width(), frame_buffer_size_.height()),
- GL_TEXTURE_2D));
+ child_message_loop_proxy_->PostTask(FROM_HERE,
+ base::Bind(&Client::ProvidePictureBuffers,
+ client_,
+ mfc_output_buffer_map_.size(),
+ frame_buffer_size_,
+ GL_TEXTURE_EXTERNAL_OES));
return true;
}
@@ -2378,13 +1893,32 @@ void ExynosVideoDecodeAccelerator::DestroyMfcOutputBuffers() {
DCHECK(child_message_loop_proxy_->BelongsToCurrentThread());
DCHECK(!mfc_output_streamon_);
- for (size_t i = 0; i < mfc_output_buffer_map_.size(); ++i) {
- if (mfc_output_buffer_map_[i].address[0] != NULL)
- munmap(mfc_output_buffer_map_[i].address[0],
- mfc_output_buffer_map_[i].length[0]);
- if (mfc_output_buffer_map_[i].address[1] != NULL)
- munmap(mfc_output_buffer_map_[i].address[1],
- mfc_output_buffer_map_[i].length[1]);
+ if (mfc_output_buffer_map_.size() != 0) {
+ if (!make_context_current_.Run()) {
+ DLOG(ERROR) << "DestroyMfcOutputBuffers(): "
+ << "could not make context current";
+ } else {
+ size_t i = 0;
+ do {
+ MfcOutputRecord& output_record = mfc_output_buffer_map_[i];
+ for (size_t j = 0; j < arraysize(output_record.fds); ++j) {
+ if (output_record.fds[j] != -1)
+ HANDLE_EINTR(close(output_record.fds[j]));
+ if (output_record.egl_image != EGL_NO_IMAGE_KHR)
+ eglDestroyImageKHR(egl_display_, output_record.egl_image);
+ if (output_record.egl_sync != EGL_NO_SYNC_KHR)
+ eglDestroySyncKHR(egl_display_, output_record.egl_sync);
+ }
+ DVLOG(1) << "DestroyMfcOutputBuffers(): dismissing PictureBuffer id="
+ << output_record.picture_id;
+ child_message_loop_proxy_->PostTask(
+ FROM_HERE,
+ base::Bind(&Client::DismissPictureBuffer,
+ client_,
+ output_record.picture_id));
+ i++;
+ } while (i < mfc_output_buffer_map_.size());
+ }
}
struct v4l2_requestbuffers reqbufs;
@@ -2396,72 +1930,14 @@ void ExynosVideoDecodeAccelerator::DestroyMfcOutputBuffers() {
DPLOG(ERROR) << "DestroyMfcOutputBuffers() ioctl() failed: VIDIOC_REQBUFS";
mfc_output_buffer_map_.clear();
- mfc_free_output_buffers_.clear();
-}
-
-void ExynosVideoDecodeAccelerator::DestroyGscInputBuffers() {
- DVLOG(3) << "DestroyGscInputBuffers()";
- DCHECK(child_message_loop_proxy_->BelongsToCurrentThread());
- DCHECK(!gsc_input_streamon_);
-
- struct v4l2_requestbuffers reqbufs;
- memset(&reqbufs, 0, sizeof(reqbufs));
- reqbufs.count = 0;
- reqbufs.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
- reqbufs.memory = V4L2_MEMORY_DMABUF;
- if (ioctl(gsc_fd_, VIDIOC_REQBUFS, &reqbufs) != 0)
- DPLOG(ERROR) << "DestroyGscInputBuffers(): ioctl() failed: VIDIOC_REQBUFS";
-
- gsc_input_buffer_map_.clear();
- gsc_free_input_buffers_.clear();
-}
-
-void ExynosVideoDecodeAccelerator::DestroyGscOutputBuffers() {
- DVLOG(3) << "DestroyGscOutputBuffers()";
- DCHECK(child_message_loop_proxy_->BelongsToCurrentThread());
- DCHECK(!gsc_output_streamon_);
-
- if (gsc_output_buffer_map_.size() != 0) {
- if (!make_context_current_.Run())
- DLOG(ERROR) << "DestroyGscOutputBuffers(): "
- << "could not make context current";
-
- size_t i = 0;
- do {
- GscOutputRecord& output_record = gsc_output_buffer_map_[i];
- if (output_record.fd != -1)
- HANDLE_EINTR(close(output_record.fd));
- if (output_record.egl_image != EGL_NO_IMAGE_KHR)
- eglDestroyImageKHR(egl_display_, output_record.egl_image);
- if (output_record.egl_sync != EGL_NO_SYNC_KHR)
- eglDestroySyncKHR(egl_display_, output_record.egl_sync);
- if (client_) {
- DVLOG(1) << "DestroyGscOutputBuffers(): "
- << "dismissing PictureBuffer id=" << output_record.picture_id;
- client_->DismissPictureBuffer(output_record.picture_id);
- }
- ++i;
- } while (i < gsc_output_buffer_map_.size());
- }
-
- struct v4l2_requestbuffers reqbufs;
- memset(&reqbufs, 0, sizeof(reqbufs));
- reqbufs.count = 0;
- reqbufs.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
- reqbufs.memory = V4L2_MEMORY_DMABUF;
- if (ioctl(gsc_fd_, VIDIOC_REQBUFS, &reqbufs) != 0)
- DPLOG(ERROR) << "DestroyGscOutputBuffers(): ioctl() failed: VIDIOC_REQBUFS";
-
- gsc_output_buffer_map_.clear();
- gsc_free_output_buffers_.clear();
+ while (!mfc_free_output_buffers_.empty())
+ mfc_free_output_buffers_.pop();
}
void ExynosVideoDecodeAccelerator::ResolutionChangeDestroyBuffers() {
DCHECK(child_message_loop_proxy_->BelongsToCurrentThread());
DVLOG(3) << "ResolutionChangeDestroyBuffers()";
- DestroyGscInputBuffers();
- DestroyGscOutputBuffers();
DestroyMfcOutputBuffers();
// Finish resolution change on decoder thread.
@@ -2471,6 +1947,7 @@ void ExynosVideoDecodeAccelerator::ResolutionChangeDestroyBuffers() {
}
void ExynosVideoDecodeAccelerator::SendPictureReady() {
+ DVLOG(3) << "SendPictureReady()";
DCHECK_EQ(decoder_thread_.message_loop(), base::MessageLoop::current());
bool resetting_or_flushing =
(decoder_state_ == kResetting || decoder_flushing_);
diff --git a/content/common/gpu/media/exynos_video_decode_accelerator.h b/content/common/gpu/media/exynos_video_decode_accelerator.h
index 7d85ce1..7ccd594 100644
--- a/content/common/gpu/media/exynos_video_decode_accelerator.h
+++ b/content/common/gpu/media/exynos_video_decode_accelerator.h
@@ -8,7 +8,7 @@
#ifndef CONTENT_COMMON_GPU_MEDIA_EXYNOS_VIDEO_DECODE_ACCELERATOR_H_
#define CONTENT_COMMON_GPU_MEDIA_EXYNOS_VIDEO_DECODE_ACCELERATOR_H_
-#include <list>
+#include <queue>
#include <vector>
#include "base/callback_forward.h"
@@ -31,7 +31,7 @@ namespace content {
class H264Parser;
// This class handles Exynos video acceleration directly through the V4L2
-// devices exported by the Multi Format Codec and GScaler hardware blocks.
+// device exported by the Multi Format Codec hardware block.
//
// The threading model of this class is driven by the fact that it needs to
// interface two fundamentally different event queues -- the one Chromium
@@ -82,13 +82,6 @@ class CONTENT_EXPORT ExynosVideoDecodeAccelerator
// VideoDecodeAcceleratorImpl implementation.
virtual bool CanDecodeOnIOThread() OVERRIDE;
- // Do any necessary initialization before the sandbox is enabled.
- static void PreSandboxInitialization();
-
- // Lazily initialize static data after sandbox is enabled. Return false on
- // init failure.
- static bool PostSandboxInitialization();
-
private:
// These are rather subjectively tuned.
enum {
@@ -96,7 +89,6 @@ class CONTENT_EXPORT ExynosVideoDecodeAccelerator
// TODO(posciak): determine MFC input buffer size based on level limits.
// See http://crbug.com/255116.
kMfcInputBufferMaxSize = 1024 * 1024,
- kGscInputBufferCount = 4,
// Number of output buffers to use for each VDA stage above what's required
// by the decoder (e.g. DPB size, in H264). We need
// media::limits::kMaxVideoFrames to fill up the GpuVideoDecode pipeline,
@@ -123,7 +115,6 @@ class CONTENT_EXPORT ExynosVideoDecodeAccelerator
// File descriptors we need to poll.
enum PollFds {
kPollMfc = (1 << 0),
- kPollGsc = (1 << 1),
};
// Auto-destruction reference for BitstreamBuffer, for message-passing from
@@ -144,45 +135,25 @@ class CONTENT_EXPORT ExynosVideoDecodeAccelerator
struct MfcInputRecord {
MfcInputRecord();
~MfcInputRecord();
- bool at_device; // held by device.
- void* address; // mmap() address.
- size_t length; // mmap() length.
- off_t bytes_used; // bytes filled in the mmap() segment.
- int32 input_id; // triggering input_id as given to Decode().
+ bool at_device; // held by device.
+ void* address; // mmap() address.
+ size_t length; // mmap() length.
+ off_t bytes_used; // bytes filled in the mmap() segment.
+ int32 input_id; // triggering input_id as given to Decode().
};
// Record for MFC output buffers.
struct MfcOutputRecord {
MfcOutputRecord();
~MfcOutputRecord();
- bool at_device; // held by device.
- size_t bytes_used[2]; // bytes used in each dmabuf.
- void* address[2]; // mmap() address for each plane.
- size_t length[2]; // mmap() length for each plane.
- int32 input_id; // triggering input_id as given to Decode().
- };
-
- // Record for GSC input buffers.
- struct GscInputRecord {
- GscInputRecord();
- ~GscInputRecord();
- bool at_device; // held by device.
- int mfc_output; // MFC output buffer index to recycle when this input
- // is complete.
- };
-
- // Record for GSC output buffers.
- struct GscOutputRecord {
- GscOutputRecord();
- ~GscOutputRecord();
- bool at_device; // held by device.
- bool at_client; // held by client.
- int fd; // file descriptor from backing EGLImage.
- EGLImageKHR egl_image; // backing EGLImage.
- EGLSyncKHR egl_sync; // sync the compositor's use of the EGLImage.
- int32 picture_id; // picture buffer id as returned to PictureReady().
- bool cleared; // Whether the texture is cleared and safe to render
- // from. See TextureManager for details.
+ bool at_device; // held by device.
+ bool at_client; // held by client.
+ int fds[2]; // file descriptors for each plane.
+ EGLImageKHR egl_image; // EGLImageKHR for the output buffer.
+ EGLSyncKHR egl_sync; // sync the compositor's use of the EGLImage.
+ int32 picture_id; // picture buffer id as returned to PictureReady().
+ bool cleared; // Whether the texture is cleared and safe to render
+ // from. See TextureManager for details.
};
//
@@ -226,15 +197,11 @@ class CONTENT_EXPORT ExynosVideoDecodeAccelerator
// Handle the various device queues.
void EnqueueMfc();
void DequeueMfc();
- void EnqueueGsc();
- void DequeueGsc();
// Handle incoming MFC events.
void DequeueMfcEvents();
// Enqueue a buffer on the corresponding queue.
bool EnqueueMfcInputRecord();
bool EnqueueMfcOutputRecord();
- bool EnqueueGscInputRecord();
- bool EnqueueGscOutputRecord();
// Process a ReusePictureBuffer() API call. The API call create an EGLSync
// object on the main (GPU process) thread; we will record this object so we
@@ -278,7 +245,7 @@ class CONTENT_EXPORT ExynosVideoDecodeAccelerator
// Try to get output format from MFC, detected after parsing the beginning
// of the stream. Sets |again| to true if more parsing is needed.
bool GetFormatInfo(struct v4l2_format* format, bool* again);
- // Create MFC output and GSC input and output buffers for the given |format|.
+ // Create MFC output buffers for the given |format|.
bool CreateBuffersForFormat(const struct v4l2_format& format);
//
@@ -308,8 +275,6 @@ class CONTENT_EXPORT ExynosVideoDecodeAccelerator
// Create the buffers we need.
bool CreateMfcInputBuffers();
bool CreateMfcOutputBuffers();
- bool CreateGscInputBuffers();
- bool CreateGscOutputBuffers();
//
// Methods run on child thread.
@@ -318,8 +283,6 @@ class CONTENT_EXPORT ExynosVideoDecodeAccelerator
// Destroy buffers.
void DestroyMfcInputBuffers();
void DestroyMfcOutputBuffers();
- void DestroyGscInputBuffers();
- void DestroyGscOutputBuffers();
void ResolutionChangeDestroyBuffers();
// Send decoded pictures to PictureReady.
@@ -384,7 +347,7 @@ class CONTENT_EXPORT ExynosVideoDecodeAccelerator
// Got a reset request while we were performing resolution change.
bool resolution_change_reset_pending_;
// Input queue for decoder_thread_: BitstreamBuffers in.
- std::list<linked_ptr<BitstreamBufferRef> > decoder_input_queue_;
+ std::queue<linked_ptr<BitstreamBufferRef> > decoder_input_queue_;
// For H264 decode, hardware requires that we send it frame-sized chunks.
// We'll need to parse the stream.
scoped_ptr<content::H264Parser> decoder_h264_parser_;
@@ -397,7 +360,7 @@ class CONTENT_EXPORT ExynosVideoDecodeAccelerator
//
// Completed decode buffers, waiting for MFC.
- std::list<int> mfc_input_ready_queue_;
+ std::queue<int> mfc_input_ready_queue_;
// MFC decode device.
int mfc_fd_;
@@ -415,40 +378,16 @@ class CONTENT_EXPORT ExynosVideoDecodeAccelerator
bool mfc_output_streamon_;
// MFC output buffers enqueued to device.
int mfc_output_buffer_queued_count_;
- // Output buffers ready to use, as a LIFO since we don't care about ordering.
- std::vector<int> mfc_free_output_buffers_;
+ // Output buffers ready to use, as a FIFO since we want oldest-first to hide
+ // synchronization latency with GL.
+ std::queue<int> mfc_free_output_buffers_;
// Mapping of int index to MFC output buffer record.
std::vector<MfcOutputRecord> mfc_output_buffer_map_;
- // Required size of MFC output buffers. Two sizes for two planes.
- size_t mfc_output_buffer_size_[2];
+ // MFC output pixel format.
uint32 mfc_output_buffer_pixelformat_;
// Required size of DPB for decoding.
int mfc_output_dpb_size_;
- // Completed MFC outputs, waiting for GSC.
- std::list<int> mfc_output_gsc_input_queue_;
-
- // GSC decode device.
- int gsc_fd_;
-
- // GSC input buffer state.
- bool gsc_input_streamon_;
- // GSC input buffers enqueued to device.
- int gsc_input_buffer_queued_count_;
- // Input buffers ready to use, as a LIFO since we don't care about ordering.
- std::vector<int> gsc_free_input_buffers_;
- // Mapping of int index to GSC input buffer record.
- std::vector<GscInputRecord> gsc_input_buffer_map_;
-
- // GSC output buffer state.
- bool gsc_output_streamon_;
- // GSC output buffers enqueued to device.
- int gsc_output_buffer_queued_count_;
- // Output buffers ready to use. We need a FIFO here.
- std::list<int> gsc_free_output_buffers_;
- // Mapping of int index to GSC output buffer record.
- std::vector<GscOutputRecord> gsc_output_buffer_map_;
-
// Pictures that are ready but not sent to PictureReady yet.
std::queue<PictureRecord> pending_picture_ready_;
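The egl_sync member and the LIFO-to-FIFO change for mfc_free_output_buffers_ work together: a fence is recorded when the client returns a picture, and oldest-first reuse gives that fence time to signal before the buffer is needed again. A rough sketch of such a reuse step, using hypothetical names rather than the decoder's real methods:

// Hypothetical sketch of oldest-first buffer reuse gated on an EGL fence sync
// (EGL_KHR_fence_sync). Names are illustrative, not the decoder's real code.
#include <queue>
#include <vector>
#include <EGL/egl.h>
#include <EGL/eglext.h>

struct OutputRecord {
  EGLSyncKHR egl_sync;  // Fence created when the client released the picture.
};

bool ReuseOldestFreeBuffer(EGLDisplay display,
                           std::queue<int>* free_buffers,
                           std::vector<OutputRecord>* records) {
  if (free_buffers->empty())
    return false;
  int index = free_buffers->front();
  free_buffers->pop();
  OutputRecord& record = (*records)[index];
  if (record.egl_sync != EGL_NO_SYNC_KHR) {
    // Oldest-first ordering means this wait is usually a no-op by now.
    eglClientWaitSyncKHR(display, record.egl_sync,
                         EGL_SYNC_FLUSH_COMMANDS_BIT_KHR, EGL_FOREVER_KHR);
    eglDestroySyncKHR(display, record.egl_sync);
    record.egl_sync = EGL_NO_SYNC_KHR;
  }
  // ... re-enqueue buffer |index| to the MFC output queue (VIDIOC_QBUF) ...
  return true;
}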
diff --git a/content/common/gpu/media/rendering_helper.cc b/content/common/gpu/media/rendering_helper.cc
index b0f540e..c18cffc 100644
--- a/content/common/gpu/media/rendering_helper.cc
+++ b/content/common/gpu/media/rendering_helper.cc
@@ -330,13 +330,21 @@ void RenderingHelper::Initialize(const RenderingHelperParams& params,
});
#if GL_VARIANT_EGL
- static const char kFragmentShader[] = STRINGIZE(
- precision mediump float;
- varying vec2 interp_tc;
- uniform sampler2D tex;
- void main() {
- gl_FragColor = texture2D(tex, interp_tc);
- });
+ static const char kFragmentShader[] =
+ "#extension GL_OES_EGL_image_external : enable\n"
+ "precision mediump float;\n"
+ "varying vec2 interp_tc;\n"
+ "uniform sampler2D tex;\n"
+ "#ifdef GL_OES_EGL_image_external\n"
+ "uniform samplerExternalOES tex_external;\n"
+ "#endif\n"
+ "void main() {\n"
+ " vec4 color = texture2D(tex, interp_tc);\n"
+ "#ifdef GL_OES_EGL_image_external\n"
+ " color += texture2D(tex_external, interp_tc);\n"
+ "#endif\n"
+ " gl_FragColor = color;\n"
+ "}\n";
#else
static const char kFragmentShader[] = STRINGIZE(
varying vec2 interp_tc;
@@ -365,6 +373,10 @@ void RenderingHelper::Initialize(const RenderingHelperParams& params,
glUniform1i(glGetUniformLocation(program_, "tex_flip"), 0);
glUniform1i(glGetUniformLocation(program_, "tex"), 0);
+ GLint tex_external = glGetUniformLocation(program_, "tex_external");
+ if (tex_external != -1) {
+ glUniform1i(tex_external, 1);
+ }
int pos_location = glGetAttribLocation(program_, "in_pos");
glEnableVertexAttribArray(pos_location);
glVertexAttribPointer(pos_location, 2, GL_FLOAT, GL_FALSE, 0, kVertices);
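The rewritten fragment shader declares the external sampler only when GL_OES_EGL_image_external is available, and the uniform lookup above tolerates a -1 location when it is not. If a caller wanted to check for the extension explicitly before relying on samplerExternalOES, a minimal (illustrative, not from this patch) check would be:

// Illustrative check only: GL_OES_EGL_image_external must be advertised before
// GL_TEXTURE_EXTERNAL_OES textures and samplerExternalOES can be used.
#include <string.h>
#include <GLES2/gl2.h>

bool HasEGLImageExternal() {
  const char* extensions =
      reinterpret_cast<const char*>(glGetString(GL_EXTENSIONS));
  return extensions &&
         strstr(extensions, "GL_OES_EGL_image_external") != NULL;
}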
@@ -406,32 +418,33 @@ void RenderingHelper::CreateTexture(int window_id,
window_id, texture_target, texture_id, done));
return;
}
- CHECK_EQ(static_cast<uint32>(GL_TEXTURE_2D), texture_target);
MakeCurrent(window_id);
glGenTextures(1, texture_id);
- glBindTexture(GL_TEXTURE_2D, *texture_id);
+ glBindTexture(texture_target, *texture_id);
int dimensions_id = window_id % frame_dimensions_.size();
- glTexImage2D(GL_TEXTURE_2D,
- 0,
- GL_RGBA,
- frame_dimensions_[dimensions_id].width(),
- frame_dimensions_[dimensions_id].height(),
- 0,
- GL_RGBA,
- GL_UNSIGNED_BYTE,
- NULL);
- glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
- glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ if (texture_target == GL_TEXTURE_2D) {
+ glTexImage2D(GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ frame_dimensions_[dimensions_id].width(),
+ frame_dimensions_[dimensions_id].height(),
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ NULL);
+ }
+ glTexParameteri(texture_target, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ glTexParameteri(texture_target, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
// OpenGLES2.0.25 section 3.8.2 requires CLAMP_TO_EDGE for NPOT textures.
- glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
- glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ glTexParameteri(texture_target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ glTexParameteri(texture_target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
CHECK_EQ(static_cast<int>(glGetError()), GL_NO_ERROR);
CHECK(texture_id_to_surface_index_.insert(
std::make_pair(*texture_id, window_id)).second);
done->Signal();
}
-void RenderingHelper::RenderTexture(uint32 texture_id) {
+void RenderingHelper::RenderTexture(uint32 texture_target, uint32 texture_id) {
CHECK_EQ(base::MessageLoop::current(), message_loop_);
size_t window_id = texture_id_to_surface_index_[texture_id];
MakeCurrent(window_id);
@@ -460,8 +473,19 @@ void RenderingHelper::RenderTexture(uint32 texture_id) {
glUniform1i(glGetUniformLocation(program_, "tex_flip"), 1);
}
- glActiveTexture(GL_TEXTURE0);
- glBindTexture(GL_TEXTURE_2D, texture_id);
+ // Unbound texture samplers default to (0, 0, 0, 1). Use this fact to switch
+  // between GL_TEXTURE_2D and GL_TEXTURE_EXTERNAL_OES as appropriate.
+ if (texture_target == GL_TEXTURE_2D) {
+ glActiveTexture(GL_TEXTURE0 + 0);
+ glBindTexture(GL_TEXTURE_2D, texture_id);
+ glActiveTexture(GL_TEXTURE0 + 1);
+ glBindTexture(texture_target, 0);
+ } else if (texture_target == GL_TEXTURE_EXTERNAL_OES) {
+ glActiveTexture(GL_TEXTURE0 + 0);
+ glBindTexture(GL_TEXTURE_2D, 0);
+ glActiveTexture(GL_TEXTURE0 + 1);
+ glBindTexture(texture_target, texture_id);
+ }
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
CHECK_EQ(static_cast<int>(glGetError()), GL_NO_ERROR);
@@ -473,7 +497,10 @@ void RenderingHelper::RenderTexture(uint32 texture_id) {
glBindFramebufferEXT(GL_FRAMEBUFFER, 0);
glViewport(0, 0, width, height);
glScissor(0, 0, width, height);
+ glActiveTexture(GL_TEXTURE0 + 0);
glBindTexture(GL_TEXTURE_2D, thumbnails_texture_id_);
+ glActiveTexture(GL_TEXTURE0 + 1);
+ glBindTexture(texture_target, 0);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
}
@@ -552,6 +579,7 @@ void RenderingHelper::GetThumbnailsAsRGB(std::vector<unsigned char>* rgb,
GL_RGBA,
GL_UNSIGNED_BYTE,
&rgba[0]);
+ glBindFramebufferEXT(GL_FRAMEBUFFER, 0);
rgb->resize(num_pixels * 3);
// Drop the alpha channel, but check as we go that it is all 0xff.
bool solid = true;
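The added glBindFramebufferEXT(GL_FRAMEBUFFER, 0) call unbinds the thumbnail FBO once its pixels have been read back. The RGBA-to-RGB conversion the surrounding comments describe amounts to roughly the following sketch (names assumed; not the file's exact loop):

// Sketch of dropping the alpha channel while verifying it is fully opaque.
// |rgba| holds num_pixels RGBA bytes read back with glReadPixels.
#include <cstddef>
#include <vector>

bool DropAlpha(const std::vector<unsigned char>& rgba,
               size_t num_pixels,
               std::vector<unsigned char>* rgb) {
  rgb->resize(num_pixels * 3);
  bool solid = true;
  for (size_t i = 0; i < num_pixels; ++i) {
    (*rgb)[i * 3 + 0] = rgba[i * 4 + 0];
    (*rgb)[i * 3 + 1] = rgba[i * 4 + 1];
    (*rgb)[i * 3 + 2] = rgba[i * 4 + 2];
    solid = solid && (rgba[i * 4 + 3] == 0xff);  // Alpha must be opaque.
  }
  return solid;
}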
diff --git a/content/common/gpu/media/rendering_helper.h b/content/common/gpu/media/rendering_helper.h
index aee495a..7890b53 100644
--- a/content/common/gpu/media/rendering_helper.h
+++ b/content/common/gpu/media/rendering_helper.h
@@ -69,8 +69,8 @@ class RenderingHelper {
uint32* texture_id,
base::WaitableEvent* done);
- // Render |texture_id| to the screen.
- void RenderTexture(uint32 texture_id);
+ // Render |texture_id| to the screen using target |texture_target|.
+ void RenderTexture(uint32 texture_target, uint32 texture_id);
// Delete |texture_id|.
void DeleteTexture(uint32 texture_id);
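A caller now creates the texture with whatever target the decoder reports and passes the same target back when rendering. A hypothetical caller, mirroring the unittest changes below:

// Hypothetical caller of the updated two-argument RenderTexture(); |helper|
// and |window_id| are assumed to exist, as in the unittest below.
void CreateAndRenderExternalTexture(RenderingHelper* helper, int window_id) {
  uint32 texture_id = 0;
  base::WaitableEvent done(false, false);
  helper->CreateTexture(window_id, GL_TEXTURE_EXTERNAL_OES, &texture_id, &done);
  done.Wait();
  // ... the decoder attaches its EGLImage to |texture_id| and delivers a
  // picture; rendering then uses the same target the texture was created with.
  helper->RenderTexture(GL_TEXTURE_EXTERNAL_OES, texture_id);
}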
diff --git a/content/common/gpu/media/video_decode_accelerator_unittest.cc b/content/common/gpu/media/video_decode_accelerator_unittest.cc
index e0d938d..1297c03 100644
--- a/content/common/gpu/media/video_decode_accelerator_unittest.cc
+++ b/content/common/gpu/media/video_decode_accelerator_unittest.cc
@@ -453,6 +453,7 @@ class GLRenderingVDAClient
PictureBufferById picture_buffers_by_id_;
base::TimeTicks initialize_done_ticks_;
int profile_;
+ GLenum texture_target_;
bool suppress_rendering_;
std::vector<base::TimeTicks> frame_delivery_times_;
int delay_reuse_after_frame_num_;
@@ -500,6 +501,7 @@ GLRenderingVDAClient::GLRenderingVDAClient(
num_decoded_frames_(0),
num_done_bitstream_buffers_(0),
profile_(profile),
+ texture_target_(0),
suppress_rendering_(suppress_rendering),
delay_reuse_after_frame_num_(delay_reuse_after_frame_num),
decode_calls_per_second_(decode_calls_per_second) {
@@ -576,12 +578,13 @@ void GLRenderingVDAClient::ProvidePictureBuffers(
return;
std::vector<media::PictureBuffer> buffers;
+ texture_target_ = texture_target;
for (uint32 i = 0; i < requested_num_of_buffers; ++i) {
uint32 id = picture_buffers_by_id_.size();
uint32 texture_id;
base::WaitableEvent done(false, false);
rendering_helper_->CreateTexture(
- rendering_window_id_, texture_target, &texture_id, &done);
+ rendering_window_id_, texture_target_, &texture_id, &done);
done.Wait();
CHECK(outstanding_texture_ids_.insert(texture_id).second);
media::PictureBuffer* buffer =
@@ -636,7 +639,8 @@ void GLRenderingVDAClient::PictureReady(const media::Picture& picture) {
picture_buffers_by_id_[picture.picture_buffer_id()];
CHECK(picture_buffer);
if (!suppress_rendering_) {
- rendering_helper_->RenderTexture(picture_buffer->texture_id());
+ rendering_helper_->RenderTexture(texture_target_,
+ picture_buffer->texture_id());
}
if (num_decoded_frames() > delay_reuse_after_frame_num_) {
@@ -1517,9 +1521,7 @@ int main(int argc, char **argv) {
#if defined(OS_WIN)
content::DXVAVideoDecodeAccelerator::PreSandboxInitialization();
#elif defined(OS_CHROMEOS)
-#if defined(ARCH_CPU_ARMEL)
- content::ExynosVideoDecodeAccelerator::PreSandboxInitialization();
-#elif defined(ARCH_CPU_X86_FAMILY)
+#if defined(ARCH_CPU_X86_FAMILY)
content::VaapiWrapper::PreSandboxInitialization();
#endif // ARCH_CPU_ARMEL
#endif // OS_CHROMEOS
diff --git a/content/gpu/gpu_main.cc b/content/gpu/gpu_main.cc
index 0bed8c2..1aee08f 100644
--- a/content/gpu/gpu_main.cc
+++ b/content/gpu/gpu_main.cc
@@ -361,9 +361,7 @@ bool WarmUpSandbox(const CommandLine& command_line) {
}
}
-#if defined(OS_CHROMEOS) && defined(ARCH_CPU_ARMEL) && defined(USE_X11)
- ExynosVideoDecodeAccelerator::PreSandboxInitialization();
-#elif defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY) && defined(USE_X11)
+#if defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY) && defined(USE_X11)
VaapiWrapper::PreSandboxInitialization();
#endif