author    mlloyd@chromium.org <mlloyd@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2010-09-17 14:13:05 +0000
committer mlloyd@chromium.org <mlloyd@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2010-09-17 14:13:05 +0000
commit    425bbb99f4faa4bdf90d9bca09174baa467aeaae (patch)
tree      381893ee554de8b10f377bdd7bf7a4037f0183f1
parent    6d0593200cd191372b5fefe26fedb1dea2b15a64 (diff)
Revert 59785 - Implement FakeGlVideoDecodeEngine using FakeGlVideoDecodeContext
Defines UploadToVideoFrame in VideoDecodeContext. FakeGlVideoDecodeEngine now
uses FakeGlVideoDecodeContext for video frame allocation and uploading.

BUG=53714
TEST=Tree is green

Review URL: http://codereview.chromium.org/3312022

TBR=hclam@chromium.org
Review URL: http://codereview.chromium.org/3436014

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@59789 0039d316-1c4b-4281-b951-d872f2087c98
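The net API change this revert undoes is the extra VideoDecodeContext* parameter on VideoDecodeEngine::Initialize(). As a minimal sketch (not Chromium source; stub types stand in for the real MessageLoop, VideoCodecConfig, VideoCodecInfo, and EventHandler), the post-revert engine interface looks like this:

    // Sketch only: stub types for illustration, not the real media/ headers.
    struct MessageLoop {};
    struct VideoCodecConfig { int width; int height; };
    struct VideoCodecInfo { bool success; };

    class VideoDecodeEngine {
     public:
      struct EventHandler {
        virtual ~EventHandler() {}
        virtual void OnInitializeComplete(const VideoCodecInfo& info) = 0;
      };
      virtual ~VideoDecodeEngine() {}
      // Post-revert signature: no VideoDecodeContext* parameter; frame
      // allocation stays inside the engine for now. The engine must notify
      // the handler whether or not initialization succeeds.
      virtual void Initialize(MessageLoop* message_loop,
                              EventHandler* event_handler,
                              const VideoCodecConfig& config) = 0;
    };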
-rw-r--r--  chrome/chrome.gyp                                    |   2
-rw-r--r--  chrome/gpu/gpu_video_decoder.cc                      |  40
-rw-r--r--  chrome/gpu/gpu_video_decoder.h                       |  21
-rw-r--r--  chrome/gpu/media/fake_gl_video_decode_engine.cc      |  50
-rw-r--r--  chrome/gpu/media/fake_gl_video_decode_engine.h       |  20
-rw-r--r--  chrome/gpu/media/fake_gl_video_device.cc             |  60
-rw-r--r--  chrome/gpu/media/fake_gl_video_device.h              |  27
-rw-r--r--  chrome/gpu/media/gpu_video_device.h                  |  17
-rw-r--r--  chrome/renderer/media/gles2_video_decode_context.cc  |   5
-rw-r--r--  chrome/renderer/media/gles2_video_decode_context.h   |   5
-rw-r--r--  media/filters/ffmpeg_video_decoder.cc                |   2
-rw-r--r--  media/filters/ffmpeg_video_decoder_unittest.cc       |   8
-rw-r--r--  media/filters/omx_video_decoder.cc                   |   2
-rw-r--r--  media/mf/mft_h264_decoder.cc                         |   1
-rw-r--r--  media/mf/mft_h264_decoder.h                          |   3
-rw-r--r--  media/mf/mft_h264_decoder_example.cc                 |   6
-rw-r--r--  media/mf/test/mft_h264_decoder_unittest.cc           |  22
-rw-r--r--  media/tools/omx_test/omx_test.cc                     |   2
-rw-r--r--  media/video/ffmpeg_video_decode_engine.cc            |   1
-rw-r--r--  media/video/ffmpeg_video_decode_engine.h             |   1
-rw-r--r--  media/video/ffmpeg_video_decode_engine_unittest.cc   |   8
-rw-r--r--  media/video/omx_video_decode_engine.cc               |   1
-rw-r--r--  media/video/omx_video_decode_engine.h                |   1
-rw-r--r--  media/video/video_decode_context.h                   |  28
-rw-r--r--  media/video/video_decode_engine.h                    |  17
25 files changed, 64 insertions(+), 286 deletions(-)
diff --git a/chrome/chrome.gyp b/chrome/chrome.gyp
index b9ede11..d561e3a 100644
--- a/chrome/chrome.gyp
+++ b/chrome/chrome.gyp
@@ -721,8 +721,6 @@
'gpu/media/gpu_video_device.h',
'gpu/media/fake_gl_video_decode_engine.cc',
'gpu/media/fake_gl_video_decode_engine.h',
- 'gpu/media/fake_gl_video_device.cc',
- 'gpu/media/fake_gl_video_device.h',
],
'include_dirs': [
'..',
diff --git a/chrome/gpu/gpu_video_decoder.cc b/chrome/gpu/gpu_video_decoder.cc
index bf5881ed..eb1823a 100644
--- a/chrome/gpu/gpu_video_decoder.cc
+++ b/chrome/gpu/gpu_video_decoder.cc
@@ -4,11 +4,9 @@
#include "chrome/gpu/gpu_video_decoder.h"
-#include "chrome/common/child_thread.h"
#include "chrome/common/gpu_messages.h"
#include "chrome/gpu/gpu_channel.h"
#include "chrome/gpu/media/fake_gl_video_decode_engine.h"
-#include "chrome/gpu/media/fake_gl_video_device.h"
#include "media/base/data_buffer.h"
#include "media/base/video_frame.h"
@@ -112,10 +110,6 @@ void GpuVideoDecoder::ConsumeVideoFrame(scoped_refptr<VideoFrame> frame) {
}
void* GpuVideoDecoder::GetDevice() {
- bool ret = gles2_decoder_->MakeCurrent();
- DCHECK(ret) << "Failed to switch context";
-
- // Simply delegate the method call to GpuVideoDevice.
return video_device_->GetDevice();
}
@@ -161,9 +155,6 @@ void GpuVideoDecoder::ReleaseAllVideoFrames() {
//
// And finally we'll send IPC commands to IpcVideoDecoder to destroy all
// GL textures generated.
- bool ret = gles2_decoder_->MakeCurrent();
- DCHECK(ret) << "Failed to switch context";
-
for (VideoFrameMap::iterator i = video_frame_map_.begin();
i != video_frame_map_.end(); ++i) {
video_device_->ReleaseVideoFrame(i->second);
@@ -172,22 +163,6 @@ void GpuVideoDecoder::ReleaseAllVideoFrames() {
SendReleaseAllVideoFrames();
}
-void GpuVideoDecoder::UploadToVideoFrame(void* buffer,
- scoped_refptr<media::VideoFrame> frame,
- Task* task) {
- // This method is called by VideoDecodeEngine to upload a buffer to a
- // VideoFrame. We should just delegate this to GpuVideoDevice which contains
- // the actual implementation.
- bool ret = gles2_decoder_->MakeCurrent();
- DCHECK(ret) << "Failed to switch context";
-
- // Actually doing the upload on the main thread.
- ret = video_device_->UploadToVideoFrame(buffer, frame);
- DCHECK(ret) << "Failed to upload video content to a VideoFrame.";
- task->Run();
- delete task;
-}
-
void GpuVideoDecoder::Destroy(Task* task) {
// TODO(hclam): I still need to think what I should do here.
}
@@ -208,17 +183,17 @@ GpuVideoDecoder::GpuVideoDecoder(
// TODO(jiesun): find a better way to determine which VideoDecodeEngine
// to return on current platform.
decode_engine_.reset(new FakeGlVideoDecodeEngine());
- video_device_.reset(new FakeGlVideoDevice());
}
void GpuVideoDecoder::OnInitialize(const GpuVideoDecoderInitParam& param) {
// TODO(hclam): Initialize the VideoDecodeContext first.
+
// TODO(jiesun): codec id should come from |param|.
config_.codec = media::kCodecH264;
config_.width = param.width;
config_.height = param.height;
config_.opaque_context = NULL;
- decode_engine_->Initialize(NULL, this, this, config_);
+ decode_engine_->Initialize(NULL, this, config_);
}
void GpuVideoDecoder::OnUninitialize() {
@@ -249,6 +224,8 @@ void GpuVideoDecoder::OnEmptyThisBuffer(
void GpuVideoDecoder::OnFillThisBuffer(
const GpuVideoDecoderOutputBufferParam& param) {
// Switch context before calling to the decode engine.
+ // TODO(hclam): This is temporary to allow FakeGlVideoDecodeEngine to issue
+ // GL commands correctly.
bool ret = gles2_decoder_->MakeCurrent();
DCHECK(ret) << "Failed to switch context";
@@ -275,18 +252,19 @@ void GpuVideoDecoder::OnVideoFrameAllocated(int32 frame_id,
// GpuVideoDevice. The VideoFrame created is added to the internal map.
// If we have generated enough VideoFrames, we call |allocation_callback_| to
// complete the allocation process.
+ media::VideoFrame::GlTexture gl_textures[media::VideoFrame::kMaxPlanes];
+ memset(gl_textures, 0, sizeof(gl_textures));
for (size_t i = 0; i < textures.size(); ++i) {
- media::VideoFrame::GlTexture gl_texture;
// Translate the client texture id to service texture id.
- bool ret = gles2_decoder_->GetServiceTextureId(textures[i], &gl_texture);
+ bool ret = gles2_decoder_->GetServiceTextureId(textures[i],
+ gl_textures + i);
DCHECK(ret) << "Cannot translate client texture ID to service ID";
- textures[i] = gl_texture;
}
scoped_refptr<media::VideoFrame> frame;
bool ret = video_device_->CreateVideoFrameFromGlTextures(
pending_allocation_->width, pending_allocation_->height,
- pending_allocation_->format, textures, &frame);
+ pending_allocation_->format, gl_textures, &frame);
DCHECK(ret) << "Failed to allocation VideoFrame from GL textures)";
pending_allocation_->frames->push_back(frame);
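The hunk above replaces a per-iteration local with a zeroed, fixed-size array sized to media::VideoFrame::kMaxPlanes, so unused planes stay 0. A hedged sketch of that pattern in isolation; the function pointer stands in for the real GLES2 decoder's GetServiceTextureId(), and kMaxPlanes = 4 is an assumption:

    #include <cstddef>
    #include <cstring>
    #include <vector>

    typedef unsigned int GlTexture;
    const size_t kMaxPlanes = 4;  // assumed to mirror VideoFrame::kMaxPlanes

    // Translate client-visible texture IDs to service texture IDs, writing
    // them into a zeroed array so planes without a texture remain 0.
    bool TranslateTextures(const std::vector<GlTexture>& client_ids,
                           bool (*get_service_id)(GlTexture, GlTexture*),
                           GlTexture service_ids[kMaxPlanes]) {
      std::memset(service_ids, 0, sizeof(GlTexture) * kMaxPlanes);
      for (size_t i = 0; i < client_ids.size() && i < kMaxPlanes; ++i) {
        if (!get_service_id(client_ids[i], &service_ids[i]))
          return false;  // unknown client ID; the real code DCHECKs here
      }
      return true;
    }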
diff --git a/chrome/gpu/gpu_video_decoder.h b/chrome/gpu/gpu_video_decoder.h
index d4c9b09..9f77700 100644
--- a/chrome/gpu/gpu_video_decoder.h
+++ b/chrome/gpu/gpu_video_decoder.h
@@ -39,7 +39,7 @@ class GpuChannel;
// In addition to delegating video related commands to VideoDecodeEngine it
// has the following important functions:
//
-// BUFFER ALLOCATION
+// Buffer Allocation
//
// VideoDecodeEngine requires platform specific video frame buffer to operate.
// In order to abstract the platform specific bits GpuVideoDecoderContext is
@@ -66,20 +66,12 @@ class GpuChannel;
// VideoFrame(s) from the textures.
// 6. GpuVideoDecoder sends the VideoFrame(s) generated to VideoDecodeEngine.
//
-// BUFFER UPLOADING
-//
-// A VideoDecodeEngine always produces device specific buffers. In order to
-// use them in Chrome we always upload them to GL textures. The upload step is
-// different on each platform and each subsystem. We perform these special
-// upload steps by using a GpuVideoDevice written for each
-// VideoDecodeEngine.
-//
-// BUFFER MAPPING
+// Buffer Translation
//
// GpuVideoDecoder will be working with VideoDecodeEngine, they exchange
-// buffers that are only meaningful to VideoDecodeEngine. In order to map that
-// to something we can transport in the IPC channel we need a mapping between
-// VideoFrame and buffer ID known between GpuVideoDecoder and
+// buffers that are only meaningful to VideoDecodeEngine. In order to translate
+// that to something we can transport in the IPC channel we need a mapping
+// between VideoFrame and buffer ID known between GpuVideoDecoder and
// GpuVideoDecoderHost in the Renderer process.
//
// After texture allocation and VideoFrame allocation are done, GpuVideoDecoder
@@ -113,9 +105,6 @@ class GpuVideoDecoder
int n, size_t width, size_t height, media::VideoFrame::Format format,
std::vector<scoped_refptr<media::VideoFrame> >* frames, Task* task);
virtual void ReleaseAllVideoFrames();
- virtual void UploadToVideoFrame(void* buffer,
- scoped_refptr<media::VideoFrame> frame,
- Task* task);
virtual void Destroy(Task* task);
// Constructor and destructor.
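The "Buffer Translation" notes in the header above describe mapping IPC-transportable buffer IDs to VideoFrame objects. A minimal sketch of that bookkeeping, assuming a stub VideoFrame and plain pointers where the real code uses scoped_refptr:

    #include <cstddef>
    #include <map>

    struct VideoFrame {};  // stub for the ref-counted media::VideoFrame

    // GpuVideoDecoder sends small integer IDs over the IPC channel and maps
    // them back to the frames the decode engine actually consumes.
    typedef std::map<int, VideoFrame*> VideoFrameMap;

    VideoFrame* LookupFrame(const VideoFrameMap& frames, int frame_id) {
      VideoFrameMap::const_iterator it = frames.find(frame_id);
      return it == frames.end() ? NULL : it->second;
    }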
diff --git a/chrome/gpu/media/fake_gl_video_decode_engine.cc b/chrome/gpu/media/fake_gl_video_decode_engine.cc
index b3d093f..52e2dfd 100644
--- a/chrome/gpu/media/fake_gl_video_decode_engine.cc
+++ b/chrome/gpu/media/fake_gl_video_decode_engine.cc
@@ -4,8 +4,7 @@
#include "chrome/gpu/media/fake_gl_video_decode_engine.h"
-#include "media/base/video_frame.h"
-#include "media/video/video_decode_context.h"
+#include "app/gfx/gl/gl_bindings.h"
FakeGlVideoDecodeEngine::FakeGlVideoDecodeEngine()
: width_(0),
@@ -19,33 +18,11 @@ FakeGlVideoDecodeEngine::~FakeGlVideoDecodeEngine() {
void FakeGlVideoDecodeEngine::Initialize(
MessageLoop* message_loop,
media::VideoDecodeEngine::EventHandler* event_handler,
- media::VideoDecodeContext* context,
const media::VideoCodecConfig& config) {
handler_ = event_handler;
- context_ = context;
width_ = config.width;
height_ = config.height;
- // Create an internal VideoFrame that we can write to. This is going to be
- // uploaded through VideoDecodeContext.
- media::VideoFrame::CreateFrame(
- media::VideoFrame::RGBA, width_, height_, base::TimeDelta(),
- base::TimeDelta(), &internal_frame_);
- memset(internal_frame_->data(media::VideoFrame::kRGBPlane), 0,
- height_ * internal_frame_->stride(media::VideoFrame::kRGBPlane));
-
- // Use VideoDecodeContext to allocate VideoFrame(s) that can be consumed by
- // an external body.
- context_->AllocateVideoFrames(
- 1, width_, height_, media::VideoFrame::RGBA, &external_frames_,
- NewRunnableMethod(this,
- &FakeGlVideoDecodeEngine::AllocationCompleteTask));
-}
-
-void FakeGlVideoDecodeEngine::AllocationCompleteTask() {
- DCHECK_EQ(1u, external_frames_.size());
- DCHECK_EQ(media::VideoFrame::TYPE_GL_TEXTURE, external_frames_[0]->type());
-
media::VideoCodecInfo info;
info.success = true;
info.provides_buffers = true;
@@ -53,6 +30,9 @@ void FakeGlVideoDecodeEngine::AllocationCompleteTask() {
info.stream_info.surface_type = media::VideoFrame::TYPE_GL_TEXTURE;
info.stream_info.surface_width = width_;
info.stream_info.surface_height = height_;
+
+ // TODO(hclam): When we have VideoDecodeContext we should use it to allocate
+ // video frames.
handler_->OnInitializeComplete(info);
}
@@ -82,7 +62,7 @@ void FakeGlVideoDecodeEngine::ProduceVideoFrame(
scoped_array<uint8> buffer(new uint8[size]);
memset(buffer.get(), 0, size);
- uint8* row = internal_frame_->data(media::VideoFrame::kRGBPlane);
+ uint8* row = buffer.get();
static int seed = 0;
for (int y = 0; y < height_; ++y) {
@@ -95,18 +75,14 @@ void FakeGlVideoDecodeEngine::ProduceVideoFrame(
}
++seed;
- // After we have filled the content upload the internal frame to the
- // VideoFrame allocated through VideoDecodeContext.
- context_->UploadToVideoFrame(
- internal_frame_, external_frames_[0],
- NewRunnableMethod(this, &FakeGlVideoDecodeEngine::UploadCompleteTask,
- external_frames_[0]));
-}
+ // Assume we are in the right context and then upload the content to the
+ // texture.
+ glBindTexture(GL_TEXTURE_2D,
+ frame->gl_texture(media::VideoFrame::kRGBPlane));
+ glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width_, height_, 0, GL_RGBA,
+ GL_UNSIGNED_BYTE, buffer.get());
-void FakeGlVideoDecodeEngine::UploadCompleteTask(
- scoped_refptr<media::VideoFrame> frame) {
- // |frame| is the upload target. We can immediately send this frame out.
+ // We are done generating data for the frame, so hand it to the handler.
+ // TODO(hclam): Advance the timestamp every time we call this method.
handler_->ConsumeVideoFrame(frame);
}
-
-DISABLE_RUNNABLE_METHOD_REFCOUNT(FakeGlVideoDecodeEngine);
diff --git a/chrome/gpu/media/fake_gl_video_decode_engine.h b/chrome/gpu/media/fake_gl_video_decode_engine.h
index 164c8c4..c3eeb3e 100644
--- a/chrome/gpu/media/fake_gl_video_decode_engine.h
+++ b/chrome/gpu/media/fake_gl_video_decode_engine.h
@@ -5,13 +5,10 @@
#ifndef CHROME_GPU_MEDIA_FAKE_GL_VIDEO_DECODE_ENGINE_H_
#define CHROME_GPU_MEDIA_FAKE_GL_VIDEO_DECODE_ENGINE_H_
-#include <vector>
-
#include "base/scoped_ptr.h"
#include "media/video/video_decode_engine.h"
namespace media {
-class VideoDecodeContext;
class VideoFrame;
} // namespace media
@@ -23,7 +20,6 @@ class FakeGlVideoDecodeEngine : public media::VideoDecodeEngine {
virtual void Initialize(
MessageLoop* message_loop,
media::VideoDecodeEngine::EventHandler* event_handler,
- media::VideoDecodeContext* context,
const media::VideoCodecConfig& config);
virtual void Uninitialize();
@@ -33,25 +29,9 @@ class FakeGlVideoDecodeEngine : public media::VideoDecodeEngine {
virtual void ProduceVideoFrame(scoped_refptr<media::VideoFrame> frame);
private:
- // This method is called when video frames allocation is completed by
- // VideoDecodeContext.
- void AllocationCompleteTask();
-
- // This method is called by VideoDecodeContext when uploading to a VideoFrame
- // has completed.
- void UploadCompleteTask(scoped_refptr<media::VideoFrame> frame);
-
int width_;
int height_;
media::VideoDecodeEngine::EventHandler* handler_;
- media::VideoDecodeContext* context_;
-
- // Internal video frame that is to be uploaded through VideoDecodeContext.
- scoped_refptr<media::VideoFrame> internal_frame_;
-
- // VideoFrame(s) allocated through VideoDecodeContext. These frames are
- // opaque to us. And we need an extra upload step.
- std::vector<scoped_refptr<media::VideoFrame> > external_frames_;
DISALLOW_COPY_AND_ASSIGN(FakeGlVideoDecodeEngine);
};
diff --git a/chrome/gpu/media/fake_gl_video_device.cc b/chrome/gpu/media/fake_gl_video_device.cc
deleted file mode 100644
index df1b5b1..0000000
--- a/chrome/gpu/media/fake_gl_video_device.cc
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright (c) 2010 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "chrome/gpu/media/fake_gl_video_device.h"
-
-#include "app/gfx/gl/gl_bindings.h"
-#include "media/base/video_frame.h"
-
-void* FakeGlVideoDevice::GetDevice() {
- // No actual hardware device should be used.
- return NULL;
-}
-
-bool FakeGlVideoDevice::CreateVideoFrameFromGlTextures(
- size_t width, size_t height, media::VideoFrame::Format format,
- const std::vector<media::VideoFrame::GlTexture>& textures,
- scoped_refptr<media::VideoFrame>* frame) {
- media::VideoFrame::GlTexture texture_array[media::VideoFrame::kMaxPlanes];
- memset(texture_array, 0, sizeof(texture_array));
-
- for (size_t i = 0; i < textures.size(); ++i) {
- texture_array[i] = textures[i];
- }
-
- media::VideoFrame::CreateFrameGlTexture(format,
- width,
- height,
- texture_array,
- base::TimeDelta(),
- base::TimeDelta(),
- frame);
- return *frame != NULL;
-}
-
-void FakeGlVideoDevice::ReleaseVideoFrame(
- const scoped_refptr<media::VideoFrame>& frame) {
- // We don't need to do anything here because we didn't allocate any resources
- // for the VideoFrame(s) generated.
-}
-
-bool FakeGlVideoDevice::UploadToVideoFrame(
- void* buffer, scoped_refptr<media::VideoFrame> frame) {
- // Assume we are in the right context and then upload the content to the
- // texture.
- glBindTexture(GL_TEXTURE_2D,
- frame->gl_texture(media::VideoFrame::kRGBPlane));
-
- // |buffer| is also a VideoFrame.
- scoped_refptr<media::VideoFrame> frame_to_upload(
- reinterpret_cast<media::VideoFrame*>(buffer));
- DCHECK_EQ(frame->width(), frame_to_upload->width());
- DCHECK_EQ(frame->height(), frame_to_upload->height());
- DCHECK_EQ(frame->format(), frame_to_upload->format());
- glTexImage2D(
- GL_TEXTURE_2D, 0, GL_RGBA, frame_to_upload->width(),
- frame_to_upload->height(), 0, GL_RGBA,
- GL_UNSIGNED_BYTE, frame_to_upload->data(media::VideoFrame::kRGBPlane));
- return true;
-}
diff --git a/chrome/gpu/media/fake_gl_video_device.h b/chrome/gpu/media/fake_gl_video_device.h
deleted file mode 100644
index 711c3ef..0000000
--- a/chrome/gpu/media/fake_gl_video_device.h
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright (c) 2010 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CHROME_GPU_MEDIA_FAKE_GL_VIDEO_DEVICE_H_
-#define CHROME_GPU_MEDIA_FAKE_GL_VIDEO_DEVICE_H_
-
-#include "chrome/gpu/media/gpu_video_device.h"
-
-// A simple GpuVideoDevice that creates VideoFrame(s) with GL textures.
-// It uploads frames in RGBA format in system memory to the GL texture.
-class FakeGlVideoDevice : public GpuVideoDevice {
- public:
- virtual ~FakeGlVideoDevice() {}
-
- virtual void* GetDevice();
- virtual bool CreateVideoFrameFromGlTextures(
- size_t width, size_t height, media::VideoFrame::Format format,
- const std::vector<media::VideoFrame::GlTexture>& textures,
- scoped_refptr<media::VideoFrame>* frame);
- virtual void ReleaseVideoFrame(
- const scoped_refptr<media::VideoFrame>& frame);
- virtual bool UploadToVideoFrame(void* buffer,
- scoped_refptr<media::VideoFrame> frame);
-};
-
-#endif // CHROME_GPU_MEDIA_FAKE_GL_VIDEO_DEVICE_H_
diff --git a/chrome/gpu/media/gpu_video_device.h b/chrome/gpu/media/gpu_video_device.h
index 0556903..7998070 100644
--- a/chrome/gpu/media/gpu_video_device.h
+++ b/chrome/gpu/media/gpu_video_device.h
@@ -5,8 +5,6 @@
#ifndef CHROME_GPU_MEDIA_GPU_VIDEO_DEVICE_H_
#define CHROME_GPU_MEDIA_GPU_VIDEO_DEVICE_H_
-#include <vector>
-
#include "media/base/video_frame.h"
#include "media/video/video_decode_context.h"
@@ -33,25 +31,16 @@ class GpuVideoDevice {
//
// VideoFrame generated is used by VideoDecodeEngine for output buffer.
//
- // |frame| will contain the VideoFrame generated.
+ // |frames| will contain the set of VideoFrame(s) generated.
//
// Return true if the operation was successful.
virtual bool CreateVideoFrameFromGlTextures(
size_t width, size_t height, media::VideoFrame::Format format,
- const std::vector<media::VideoFrame::GlTexture>& textures,
+ media::VideoFrame::GlTexture const* textures,
scoped_refptr<media::VideoFrame>* frame) = 0;
// Release VideoFrame generated.
- virtual void ReleaseVideoFrame(
- const scoped_refptr<media::VideoFrame>& frame) = 0;
-
- // Upload a device specific buffer to a VideoFrame object that can be used in
- // the GPU process.
- //
- // Return true if successful.
- // TODO(hclam): Rename this to ConvertToVideoFrame().
- virtual bool UploadToVideoFrame(void* buffer,
- scoped_refptr<media::VideoFrame> frame) = 0;
+ virtual void ReleaseVideoFrame(scoped_refptr<media::VideoFrame> frame) = 0;
};
#endif // CHROME_GPU_MEDIA_GPU_VIDEO_DEVICE_H_
diff --git a/chrome/renderer/media/gles2_video_decode_context.cc b/chrome/renderer/media/gles2_video_decode_context.cc
index b7eec02..2c04282 100644
--- a/chrome/renderer/media/gles2_video_decode_context.cc
+++ b/chrome/renderer/media/gles2_video_decode_context.cc
@@ -29,11 +29,6 @@ void Gles2VideoDecodeContext::ReleaseAllVideoFrames() {
// TODO(hclam): Implement.
}
-void Gles2VideoDecodeContext::UploadToVideoFrame(
- void* buffer, scoped_refptr<media::VideoFrame> frame, Task* task) {
- // TODO(hclam): Implement.
-}
-
void Gles2VideoDecodeContext::Destroy(Task* task) {
// TODO(hclam): Implement.
}
diff --git a/chrome/renderer/media/gles2_video_decode_context.h b/chrome/renderer/media/gles2_video_decode_context.h
index 4f556ab..35958b6 100644
--- a/chrome/renderer/media/gles2_video_decode_context.h
+++ b/chrome/renderer/media/gles2_video_decode_context.h
@@ -99,10 +99,7 @@ class Gles2VideoDecodeContext : public media::VideoDecodeContext {
int n, size_t width, size_t height, media::VideoFrame::Format format,
std::vector<scoped_refptr<media::VideoFrame> >* frames, Task* task);
virtual void ReleaseAllVideoFrames();
- virtual void UploadToVideoFrame(void* buffer,
- scoped_refptr<media::VideoFrame> frame,
- Task* task);
- virtual void Destroy(Task* task);
+ virtual void Destroy(Task* task) = 0;
//--------------------------------------------------------------------------
// Any thread
diff --git a/media/filters/ffmpeg_video_decoder.cc b/media/filters/ffmpeg_video_decoder.cc
index c79f679..6faa410e 100644
--- a/media/filters/ffmpeg_video_decoder.cc
+++ b/media/filters/ffmpeg_video_decoder.cc
@@ -95,7 +95,7 @@ void FFmpegVideoDecoder::Initialize(DemuxerStream* demuxer_stream,
config.opaque_context = av_stream;
config.width = width_;
config.height = height_;
- decode_engine_->Initialize(message_loop(), this, NULL, config);
+ decode_engine_->Initialize(message_loop(), this, config);
}
void FFmpegVideoDecoder::OnInitializeComplete(const VideoCodecInfo& info) {
diff --git a/media/filters/ffmpeg_video_decoder_unittest.cc b/media/filters/ffmpeg_video_decoder_unittest.cc
index 6ed176f..6f2eb95 100644
--- a/media/filters/ffmpeg_video_decoder_unittest.cc
+++ b/media/filters/ffmpeg_video_decoder_unittest.cc
@@ -18,7 +18,6 @@
#include "media/filters/ffmpeg_interfaces.h"
#include "media/filters/ffmpeg_video_decoder.h"
#include "media/video/video_decode_engine.h"
-#include "media/video/video_decode_context.h"
#include "testing/gtest/include/gtest/gtest.h"
using ::testing::_;
@@ -51,9 +50,8 @@ class MockFFmpegDemuxerStream : public MockDemuxerStream,
// TODO(hclam): Share this in a separate file.
class MockVideoDecodeEngine : public VideoDecodeEngine {
public:
- MOCK_METHOD4(Initialize, void(MessageLoop* message_loop,
+ MOCK_METHOD3(Initialize, void(MessageLoop* message_loop,
VideoDecodeEngine::EventHandler* event_handler,
- VideoDecodeContext* context,
const VideoCodecConfig& config));
MOCK_METHOD1(ConsumeVideoSample, void(scoped_refptr<Buffer> buffer));
MOCK_METHOD1(ProduceVideoFrame, void(scoped_refptr<VideoFrame> buffer));
@@ -184,7 +182,7 @@ class FFmpegVideoDecoderTest : public testing::Test {
EXPECT_CALL(*demuxer_, GetAVStream())
.WillOnce(Return(&stream_));
- EXPECT_CALL(*engine_, Initialize(_, _, _, _))
+ EXPECT_CALL(*engine_, Initialize(_, _, _))
.WillOnce(EngineInitialize(engine_, true));
EXPECT_CALL(callback_, OnFilterCallback());
@@ -266,7 +264,7 @@ TEST_F(FFmpegVideoDecoderTest, Initialize_EngineFails) {
EXPECT_CALL(*demuxer_, GetAVStream())
.WillOnce(Return(&stream_));
- EXPECT_CALL(*engine_, Initialize(_, _, _, _))
+ EXPECT_CALL(*engine_, Initialize(_, _, _))
.WillOnce(EngineInitialize(engine_, false));
EXPECT_CALL(host_, SetError(PIPELINE_ERROR_DECODE));
diff --git a/media/filters/omx_video_decoder.cc b/media/filters/omx_video_decoder.cc
index 82793de..80a5a13 100644
--- a/media/filters/omx_video_decoder.cc
+++ b/media/filters/omx_video_decoder.cc
@@ -110,7 +110,7 @@ void OmxVideoDecoder::Initialize(DemuxerStream* demuxer_stream,
config.opaque_context = NULL;
config.width = width_;
config.height = height_;
- omx_engine_->Initialize(message_loop(), this, NULL, config);
+ omx_engine_->Initialize(message_loop(), this, config);
}
void OmxVideoDecoder::OnInitializeComplete(const VideoCodecInfo& info) {
diff --git a/media/mf/mft_h264_decoder.cc b/media/mf/mft_h264_decoder.cc
index e8d6b05..6d85f70 100644
--- a/media/mf/mft_h264_decoder.cc
+++ b/media/mf/mft_h264_decoder.cc
@@ -170,7 +170,6 @@ MftH264Decoder::~MftH264Decoder() {
void MftH264Decoder::Initialize(
MessageLoop* message_loop,
VideoDecodeEngine::EventHandler* event_handler,
- VideoDecodeContext* context,
const VideoCodecConfig& config) {
LOG(INFO) << "MftH264Decoder::Initialize";
if (state_ != kUninitialized) {
diff --git a/media/mf/mft_h264_decoder.h b/media/mf/mft_h264_decoder.h
index 57c9e9f..61e3c65 100644
--- a/media/mf/mft_h264_decoder.h
+++ b/media/mf/mft_h264_decoder.h
@@ -36,8 +36,7 @@ class MftH264Decoder : public media::VideoDecodeEngine {
explicit MftH264Decoder(bool use_dxva, HWND draw_window);
~MftH264Decoder();
virtual void Initialize(MessageLoop* message_loop,
- VideoDecodeEngine::EventHandler* event_handler,
- VideoDecodeContext* context,
+ media::VideoDecodeEngine::EventHandler* event_handler,
const VideoCodecConfig& config);
virtual void Uninitialize();
virtual void Flush();
diff --git a/media/mf/mft_h264_decoder_example.cc b/media/mf/mft_h264_decoder_example.cc
index 0ed9553..788a2ca 100644
--- a/media/mf/mft_h264_decoder_example.cc
+++ b/media/mf/mft_h264_decoder_example.cc
@@ -362,7 +362,11 @@ static int Run(bool use_dxva, bool render, const std::string& input_file) {
return -1;
}
- mft->Initialize(MessageLoop::current(), handler.get(), NULL, config);
+ mft->Initialize(MessageLoop::current(), handler.get(), config);
+ if (!handler->info_.success) {
+ LOG(ERROR) << "Failed to initialize decoder";
+ return -1;
+ }
scoped_ptr<WindowObserver> observer;
if (render) {
observer.reset(new WindowObserver(reader.get(), mft.get()));
diff --git a/media/mf/test/mft_h264_decoder_unittest.cc b/media/mf/test/mft_h264_decoder_unittest.cc
index 11959f7..67dc07c 100644
--- a/media/mf/test/mft_h264_decoder_unittest.cc
+++ b/media/mf/test/mft_h264_decoder_unittest.cc
@@ -207,7 +207,7 @@ TEST_F(MftH264DecoderTest, DecoderInitMissingArgs) {
config.height = 600;
scoped_ptr<MftH264Decoder> decoder(new MftH264Decoder(false, NULL));
ASSERT_TRUE(decoder.get());
- decoder->Initialize(NULL, NULL, NULL, config);
+ decoder->Initialize(NULL, NULL, config);
EXPECT_EQ(MftH264Decoder::kUninitialized, decoder->state());
}
@@ -219,7 +219,7 @@ TEST_F(MftH264DecoderTest, DecoderInitNoDxva) {
config.height = 600;
scoped_ptr<MftH264Decoder> decoder(new MftH264Decoder(false, NULL));
ASSERT_TRUE(decoder.get());
- decoder->Initialize(&loop, &handler, NULL, config);
+ decoder->Initialize(&loop, &handler, config);
EXPECT_EQ(1, handler.init_count_);
EXPECT_EQ(MftH264Decoder::kNormal, decoder->state());
decoder->Uninitialize();
@@ -235,7 +235,7 @@ TEST_F(MftH264DecoderTest, DecoderInitDxva) {
ASSERT_TRUE(hwnd);
scoped_ptr<MftH264Decoder> decoder(new MftH264Decoder(true, hwnd));
ASSERT_TRUE(decoder.get());
- decoder->Initialize(&loop, &handler, NULL, config);
+ decoder->Initialize(&loop, &handler, config);
EXPECT_EQ(1, handler.init_count_);
EXPECT_EQ(MftH264Decoder::kNormal, decoder->state());
decoder->Uninitialize();
@@ -250,7 +250,7 @@ TEST_F(MftH264DecoderTest, DecoderUninit) {
config.height = 600;
scoped_ptr<MftH264Decoder> decoder(new MftH264Decoder(false, NULL));
ASSERT_TRUE(decoder.get());
- decoder->Initialize(&loop, &handler, NULL, config);
+ decoder->Initialize(&loop, &handler, config);
EXPECT_EQ(MftH264Decoder::kNormal, decoder->state());
decoder->Uninitialize();
EXPECT_EQ(1, handler.uninit_count_);
@@ -277,7 +277,7 @@ TEST_F(MftH264DecoderTest, InitWithNegativeDimensions) {
config.height = -456;
scoped_ptr<MftH264Decoder> decoder(new MftH264Decoder(false, NULL));
ASSERT_TRUE(decoder.get());
- decoder->Initialize(&loop, &handler, NULL, config);
+ decoder->Initialize(&loop, &handler, config);
EXPECT_EQ(MftH264Decoder::kNormal, decoder->state());
EXPECT_EQ(kDecoderMaxWidth, handler.info_.stream_info.surface_width);
EXPECT_EQ(kDecoderMaxHeight, handler.info_.stream_info.surface_height);
@@ -292,7 +292,7 @@ TEST_F(MftH264DecoderTest, InitWithTooHighDimensions) {
config.height = kDecoderMaxHeight + 1;
scoped_ptr<MftH264Decoder> decoder(new MftH264Decoder(false, NULL));
ASSERT_TRUE(decoder.get());
- decoder->Initialize(&loop, &handler, NULL, config);
+ decoder->Initialize(&loop, &handler, config);
EXPECT_EQ(MftH264Decoder::kNormal, decoder->state());
EXPECT_EQ(kDecoderMaxWidth, handler.info_.stream_info.surface_width);
EXPECT_EQ(kDecoderMaxHeight, handler.info_.stream_info.surface_height);
@@ -307,7 +307,7 @@ TEST_F(MftH264DecoderTest, DrainOnEmptyBuffer) {
config.height = 768;
scoped_ptr<MftH264Decoder> decoder(new MftH264Decoder(false, NULL));
ASSERT_TRUE(decoder.get());
- decoder->Initialize(&loop, &handler, NULL, config);
+ decoder->Initialize(&loop, &handler, config);
EXPECT_EQ(MftH264Decoder::kNormal, decoder->state());
scoped_refptr<Buffer> buffer(new DataBuffer(0));
@@ -336,7 +336,7 @@ TEST_F(MftH264DecoderTest, NoOutputOnGarbageInput) {
config.height = 768;
scoped_ptr<MftH264Decoder> decoder(new MftH264Decoder(false, NULL));
ASSERT_TRUE(decoder.get());
- decoder->Initialize(&loop, &handler, NULL, config);
+ decoder->Initialize(&loop, &handler, config);
EXPECT_EQ(MftH264Decoder::kNormal, decoder->state());
handler.SetReader(reader);
handler.SetDecoder(decoder.get());
@@ -364,7 +364,7 @@ TEST_F(MftH264DecoderTest, FlushAtStart) {
config.height = 768;
scoped_ptr<MftH264Decoder> decoder(new MftH264Decoder(false, NULL));
ASSERT_TRUE(decoder.get());
- decoder->Initialize(&loop, &handler, NULL, config);
+ decoder->Initialize(&loop, &handler, config);
EXPECT_EQ(MftH264Decoder::kNormal, decoder->state());
decoder->Flush();
@@ -384,7 +384,7 @@ TEST_F(MftH264DecoderTest, NoFlushAtStopped) {
config.height = 768;
scoped_ptr<MftH264Decoder> decoder(new MftH264Decoder(false, NULL));
ASSERT_TRUE(decoder.get());
- decoder->Initialize(&loop, &handler, NULL, config);
+ decoder->Initialize(&loop, &handler, config);
EXPECT_EQ(MftH264Decoder::kNormal, decoder->state());
handler.SetReader(reader);
handler.SetDecoder(decoder.get());
@@ -429,7 +429,7 @@ void DecodeValidVideo(const std::string& filename, int num_frames, bool dxva) {
ASSERT_TRUE(hwnd);
scoped_ptr<MftH264Decoder> decoder(new MftH264Decoder(dxva, hwnd));
ASSERT_TRUE(decoder.get());
- decoder->Initialize(&loop, &handler, NULL, config);
+ decoder->Initialize(&loop, &handler, config);
EXPECT_EQ(MftH264Decoder::kNormal, decoder->state());
handler.SetReader(reader);
handler.SetDecoder(decoder.get());
diff --git a/media/tools/omx_test/omx_test.cc b/media/tools/omx_test/omx_test.cc
index 505f060..44b728f 100644
--- a/media/tools/omx_test/omx_test.cc
+++ b/media/tools/omx_test/omx_test.cc
@@ -189,7 +189,7 @@ class TestApp : public base::RefCountedThreadSafe<TestApp>,
config.width = av_stream_->codec->width;
config.height = av_stream_->codec->height;
engine_.reset(new OmxVideoDecodeEngine());
- engine_->Initialize(&message_loop_, this, NULL, config);
+ engine_->Initialize(&message_loop_, this, config);
// Execute the message loop so that we can run tasks on it. This call
// will return when we call message_loop_.Quit().
diff --git a/media/video/ffmpeg_video_decode_engine.cc b/media/video/ffmpeg_video_decode_engine.cc
index 7a8181b..75be752 100644
--- a/media/video/ffmpeg_video_decode_engine.cc
+++ b/media/video/ffmpeg_video_decode_engine.cc
@@ -35,7 +35,6 @@ FFmpegVideoDecodeEngine::~FFmpegVideoDecodeEngine() {
void FFmpegVideoDecodeEngine::Initialize(
MessageLoop* message_loop,
VideoDecodeEngine::EventHandler* event_handler,
- VideoDecodeContext* context,
const VideoCodecConfig& config) {
allocator_.reset(new FFmpegVideoAllocator());
diff --git a/media/video/ffmpeg_video_decode_engine.h b/media/video/ffmpeg_video_decode_engine.h
index a7ce4e4..bc1e033 100644
--- a/media/video/ffmpeg_video_decode_engine.h
+++ b/media/video/ffmpeg_video_decode_engine.h
@@ -28,7 +28,6 @@ class FFmpegVideoDecodeEngine : public VideoDecodeEngine {
// Implementation of the VideoDecodeEngine Interface.
virtual void Initialize(MessageLoop* message_loop,
VideoDecodeEngine::EventHandler* event_handler,
- VideoDecodeContext* context,
const VideoCodecConfig& config);
virtual void ConsumeVideoSample(scoped_refptr<Buffer> buffer);
virtual void ProduceVideoFrame(scoped_refptr<VideoFrame> frame);
diff --git a/media/video/ffmpeg_video_decode_engine_unittest.cc b/media/video/ffmpeg_video_decode_engine_unittest.cc
index 9f737ba..7db2f55 100644
--- a/media/video/ffmpeg_video_decode_engine_unittest.cc
+++ b/media/video/ffmpeg_video_decode_engine_unittest.cc
@@ -91,7 +91,7 @@ class FFmpegVideoDecodeEngineTest : public testing::Test,
config_.height = kHeight;
EXPECT_CALL(*this, OnInitializeComplete(_))
.WillOnce(SaveInitializeResult(this));
- test_engine_->Initialize(MessageLoop::current(), this, NULL, config_);
+ test_engine_->Initialize(MessageLoop::current(), this, config_);
EXPECT_TRUE(info_.success);
}
@@ -143,7 +143,7 @@ TEST_F(FFmpegVideoDecodeEngineTest, Initialize_FindDecoderFails) {
config_.height = kHeight;
EXPECT_CALL(*this, OnInitializeComplete(_))
.WillOnce(SaveInitializeResult(this));
- test_engine_->Initialize(MessageLoop::current(), this, NULL, config_);
+ test_engine_->Initialize(MessageLoop::current(), this, config_);
EXPECT_FALSE(info_.success);
}
@@ -165,7 +165,7 @@ TEST_F(FFmpegVideoDecodeEngineTest, Initialize_InitThreadFails) {
config_.height = kHeight;
EXPECT_CALL(*this, OnInitializeComplete(_))
.WillOnce(SaveInitializeResult(this));
- test_engine_->Initialize(MessageLoop::current(), this, NULL, config_);
+ test_engine_->Initialize(MessageLoop::current(), this, config_);
EXPECT_FALSE(info_.success);
}
@@ -188,7 +188,7 @@ TEST_F(FFmpegVideoDecodeEngineTest, Initialize_OpenDecoderFails) {
config_.height = kHeight;
EXPECT_CALL(*this, OnInitializeComplete(_))
.WillOnce(SaveInitializeResult(this));
- test_engine_->Initialize(MessageLoop::current(), this, NULL, config_);
+ test_engine_->Initialize(MessageLoop::current(), this, config_);
EXPECT_FALSE(info_.success);
}
diff --git a/media/video/omx_video_decode_engine.cc b/media/video/omx_video_decode_engine.cc
index 0df0f1e..e1bcc7f 100644
--- a/media/video/omx_video_decode_engine.cc
+++ b/media/video/omx_video_decode_engine.cc
@@ -82,7 +82,6 @@ static void ResetParamHeader(const OmxVideoDecodeEngine& dec, T* param) {
void OmxVideoDecodeEngine::Initialize(
MessageLoop* message_loop,
VideoDecodeEngine::EventHandler* event_handler,
- VideoDecodeContext* context,
const VideoCodecConfig& config) {
DCHECK_EQ(message_loop, MessageLoop::current());
diff --git a/media/video/omx_video_decode_engine.h b/media/video/omx_video_decode_engine.h
index c5b3882..8347eed 100644
--- a/media/video/omx_video_decode_engine.h
+++ b/media/video/omx_video_decode_engine.h
@@ -28,7 +28,6 @@ class OmxVideoDecodeEngine : public VideoDecodeEngine {
// Implementation of the VideoDecodeEngine Interface.
virtual void Initialize(MessageLoop* message_loop,
VideoDecodeEngine::EventHandler* event_handler,
- VideoDecodeContext* context,
const VideoCodecConfig& config);
virtual void ConsumeVideoSample(scoped_refptr<Buffer> buffer);
virtual void ProduceVideoFrame(scoped_refptr<VideoFrame> frame);
diff --git a/media/video/video_decode_context.h b/media/video/video_decode_context.h
index f768a0a..795b136 100644
--- a/media/video/video_decode_context.h
+++ b/media/video/video_decode_context.h
@@ -20,13 +20,6 @@ class VideoFrame;
// 1. Provides access to hardware video decoding device.
// 2. Allocate VideoFrame objects that are used to carry the decoded video
// frames.
-// 3. Upload a device specific buffer to some common VideoFrame storage types.
-// In many cases a VideoDecodeEngine provides its own buffers; these buffers
-// are usually device specific and a conversion step is needed. Instead of
-// handling these many cases in the renderer a VideoDecodeContext is used
-// to convert the device specific buffer to a common storage format, e.g.
-// GL textures or system memory. This way we keep the device specific code
-// in the VideoDecodeEngine and VideoDecodeContext pair.
class VideoDecodeContext {
public:
virtual ~VideoDecodeContext() {};
@@ -52,30 +45,11 @@ class VideoDecodeContext {
// could be destroyed.
virtual void ReleaseAllVideoFrames() = 0;
- // Upload a device specific buffer to a video frame. The video frame was
- // allocated via AllocateVideoFrames().
- // This method is used if a VideoDecodeEngine cannot write directly to a
- // VideoFrame, e.g. the upload should be done on a different thread, or the
- // subsystem requires some special treatment to generate a VideoFrame. The
- // goal is to keep VideoDecodeEngine a reusable component and also adapt to
- // different systems by having a different VideoDecodeContext.
- //
- // |frame| is a VideoFrame allocated via AllocateVideoFrames().
- //
- // |buffer| is of type void*, it is of an internal type in VideoDecodeEngine
- // that points to the buffer that contains the video frame.
- // Implementor should know how to handle it.
- //
- // |task| is executed if the operation was completed successfully.
- // TODO(hclam): Rename this to ConvertToVideoFrame().
- virtual void UploadToVideoFrame(void* buffer, scoped_refptr<VideoFrame> frame,
- Task* task) = 0;
-
// Destroy this context asynchronously. When the operation is done |task|
// is called.
//
// ReleaseVideoFrames() need to be called with all the video frames allocated
- // before making this call.
+ // before making this call.
virtual void Destroy(Task* task) = 0;
};
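With UploadToVideoFrame() gone, VideoDecodeContext is back to device access and frame allocation. A condensed sketch of the surviving interface under those assumptions (Task and VideoFrame are stubs; GetDevice() is inferred from responsibility 1 in the comment above):

    #include <cstddef>
    #include <vector>

    struct Task { virtual ~Task() {} virtual void Run() = 0; };  // stub
    struct VideoFrame { enum Format { RGBA }; };                 // stub

    class VideoDecodeContext {
     public:
      virtual ~VideoDecodeContext() {}
      // 1. Access to the hardware video decoding device.
      virtual void* GetDevice() = 0;
      // 2. Allocate |n| frames of |width| x |height| in |format|, then run
      // |task|. The UploadToVideoFrame() removed above no longer exists.
      virtual void AllocateVideoFrames(int n, size_t width, size_t height,
                                       VideoFrame::Format format,
                                       std::vector<VideoFrame*>* frames,
                                       Task* task) = 0;
      virtual void ReleaseAllVideoFrames() = 0;
      virtual void Destroy(Task* task) = 0;
    };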
diff --git a/media/video/video_decode_engine.h b/media/video/video_decode_engine.h
index f9381a2..8736890 100644
--- a/media/video/video_decode_engine.h
+++ b/media/video/video_decode_engine.h
@@ -12,7 +12,6 @@
namespace media {
class Buffer;
-class VideoDecodeContext;
enum VideoCodec {
kCodecH264,
@@ -117,19 +116,13 @@ class VideoDecodeEngine {
virtual ~VideoDecodeEngine() {}
- // Initialize the engine with specified configuration.
- //
- // |decode_context| is used for allocation of VideoFrame.
- // It is important that |decode_context| is called only on |message_loop|.
- //
- // TODO(hclam): Currently refactoring code to use VideoDecodeContext so
- // |context| may be NULL in some cases.
- //
- // Engine should call EventHandler::OnInitializeDone() whether the
- // initialization operation finished successfully or not.
+ // Initialize the engine with the specified configuration. |message_loop|
+ // could be NULL if every operation is synchronous. The engine should call
+ // EventHandler::OnInitializeDone() whether it finished successfully or not.
+ // TODO(jiesun): remove message_loop and create thread inside openmax engine?
+ // or create thread in GpuVideoDecoder and pass message loop here?
virtual void Initialize(MessageLoop* message_loop,
EventHandler* event_handler,
- VideoDecodeContext* context,
const VideoCodecConfig& config) = 0;
// Uninitialize the engine. Engine should destroy all resources and call
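The rewritten comment above requires the engine to report completion on both the success and the failure path. A hedged sketch of a synchronous Initialize() in the spirit of FakeGlVideoDecodeEngine; the class name and the validity check are illustrative, and OnInitializeComplete() is used as elsewhere in this patch:

    struct VideoCodecConfig { int width; int height; };
    struct VideoCodecInfo {
      bool success;
      int surface_width;
      int surface_height;
    };
    struct EventHandler {
      virtual ~EventHandler() {}
      virtual void OnInitializeComplete(const VideoCodecInfo& info) = 0;
    };

    class SketchDecodeEngine {  // hypothetical name, for illustration only
     public:
      // |message_loop| may be NULL when every operation is synchronous; the
      // handler is notified whether or not setup succeeded.
      void Initialize(void* /* message_loop */, EventHandler* handler,
                      const VideoCodecConfig& config) {
        VideoCodecInfo info;
        info.success = config.width > 0 && config.height > 0;
        info.surface_width = config.width;
        info.surface_height = config.height;
        handler->OnInitializeComplete(info);  // success AND failure path
      }
    };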