author     scherkus@chromium.org <scherkus@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2011-06-03 20:06:02 +0000
committer  scherkus@chromium.org <scherkus@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2011-06-03 20:06:02 +0000
commit     d0cef8b4da166d1835f7e4caf6021cce978e2ddd (patch)
tree       c26c6b2d093e580f536029163cf6956f97efc541 /content/renderer
parent     f2fcf4df7b83cba250b64877116038eb0d285c0a (diff)
Removing defunct GpuVideoDecoder and IpcVideoDecoder.
These haven't been used in quite some time and have been replaced by the newer VideoDecodeAccelerator set of classes.

BUG=none
TEST=the world still compiles

Review URL: http://codereview.chromium.org/6993016

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@87841 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'content/renderer')
-rw-r--r--  content/renderer/gpu/gpu_video_decoder_host.cc        401
-rw-r--r--  content/renderer/gpu/gpu_video_decoder_host.h         165
-rw-r--r--  content/renderer/gpu/gpu_video_service_host.cc          6
-rw-r--r--  content/renderer/gpu/gpu_video_service_host.h          16
-rw-r--r--  content/renderer/gpu/renderer_gl_context.cc            11
-rw-r--r--  content/renderer/gpu/renderer_gl_context.h             19
-rw-r--r--  content/renderer/gpu/transport_texture_service.h        1
-rw-r--r--  content/renderer/media/gles2_video_decode_context.cc  122
-rw-r--r--  content/renderer/media/gles2_video_decode_context.h   112
-rw-r--r--  content/renderer/media/ipc_video_decoder.cc           207
-rw-r--r--  content/renderer/media/ipc_video_decoder.h             89
-rw-r--r--  content/renderer/render_view.cc                        18
12 files changed, 0 insertions, 1167 deletions
diff --git a/content/renderer/gpu/gpu_video_decoder_host.cc b/content/renderer/gpu/gpu_video_decoder_host.cc
deleted file mode 100644
index 0451960..0000000
--- a/content/renderer/gpu/gpu_video_decoder_host.cc
+++ /dev/null
@@ -1,401 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "content/renderer/gpu/gpu_video_decoder_host.h"
-
-#include "content/common/gpu/gpu_messages.h"
-#include "content/common/message_router.h"
-#include "media/base/pipeline.h"
-#include "media/video/video_decode_context.h"
-
-GpuVideoDecoderHost::GpuVideoDecoderHost(MessageRouter* router,
- IPC::Message::Sender* ipc_sender,
- int context_route_id,
- int32 decoder_host_id)
- : router_(router),
- ipc_sender_(ipc_sender),
- context_route_id_(context_route_id),
- message_loop_(NULL),
- event_handler_(NULL),
- context_(NULL),
- width_(0),
- height_(0),
- state_(kStateUninitialized),
- decoder_host_id_(decoder_host_id),
- decoder_id_(0),
- input_buffer_busy_(false),
- current_frame_id_(0) {
-}
-
-GpuVideoDecoderHost::~GpuVideoDecoderHost() {}
-
-void GpuVideoDecoderHost::OnChannelError() {
- ipc_sender_ = NULL;
-}
-
-bool GpuVideoDecoderHost::OnMessageReceived(const IPC::Message& msg) {
- bool handled = true;
- IPC_BEGIN_MESSAGE_MAP(GpuVideoDecoderHost, msg)
- IPC_MESSAGE_HANDLER(GpuVideoDecoderHostMsg_CreateVideoDecoderDone,
- OnCreateVideoDecoderDone)
- IPC_MESSAGE_HANDLER(GpuVideoDecoderHostMsg_InitializeACK,
- OnInitializeDone)
- IPC_MESSAGE_HANDLER(GpuVideoDecoderHostMsg_DestroyACK,
- OnUninitializeDone)
- IPC_MESSAGE_HANDLER(GpuVideoDecoderHostMsg_FlushACK,
- OnFlushDone)
- IPC_MESSAGE_HANDLER(GpuVideoDecoderHostMsg_PrerollDone,
- OnPrerollDone)
- IPC_MESSAGE_HANDLER(GpuVideoDecoderHostMsg_EmptyThisBufferACK,
- OnEmptyThisBufferACK)
- IPC_MESSAGE_HANDLER(GpuVideoDecoderHostMsg_EmptyThisBufferDone,
- OnProduceVideoSample)
- IPC_MESSAGE_HANDLER(GpuVideoDecoderHostMsg_ConsumeVideoFrame,
- OnConsumeVideoFrame)
- IPC_MESSAGE_HANDLER(GpuVideoDecoderHostMsg_AllocateVideoFrames,
- OnAllocateVideoFrames)
- IPC_MESSAGE_HANDLER(GpuVideoDecoderHostMsg_ReleaseAllVideoFrames,
- OnReleaseAllVideoFrames)
- IPC_MESSAGE_UNHANDLED(handled = false)
- IPC_END_MESSAGE_MAP()
- DCHECK(handled);
- return handled;
-}
-
-void GpuVideoDecoderHost::Initialize(
- MessageLoop* message_loop,
- VideoDecodeEngine::EventHandler* event_handler,
- media::VideoDecodeContext* context,
- const media::VideoDecoderConfig& config) {
- DCHECK_EQ(kStateUninitialized, state_);
- DCHECK(!message_loop_);
- message_loop_ = message_loop;
- event_handler_ = event_handler;
- context_ = context;
- width_ = config.width();
- height_ = config.height();
-
- if (MessageLoop::current() != message_loop) {
- message_loop->PostTask(
- FROM_HERE,
- NewRunnableMethod(this, &GpuVideoDecoderHost::CreateVideoDecoder));
- return;
- }
- CreateVideoDecoder();
-}
-
-void GpuVideoDecoderHost::ConsumeVideoSample(scoped_refptr<Buffer> buffer) {
- if (MessageLoop::current() != message_loop_) {
- message_loop_->PostTask(
- FROM_HERE,
- NewRunnableMethod(
- this, &GpuVideoDecoderHost::ConsumeVideoSample, buffer));
- return;
- }
-
- DCHECK_NE(state_, kStateUninitialized);
- DCHECK_NE(state_, kStateFlushing);
-
-  // We never own input buffers, so when the client is in the flush state it
-  // never calls us with EmptyThisBuffer.
- if (state_ != kStateNormal)
- return;
-
- input_buffer_queue_.push_back(buffer);
- SendConsumeVideoSample();
-}
-
-void GpuVideoDecoderHost::ProduceVideoFrame(scoped_refptr<VideoFrame> frame) {
- if (MessageLoop::current() != message_loop_) {
- message_loop_->PostTask(
- FROM_HERE,
- NewRunnableMethod(
- this, &GpuVideoDecoderHost::ProduceVideoFrame, frame));
- return;
- }
-
- DCHECK_NE(state_, kStateUninitialized);
-
-  // During a flush the client of this object calls this method to return all
-  // video frames. We should only ignore such calls if we are in the error
-  // state.
- if (state_ == kStateError)
- return;
-
- // Check that video frame is valid.
- if (!frame || frame->format() == media::VideoFrame::EMPTY ||
- frame->IsEndOfStream()) {
- return;
- }
-
- SendProduceVideoFrame(frame);
-}
-
-void GpuVideoDecoderHost::Uninitialize() {
- if (MessageLoop::current() != message_loop_) {
- message_loop_->PostTask(
- FROM_HERE,
- NewRunnableMethod(this, &GpuVideoDecoderHost::Uninitialize));
- return;
- }
-
- if (!ipc_sender_->Send(new GpuVideoDecoderMsg_Destroy(decoder_id_))) {
- LOG(ERROR) << "GpuVideoDecoderMsg_Destroy failed";
- event_handler_->OnError();
- }
-}
-
-void GpuVideoDecoderHost::Flush() {
- if (MessageLoop::current() != message_loop_) {
- message_loop_->PostTask(
- FROM_HERE, NewRunnableMethod(this, &GpuVideoDecoderHost::Flush));
- return;
- }
-
- state_ = kStateFlushing;
- if (!ipc_sender_->Send(new GpuVideoDecoderMsg_Flush(decoder_id_))) {
- LOG(ERROR) << "GpuVideoDecoderMsg_Flush failed";
- event_handler_->OnError();
- return;
- }
-
- input_buffer_queue_.clear();
-  // TODO(jiesun): GpuVideoDecoderHost and GpuVideoDecoder run asynchronously,
-  // so we need a way to make the flush logic clearer. A ring buffer should
-  // make the busy flag obsolete, so leave this as-is for now.
- input_buffer_busy_ = false;
-}
-
-void GpuVideoDecoderHost::Seek() {
- if (MessageLoop::current() != message_loop_) {
- message_loop_->PostTask(
- FROM_HERE, NewRunnableMethod(this, &GpuVideoDecoderHost::Seek));
- return;
- }
-
- if (!ipc_sender_->Send(new GpuVideoDecoderMsg_Preroll(decoder_id_))) {
- LOG(ERROR) << "GpuVideoDecoderMsg_Preroll failed";
- event_handler_->OnError();
- return;
- }
-}
-
-void GpuVideoDecoderHost::CreateVideoDecoder() {
- DCHECK_EQ(message_loop_, MessageLoop::current());
-
- // Add the route so we'll receive messages.
- router_->AddRoute(decoder_host_id_, this);
-
- if (!ipc_sender_->Send(
- new GpuChannelMsg_CreateVideoDecoder(context_route_id_,
- decoder_host_id_))) {
- LOG(ERROR) << "GpuChannelMsg_CreateVideoDecoder failed";
- event_handler_->OnError();
- return;
- }
-}
-
-void GpuVideoDecoderHost::OnCreateVideoDecoderDone(int32 decoder_id) {
- DCHECK_EQ(message_loop_, MessageLoop::current());
- decoder_id_ = decoder_id;
-
- // TODO(hclam): Initialize |param| with the right values.
- GpuVideoDecoderInitParam param;
- param.width = width_;
- param.height = height_;
-
- if (!ipc_sender_->Send(
- new GpuVideoDecoderMsg_Initialize(decoder_id, param))) {
- LOG(ERROR) << "GpuVideoDecoderMsg_Initialize failed";
- event_handler_->OnError();
- }
-}
-
-void GpuVideoDecoderHost::OnInitializeDone(
- const GpuVideoDecoderInitDoneParam& param) {
- DCHECK_EQ(message_loop_, MessageLoop::current());
-
- bool success = param.success &&
- base::SharedMemory::IsHandleValid(param.input_buffer_handle);
-
- if (success) {
- input_transfer_buffer_.reset(
- new base::SharedMemory(param.input_buffer_handle, false));
- success = input_transfer_buffer_->Map(param.input_buffer_size);
- }
- state_ = success ? kStateNormal : kStateError;
-
-  // TODO(hclam): There are too many unnecessary copies of width and height!
-  // Need to clean this up.
- // TODO(hclam): Need to fill in more information.
- media::VideoCodecInfo info;
- info.success = success;
- info.stream_info.surface_width = width_;
- info.stream_info.surface_height = height_;
- event_handler_->OnInitializeComplete(info);
-}
-
-void GpuVideoDecoderHost::OnUninitializeDone() {
- DCHECK_EQ(message_loop_, MessageLoop::current());
-
- input_transfer_buffer_.reset();
- router_->RemoveRoute(decoder_host_id_);
- context_->ReleaseAllVideoFrames();
- event_handler_->OnUninitializeComplete();
-}
-
-void GpuVideoDecoderHost::OnFlushDone() {
- DCHECK_EQ(message_loop_, MessageLoop::current());
-
- state_ = kStateNormal;
- event_handler_->OnFlushComplete();
-}
-
-void GpuVideoDecoderHost::OnPrerollDone() {
- DCHECK_EQ(message_loop_, MessageLoop::current());
-
- state_ = kStateNormal;
- event_handler_->OnSeekComplete();
-}
-
-void GpuVideoDecoderHost::OnEmptyThisBufferACK() {
- DCHECK_EQ(message_loop_, MessageLoop::current());
-
- input_buffer_busy_ = false;
- SendConsumeVideoSample();
-}
-
-void GpuVideoDecoderHost::OnProduceVideoSample() {
- DCHECK_EQ(message_loop_, MessageLoop::current());
- DCHECK_EQ(kStateNormal, state_);
-
- event_handler_->ProduceVideoSample(NULL);
-}
-
-void GpuVideoDecoderHost::OnConsumeVideoFrame(int32 frame_id, int64 timestamp,
- int64 duration, int32 flags) {
- DCHECK_EQ(message_loop_, MessageLoop::current());
-
- scoped_refptr<VideoFrame> frame;
- if (flags & kGpuVideoEndOfStream) {
- VideoFrame::CreateEmptyFrame(&frame);
- } else {
- frame = video_frame_map_[frame_id];
- DCHECK(frame) << "Invalid frame ID received";
-
- frame->SetDuration(base::TimeDelta::FromMicroseconds(duration));
- frame->SetTimestamp(base::TimeDelta::FromMicroseconds(timestamp));
- }
-
- media::PipelineStatistics statistics;
- // TODO(sjl): Fill in statistics.
-
- event_handler_->ConsumeVideoFrame(frame, statistics);
-}
-
-void GpuVideoDecoderHost::OnAllocateVideoFrames(
- int32 n, uint32 width, uint32 height, int32 format) {
- DCHECK_EQ(message_loop_, MessageLoop::current());
- DCHECK_EQ(0u, video_frames_.size());
-
- context_->AllocateVideoFrames(
- n, width, height, static_cast<media::VideoFrame::Format>(format),
- &video_frames_,
- NewRunnableMethod(this,
- &GpuVideoDecoderHost::OnAllocateVideoFramesDone));
-}
-
-void GpuVideoDecoderHost::OnReleaseAllVideoFrames() {
- DCHECK_EQ(message_loop_, MessageLoop::current());
-
- context_->ReleaseAllVideoFrames();
- video_frame_map_.clear();
- video_frames_.clear();
-}
-
-void GpuVideoDecoderHost::OnAllocateVideoFramesDone() {
- if (MessageLoop::current() != message_loop_) {
- message_loop_->PostTask(
- FROM_HERE,
- NewRunnableMethod(
- this, &GpuVideoDecoderHost::OnAllocateVideoFramesDone));
- return;
- }
-
- // After video frame allocation is done we add these frames to a map and
- // send them to the GPU process.
- DCHECK(video_frames_.size()) << "No video frames allocated";
- for (size_t i = 0; i < video_frames_.size(); ++i) {
- DCHECK(video_frames_[i]);
- video_frame_map_.insert(
- std::make_pair(current_frame_id_, video_frames_[i]));
- SendVideoFrameAllocated(current_frame_id_, video_frames_[i]);
- ++current_frame_id_;
- }
-}
-
-void GpuVideoDecoderHost::SendVideoFrameAllocated(
- int32 frame_id, scoped_refptr<media::VideoFrame> frame) {
- DCHECK_EQ(message_loop_, MessageLoop::current());
-
- std::vector<uint32> textures;
- for (size_t i = 0; i < frame->planes(); ++i) {
- textures.push_back(frame->gl_texture(i));
- }
-
- if (!ipc_sender_->Send(new GpuVideoDecoderMsg_VideoFrameAllocated(
- decoder_id_, frame_id, textures))) {
- LOG(ERROR) << "GpuVideoDecoderMsg_EmptyThisBuffer failed";
- }
-}
-
-void GpuVideoDecoderHost::SendConsumeVideoSample() {
- DCHECK_EQ(message_loop_, MessageLoop::current());
-
- if (input_buffer_busy_ || input_buffer_queue_.empty())
- return;
- input_buffer_busy_ = true;
-
- scoped_refptr<Buffer> buffer = input_buffer_queue_.front();
- input_buffer_queue_.pop_front();
-
- // Send input data to GPU process.
- GpuVideoDecoderInputBufferParam param;
- param.offset = 0;
- param.size = buffer->GetDataSize();
- param.timestamp = buffer->GetTimestamp().InMicroseconds();
- memcpy(input_transfer_buffer_->memory(), buffer->GetData(), param.size);
-
- if (!ipc_sender_->Send(
- new GpuVideoDecoderMsg_EmptyThisBuffer(decoder_id_, param))) {
- LOG(ERROR) << "GpuVideoDecoderMsg_EmptyThisBuffer failed";
- }
-}
-
-void GpuVideoDecoderHost::SendProduceVideoFrame(
- scoped_refptr<media::VideoFrame> frame) {
- DCHECK_EQ(message_loop_, MessageLoop::current());
-
-  // TODO(hclam): Mark a frame as in use and DCHECK that the user doesn't
-  // use it a second time.
- // TODO(hclam): Derive a faster way to lookup the frame ID.
- bool found = false;
- int32 frame_id = 0;
- for (VideoFrameMap::iterator i = video_frame_map_.begin();
- i != video_frame_map_.end(); ++i) {
- if (frame == i->second) {
- frame_id = i->first;
- found = true;
- break;
- }
- }
-
- DCHECK(found) << "Invalid video frame received";
- if (found && !ipc_sender_->Send(
- new GpuVideoDecoderMsg_ProduceVideoFrame(decoder_id_, frame_id))) {
- LOG(ERROR) << "GpuVideoDecoderMsg_ProduceVideoFrame failed";
- }
-}
-
-DISABLE_RUNNABLE_METHOD_REFCOUNT(GpuVideoDecoderHost);
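A pattern that recurs throughout the deleted GpuVideoDecoderHost (and in Gles2VideoDecodeContext and IpcVideoDecoder further down) is the thread-affinity guard: each public entry point compares MessageLoop::current() against the loop handed to Initialize() and, if they differ, re-posts itself as a task and returns. A minimal standalone sketch of the same idea using only the standard library; TaskQueue and DecoderHost below are illustrative stand-ins, not Chromium classes.

// Minimal sketch of the "re-post to my own thread" guard, using std threads
// instead of Chromium's MessageLoop/NewRunnableMethod. Names are illustrative.
#include <condition_variable>
#include <deque>
#include <functional>
#include <mutex>
#include <thread>
#include <utility>

class TaskQueue {
 public:
  TaskQueue() : worker_([this] { Run(); }) {}
  ~TaskQueue() { Post({}); worker_.join(); }

  void Post(std::function<void()> task) {
    std::lock_guard<std::mutex> lock(mutex_);
    tasks_.push_back(std::move(task));
    cv_.notify_one();
  }
  bool RunningOnThisThread() const {
    return std::this_thread::get_id() == worker_.get_id();
  }

 private:
  void Run() {
    for (;;) {
      std::function<void()> task;
      {
        std::unique_lock<std::mutex> lock(mutex_);
        cv_.wait(lock, [this] { return !tasks_.empty(); });
        task = std::move(tasks_.front());
        tasks_.pop_front();
      }
      if (!task) return;  // An empty task acts as the quit signal.
      task();
    }
  }

  std::mutex mutex_;
  std::condition_variable cv_;
  std::deque<std::function<void()>> tasks_;
  std::thread worker_;  // Declared last so the other members exist before Run() starts.
};

class DecoderHost {
 public:
  explicit DecoderHost(TaskQueue* loop) : loop_(loop) {}

  // Callable from any thread; the real work always happens on |loop_|,
  // mirroring the guard at the top of GpuVideoDecoderHost::Flush() above.
  void Flush() {
    if (!loop_->RunningOnThisThread()) {
      loop_->Post([this] { Flush(); });
      return;
    }
    // ... send the flush IPC, clear queued input buffers, etc.
  }

 private:
  TaskQueue* loop_;
};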
diff --git a/content/renderer/gpu/gpu_video_decoder_host.h b/content/renderer/gpu/gpu_video_decoder_host.h
deleted file mode 100644
index cfd42b9..0000000
--- a/content/renderer/gpu/gpu_video_decoder_host.h
+++ /dev/null
@@ -1,165 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CONTENT_RENDERER_GPU_GPU_VIDEO_DECODER_HOST_H_
-#define CONTENT_RENDERER_GPU_GPU_VIDEO_DECODER_HOST_H_
-
-#include <deque>
-#include <map>
-
-#include "base/memory/singleton.h"
-#include "base/shared_memory.h"
-#include "content/renderer/gpu/gpu_channel_host.h"
-#include "media/base/buffers.h"
-#include "media/base/video_frame.h"
-#include "media/video/video_decode_engine.h"
-
-using media::VideoFrame;
-using media::Buffer;
-
-class MessageRouter;
-struct GpuVideoDecoderInitDoneParam;
-
-// This class is used to talk to GpuVideoDecoder in the GPU process through
-// IPC messages. It implements the VideoDecodeEngine interface so users see
-// it as a regular video decode engine; the implementation is a portal to
-// the GPU process.
-//
-// THREAD SEMANTICS
-//
-// All methods of this class can be accessed on any thread. A message loop
-// needs to be provided to this class through Initialize() for accessing the
-// IPC channel. Event handlers are called on that message loop.
-//
-// Since this class is not refcounted, it is important to delete this
-// object only after OnUninitializeComplete() is called.
-class GpuVideoDecoderHost : public media::VideoDecodeEngine,
- public IPC::Channel::Listener {
- public:
- // |router| is used to dispatch IPC messages to this object.
- // |ipc_sender| is used to send IPC messages to GPU process.
- // It is important that the above two objects are accessed on the
- // |message_loop_|.
- GpuVideoDecoderHost(MessageRouter* router,
- IPC::Message::Sender* ipc_sender,
- int context_route_id,
- int32 decoder_host_id);
- virtual ~GpuVideoDecoderHost();
-
- // IPC::Channel::Listener.
- virtual void OnChannelConnected(int32 peer_pid) {}
- virtual void OnChannelError();
- virtual bool OnMessageReceived(const IPC::Message& message);
-
- // media::VideoDecodeEngine implementation.
- virtual void Initialize(MessageLoop* message_loop,
- VideoDecodeEngine::EventHandler* event_handler,
- media::VideoDecodeContext* context,
- const media::VideoDecoderConfig& config);
- virtual void ConsumeVideoSample(scoped_refptr<Buffer> buffer);
- virtual void ProduceVideoFrame(scoped_refptr<VideoFrame> frame);
- virtual void Uninitialize();
- virtual void Flush();
- virtual void Seek();
-
- private:
- typedef std::map<int32, scoped_refptr<media::VideoFrame> > VideoFrameMap;
-
- // Internal states.
- enum GpuVideoDecoderHostState {
- kStateUninitialized,
- kStateNormal,
- kStateError,
- kStateFlushing,
- };
-
- // Takes care of sending IPC message to create a video decoder.
- void CreateVideoDecoder();
-
- // Handlers for messages received from the GPU process.
- void OnCreateVideoDecoderDone(int32 decoder_id);
- void OnInitializeDone(const GpuVideoDecoderInitDoneParam& param);
- void OnUninitializeDone();
- void OnFlushDone();
- void OnPrerollDone();
- void OnEmptyThisBufferACK();
- void OnProduceVideoSample();
- void OnConsumeVideoFrame(int32 frame_id, int64 timestamp,
- int64 duration, int32 flags);
- void OnAllocateVideoFrames(int32 n, uint32 width,
- uint32 height, int32 format);
- void OnReleaseAllVideoFrames();
-
-  // Handler for VideoDecodeContext. This method is called when video frame
-  // allocation is done.
- void OnAllocateVideoFramesDone();
-
- // Send a message to the GPU process to inform that a video frame is
- // allocated.
- void SendVideoFrameAllocated(int32 frame_id,
- scoped_refptr<media::VideoFrame> frame);
-
- // Send a video sample to the GPU process and tell it to use the buffer for
- // video decoding.
- void SendConsumeVideoSample();
-
- // Look up the frame_id for |frame| and send a message to the GPU process
- // to use that video frame to produce an output.
- void SendProduceVideoFrame(scoped_refptr<media::VideoFrame> frame);
-
- // A router used to send us IPC messages.
- MessageRouter* router_;
-
- // Sends IPC messages to the GPU process.
- IPC::Message::Sender* ipc_sender_;
-
- // Route ID of the GLES2 context in the GPU process.
- int context_route_id_;
-
- // Message loop that this object runs on.
- MessageLoop* message_loop_;
-
-  // We expect the client to remain available for our entire life span.
- EventHandler* event_handler_;
-
- // A Context for allocating video frame textures.
- media::VideoDecodeContext* context_;
-
- // Dimensions of the video.
- int width_;
- int height_;
-
- // Current state of video decoder.
- GpuVideoDecoderHostState state_;
-
- // ID of this GpuVideoDecoderHost.
- int32 decoder_host_id_;
-
- // ID of GpuVideoDecoder in the GPU process.
- int32 decoder_id_;
-
-  // We are not able to push all received buffers to the GPU process at once.
- std::deque<scoped_refptr<Buffer> > input_buffer_queue_;
-
-  // Currently we do not use a ring buffer for input, so we must not touch
-  // the buffer until the GPU process has finished accessing it.
- bool input_buffer_busy_;
-
- // Transfer buffers for both input and output.
- // TODO(jiesun): remove output buffer when hardware composition is ready.
- scoped_ptr<base::SharedMemory> input_transfer_buffer_;
-
- // Frame ID for the newly generated video frame.
- int32 current_frame_id_;
-
- // The list of video frames allocated by VideoDecodeContext.
- std::vector<scoped_refptr<media::VideoFrame> > video_frames_;
-
- // The mapping between video frame ID and a video frame.
- VideoFrameMap video_frame_map_;
-
- DISALLOW_COPY_AND_ASSIGN(GpuVideoDecoderHost);
-};
-
-#endif // CONTENT_RENDERER_GPU_GPU_VIDEO_DECODER_HOST_H_
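The header keeps only a forward map from frame ID to frame (VideoFrameMap), which is why SendProduceVideoFrame() above scans every entry to recover an ID — the TODO(hclam) about a faster lookup. One way to address it is a second index keyed by the frame pointer. The sketch below is illustrative only; FrameRegistry and the placeholder VideoFrame type are not Chromium API.

// Illustrative sketch: a bidirectional frame registry that avoids the linear
// scan in SendProduceVideoFrame(). VideoFrame stands in for media::VideoFrame;
// error handling is reduced to a bool return.
#include <cstdint>
#include <map>
#include <memory>
#include <unordered_map>

struct VideoFrame {};  // Placeholder for media::VideoFrame.

class FrameRegistry {
 public:
  void Register(int32_t frame_id, std::shared_ptr<VideoFrame> frame) {
    id_to_frame_[frame_id] = frame;
    frame_to_id_[frame.get()] = frame_id;
  }

  // O(1) average lookup instead of scanning every map entry.
  bool LookupId(const VideoFrame* frame, int32_t* frame_id) const {
    auto it = frame_to_id_.find(frame);
    if (it == frame_to_id_.end())
      return false;
    *frame_id = it->second;
    return true;
  }

  void Clear() {
    id_to_frame_.clear();
    frame_to_id_.clear();
  }

 private:
  std::map<int32_t, std::shared_ptr<VideoFrame>> id_to_frame_;
  std::unordered_map<const VideoFrame*, int32_t> frame_to_id_;
};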
diff --git a/content/renderer/gpu/gpu_video_service_host.cc b/content/renderer/gpu/gpu_video_service_host.cc
index 2e7a39e..0c629f8 100644
--- a/content/renderer/gpu/gpu_video_service_host.cc
+++ b/content/renderer/gpu/gpu_video_service_host.cc
@@ -70,12 +70,6 @@ void GpuVideoServiceHost::SetOnInitialized(
on_initialized.Run();
}
-GpuVideoDecoderHost* GpuVideoServiceHost::CreateVideoDecoder(
- int context_route_id) {
- // TODO(vrk): Delete all references to GpuVideoDecoder (deprecated).
- return NULL;
-}
-
GpuVideoDecodeAcceleratorHost* GpuVideoServiceHost::CreateVideoAccelerator(
media::VideoDecodeAccelerator::Client* client) {
base::AutoLock auto_lock(lock_);
diff --git a/content/renderer/gpu/gpu_video_service_host.h b/content/renderer/gpu/gpu_video_service_host.h
index 7049d1c..be3e44b 100644
--- a/content/renderer/gpu/gpu_video_service_host.h
+++ b/content/renderer/gpu/gpu_video_service_host.h
@@ -7,7 +7,6 @@
#include "base/memory/singleton.h"
#include "content/renderer/gpu/gpu_channel_host.h"
-#include "content/renderer/gpu/gpu_video_decoder_host.h"
#include "ipc/ipc_channel.h"
#include "media/base/buffers.h"
#include "media/base/video_frame.h"
@@ -37,21 +36,6 @@ class GpuVideoServiceHost : public IPC::ChannelProxy::MessageFilter {
// Called on RenderThread to create a hardware accelerated video decoder
// in the GPU process.
- //
- // A routing ID for the GLES2 context needs to be provided when creating a
- // hardware video decoder. This is important because the resources used by
- // the video decoder needs to be shared with the GLES2 context corresponding
- // to the RenderView.
- //
- // This means that a GPU video decoder is tied to a specific RenderView and
- // its GLES2 context in the GPU process.
- //
- // Returns a GpuVideoDecoderHost as a handle to control the video decoder.
- //
- // Note: OnFilterAdded() MUST be called before these methods are called,
- // because they require |channel_| to be non-NULL.
- GpuVideoDecoderHost* CreateVideoDecoder(int context_route_id);
-
GpuVideoDecodeAcceleratorHost* CreateVideoAccelerator(
media::VideoDecodeAccelerator::Client* client);
diff --git a/content/renderer/gpu/renderer_gl_context.cc b/content/renderer/gpu/renderer_gl_context.cc
index 91e4dfc..88d79c2 100644
--- a/content/renderer/gpu/renderer_gl_context.cc
+++ b/content/renderer/gpu/renderer_gl_context.cc
@@ -17,7 +17,6 @@
#include "content/renderer/gpu/gpu_video_service_host.h"
#include "content/renderer/gpu/transport_texture_host.h"
#include "content/renderer/gpu/transport_texture_service.h"
-#include "content/renderer/media/gles2_video_decode_context.h"
#include "content/renderer/render_thread.h"
#include "content/renderer/render_widget.h"
#include "googleurl/src/gurl.h"
@@ -305,16 +304,6 @@ bool RendererGLContext::SwapBuffers() {
return true;
}
-media::VideoDecodeEngine* RendererGLContext::CreateVideoDecodeEngine() {
- return channel_->gpu_video_service_host()->CreateVideoDecoder(
- command_buffer_->route_id());
-}
-
-media::VideoDecodeContext* RendererGLContext::CreateVideoDecodeContext(
- MessageLoop* message_loop, bool hardware_decoder) {
- return new Gles2VideoDecodeContext(message_loop, hardware_decoder, this);
-}
-
scoped_refptr<TransportTextureHost>
RendererGLContext::CreateTransportTextureHost() {
return channel_->transport_texture_service()->CreateTransportTextureHost(
diff --git a/content/renderer/gpu/renderer_gl_context.h b/content/renderer/gpu/renderer_gl_context.h
index 6b255cc..3798892 100644
--- a/content/renderer/gpu/renderer_gl_context.h
+++ b/content/renderer/gpu/renderer_gl_context.h
@@ -31,12 +31,6 @@ class GLES2Implementation;
}
}
-namespace media {
-class VideoDecodeContext;
-class VideoDecodeEngine;
-class VideoDecodeRendererGLContext;
-}
-
class RendererGLContext : public base::SupportsWeakPtr<RendererGLContext> {
public:
// These are the same error codes as used by EGL.
@@ -154,19 +148,6 @@ class RendererGLContext : public base::SupportsWeakPtr<RendererGLContext> {
// by the parent RendererGLContext.
bool SwapBuffers();
- // Create a hardware video decode engine corresponding to the
- // RendererGLContext.
- media::VideoDecodeEngine* CreateVideoDecodeEngine();
-
- // Create a hardware video decode RendererGLContext to pair with the hardware
- // video decode engine. It can also be used with a software decode engine.
- //
- // Set |hardware_decoder| to true if this RendererGLContext is for a hardware
- // video engine. |message_loop| is where the decode RendererGLContext should
- // run on.
- media::VideoDecodeContext* CreateVideoDecodeContext(MessageLoop* message_loop,
- bool hardware_decoder);
-
// Create a TransportTextureHost object associated with the context.
scoped_refptr<TransportTextureHost> CreateTransportTextureHost();
diff --git a/content/renderer/gpu/transport_texture_service.h b/content/renderer/gpu/transport_texture_service.h
index 0822ef1..96f5101 100644
--- a/content/renderer/gpu/transport_texture_service.h
+++ b/content/renderer/gpu/transport_texture_service.h
@@ -10,7 +10,6 @@
#include "base/memory/ref_counted.h"
#include "content/renderer/gpu/gpu_channel_host.h"
-#include "content/renderer/gpu/gpu_video_decoder_host.h"
#include "ipc/ipc_channel.h"
#include "media/base/buffers.h"
#include "media/base/video_frame.h"
diff --git a/content/renderer/media/gles2_video_decode_context.cc b/content/renderer/media/gles2_video_decode_context.cc
deleted file mode 100644
index 8b74e23..0000000
--- a/content/renderer/media/gles2_video_decode_context.cc
+++ /dev/null
@@ -1,122 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <GLES2/gl2.h>
-
-#include "base/message_loop.h"
-#include "content/renderer/gpu/renderer_gl_context.h"
-#include "content/renderer/media/gles2_video_decode_context.h"
-
-Gles2VideoDecodeContext::Gles2VideoDecodeContext(
- MessageLoop* message_loop, bool memory_mapped, RendererGLContext* context)
- : message_loop_(message_loop),
- memory_mapped_(memory_mapped),
- context_(context) {
-}
-
-Gles2VideoDecodeContext::~Gles2VideoDecodeContext() {
-}
-
-void* Gles2VideoDecodeContext::GetDevice() {
-  // This decode context is used inside the renderer, so the hardware decoder
-  // device handle should not be used.
- return NULL;
-}
-
-void Gles2VideoDecodeContext::AllocateVideoFrames(
- int num_frames, size_t width, size_t height,
- media::VideoFrame::Format format,
- std::vector<scoped_refptr<media::VideoFrame> >* frames_out, Task* task) {
- if (MessageLoop::current() != message_loop_) {
- message_loop_->PostTask(
- FROM_HERE,
- NewRunnableMethod(this, &Gles2VideoDecodeContext::AllocateVideoFrames,
- num_frames, width, height, format, frames_out,
- task));
- return;
- }
-
- // In this method we need to make the context current and then generate
- // textures for each video frame. We also need to allocate memory for each
- // texture generated.
- bool ret = RendererGLContext::MakeCurrent(context_);
- CHECK(ret) << "Failed to switch context";
-
- frames_.resize(num_frames);
- for (int i = 0; i < num_frames; ++i) {
- int planes = media::VideoFrame::GetNumberOfPlanes(format);
- media::VideoFrame::GlTexture textures[media::VideoFrame::kMaxPlanes];
-
- // Set the color format of the textures.
- DCHECK(format == media::VideoFrame::RGBA ||
- format == media::VideoFrame::YV12);
- int gl_format = format == media::VideoFrame::RGBA ? GL_RGBA : GL_LUMINANCE;
-
- glGenTextures(planes, textures);
- for (int j = 0; j < planes; ++j) {
- glBindTexture(GL_TEXTURE_2D, textures[j]);
- glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
- glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
- glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
- glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
- glTexImage2D(GL_TEXTURE_2D, 0, gl_format, width, height, 0, gl_format,
- GL_UNSIGNED_BYTE, NULL);
- }
- glFlush();
-
- scoped_refptr<media::VideoFrame> frame;
- media::VideoFrame::CreateFrameGlTexture(format, width, height, textures,
- &frame);
- frames_[i] = frame;
- }
- *frames_out = frames_;
-
- task->Run();
- delete task;
-}
-
-void Gles2VideoDecodeContext::ReleaseAllVideoFrames() {
- if (MessageLoop::current() != message_loop_) {
- message_loop_->PostTask(
- FROM_HERE,
- NewRunnableMethod(this,
- &Gles2VideoDecodeContext::ReleaseAllVideoFrames));
- return;
- }
-
- // Make the context current and then release the video frames.
- bool ret = RendererGLContext::MakeCurrent(context_);
- CHECK(ret) << "Failed to switch context";
-
- for (size_t i = 0; i < frames_.size(); ++i) {
- for (size_t j = 0; j < frames_[i]->planes(); ++j) {
- media::VideoFrame::GlTexture texture = frames_[i]->gl_texture(j);
- glDeleteTextures(1, &texture);
- }
- }
- frames_.clear();
-}
-
-void Gles2VideoDecodeContext::ConvertToVideoFrame(
- void* buffer, scoped_refptr<media::VideoFrame> frame, Task* task) {
- DCHECK(memory_mapped_);
- // TODO(hclam): Implement.
-}
-
-void Gles2VideoDecodeContext::Destroy(Task* task) {
- if (MessageLoop::current() != message_loop_) {
- message_loop_->PostTask(
- FROM_HERE,
- NewRunnableMethod(this, &Gles2VideoDecodeContext::Destroy, task));
- return;
- }
-
- ReleaseAllVideoFrames();
- DCHECK_EQ(0u, frames_.size());
-
- task->Run();
- delete task;
-}
-
-DISABLE_RUNNABLE_METHOD_REFCOUNT(Gles2VideoDecodeContext);
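AllocateVideoFrames() above supports exactly two formats: RGBA, which becomes a single GL_RGBA texture, and YV12, which becomes one GL_LUMINANCE texture per plane. That mapping can be isolated as in the small sketch below; the enum and constants are local stand-ins for media::VideoFrame::Format and the GLES2 header values, not the real definitions.

// Stand-alone sketch of the format handling in AllocateVideoFrames().
#include <cassert>

enum class VideoFormat { kRGBA, kYV12 };

constexpr int kGlRgba = 0x1908;       // Same value as GL_RGBA.
constexpr int kGlLuminance = 0x1909;  // Same value as GL_LUMINANCE.

// YV12 carries Y, U and V in separate planes; RGBA is packed into one plane.
int PlaneCount(VideoFormat format) {
  return format == VideoFormat::kYV12 ? 3 : 1;
}

// Each YV12 plane is uploaded as a single-channel luminance texture, while an
// RGBA frame uses one four-channel texture.
int TextureFormat(VideoFormat format) {
  assert(format == VideoFormat::kRGBA || format == VideoFormat::kYV12);
  return format == VideoFormat::kRGBA ? kGlRgba : kGlLuminance;
}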
diff --git a/content/renderer/media/gles2_video_decode_context.h b/content/renderer/media/gles2_video_decode_context.h
deleted file mode 100644
index 70b7f71..0000000
--- a/content/renderer/media/gles2_video_decode_context.h
+++ /dev/null
@@ -1,112 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CONTENT_RENDERER_MEDIA_GLES2_VIDEO_DECODE_CONTEXT_H_
-#define CONTENT_RENDERER_MEDIA_GLES2_VIDEO_DECODE_CONTEXT_H_
-
-#include <vector>
-
-#include "media/video/video_decode_context.h"
-
-class MessageLoop;
-class RendererGLContext;
-
-// FUNCTIONS
-//
-// This is a class that provides a video decode context using a
-// RendererGLContext backend.
-//
-// It provides resources for a VideoDecodeEngine to store decoded video frames.
-//
-// This class is aware of the command buffer implementation of GLES2 inside the
-// Chrome renderer and keeps a reference to a RendererGLContext. It might use
-// GLES2 commands specific to Chrome's renderer process to provide needed
-// resources.
-//
-// There are two different kinds of video frame storage provided by this class:
-// 1. Memory mapped textures (aka software decoding mode).
-// Each texture is memory mapped and appears to the VideoDecodeEngine as
-// system memory.
-//
-//    The textures are used by the VideoDecodeEngine to perform software
-//    video decoding as if they were allocated in plain system memory (in
-//    fact they are allocated in system memory and shared with the GPU
-//    process). An additional step of uploading the content to
-// video memory is needed. Since VideoDecodeEngine is unaware of the video
-// memory, this upload operation is performed by calling
-// ConvertToVideoFrame().
-//
-// After the content is uploaded to video memory, WebKit will see the video
-// frame as textures and will perform the necessary operations for
-// rendering.
-//
-// 2. Opaque textures (aka hardware decoding mode).
-// In this mode of operation each video frame is backed by some opaque
-// textures. This is used only when hardware video decoding is used. The
-//    textures need to be generated and allocated inside the renderer process
-//    first. This establishes a translation between texture IDs in the
-// renderer process and the GPU process.
-//
-//    The generated texture ID is used by IpcVideoDecodeEngine only to be sent
-//    to the GPU process. Inside the GPU process the texture ID is translated to
-// a real texture ID inside the actual context. The real texture ID is then
-// assigned to the hardware video decoder for storing the video frame.
-//
-//    WebKit will see the video frame as normal textures and perform the
-// necessary render operations.
-//
-// In both operation modes, the objective is to have WebKit see the video frames
-// as regular textures.
-//
-// THREAD SEMANTICS
-//
-// All methods of this class can be called on any thread. The GLES2 context and
-// all OpenGL method calls are accessed on the Render Thread. As a result, all
-// Tasks given to this object are executed on the Render Thread.
-//
-// Since this class is not refcounted, it is important to destroy objects of
-// this class only when the Task given to Destroy() is called.
-//
-class Gles2VideoDecodeContext : public media::VideoDecodeContext {
- public:
-  // |message_loop| is the message loop of the Render Thread.
- // |memory_mapped| determines if textures allocated are memory mapped.
- // |context| is the graphics context for generating textures.
- Gles2VideoDecodeContext(MessageLoop* message_loop,
- bool memory_mapped,
- RendererGLContext* context);
- virtual ~Gles2VideoDecodeContext();
-
- // media::VideoDecodeContext implementation.
- virtual void* GetDevice();
- virtual void AllocateVideoFrames(
- int frames_num, size_t width, size_t height,
- media::VideoFrame::Format format,
- std::vector<scoped_refptr<media::VideoFrame> >* frames_out, Task* task);
- virtual void ReleaseAllVideoFrames();
- virtual void ConvertToVideoFrame(void* buffer,
- scoped_refptr<media::VideoFrame> frame,
- Task* task);
- virtual void Destroy(Task* task);
-
- // Accessor of the current mode of this decode context.
- bool IsMemoryMapped() const { return memory_mapped_; }
-
- private:
- // Message loop for Render Thread.
- MessageLoop* message_loop_;
-
- // Type of storage provided by this class.
- bool memory_mapped_;
-
- // Pointer to the GLES2 context.
- RendererGLContext* context_;
-
- // VideoFrames allocated.
- std::vector<scoped_refptr<media::VideoFrame> > frames_;
-
- DISALLOW_COPY_AND_ASSIGN(Gles2VideoDecodeContext);
-};
-
-#endif // CONTENT_RENDERER_MEDIA_GLES2_VIDEO_DECODE_CONTEXT_H_
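The two storage modes described in the header comment above are selected by a single bool (|memory_mapped_|) passed at construction. A tiny sketch of how a caller could express that choice more explicitly; the enum and helper below are illustrative names, not Chromium API.

// Illustrative sketch: naming the Gles2VideoDecodeContext storage modes
// instead of passing a bare bool.
enum class FrameStorage {
  kMemoryMappedTextures,  // Software decoding: frames look like system memory.
  kOpaqueTextures,        // Hardware decoding: frames are opaque GL textures.
};

// Translate the named mode into the bool the constructor above expects.
inline bool IsMemoryMapped(FrameStorage storage) {
  return storage == FrameStorage::kMemoryMappedTextures;
}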
diff --git a/content/renderer/media/ipc_video_decoder.cc b/content/renderer/media/ipc_video_decoder.cc
deleted file mode 100644
index 7f71312..0000000
--- a/content/renderer/media/ipc_video_decoder.cc
+++ /dev/null
@@ -1,207 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "content/renderer/media/ipc_video_decoder.h"
-
-#include "base/task.h"
-#include "content/common/child_process.h"
-#include "content/renderer/gpu/renderer_gl_context.h"
-#include "media/base/callback.h"
-#include "media/base/filters.h"
-#include "media/base/filter_host.h"
-#include "media/base/limits.h"
-#include "media/base/media_format.h"
-#include "media/base/video_frame.h"
-#include "media/ffmpeg/ffmpeg_common.h"
-#include "media/video/video_decode_engine.h"
-
-IpcVideoDecoder::IpcVideoDecoder(MessageLoop* message_loop,
- RendererGLContext* gl_context)
- : decode_context_message_loop_(message_loop),
- gl_context_(gl_context) {
-}
-
-IpcVideoDecoder::~IpcVideoDecoder() {
-}
-
-void IpcVideoDecoder::Initialize(media::DemuxerStream* demuxer_stream,
- media::FilterCallback* callback,
- media::StatisticsCallback* statsCallback) {
-  // It doesn't matter on which thread we perform initialization because
-  // all this method does is create objects and delegate the initialize
-  // message.
-
- DCHECK(!demuxer_stream_);
- demuxer_stream_ = demuxer_stream;
- initialize_callback_.reset(callback);
- statistics_callback_.reset(statsCallback);
-
- // We require bit stream converter for hardware decoder.
- demuxer_stream->EnableBitstreamConverter();
-
- AVStream* av_stream = demuxer_stream->GetAVStream();
- if (!av_stream) {
- media::VideoCodecInfo info = {0};
- OnInitializeComplete(info);
- return;
- }
-
- int width = av_stream->codec->coded_width;
- int height = av_stream->codec->coded_height;
-
- int surface_width = media::GetSurfaceWidth(av_stream);
- int surface_height = media::GetSurfaceHeight(av_stream);
-
- if (surface_width > media::Limits::kMaxDimension ||
- surface_height > media::Limits::kMaxDimension ||
- (surface_width * surface_height) > media::Limits::kMaxCanvas) {
- media::VideoCodecInfo info = {0};
- OnInitializeComplete(info);
- return;
- }
-
-  // Create a video decode context that is associated with the graphics
-  // context.
- decode_context_.reset(
- gl_context_->CreateVideoDecodeContext(
- decode_context_message_loop_, true));
-
- // Create a hardware video decoder handle.
- decode_engine_.reset(gl_context_->CreateVideoDecodeEngine());
-
- media::VideoDecoderConfig config(
- media::CodecIDToVideoCodec(av_stream->codec->codec_id),
- width, height,
- surface_width, surface_height,
- av_stream->r_frame_rate.num,
- av_stream->r_frame_rate.den,
- av_stream->codec->extradata,
- av_stream->codec->extradata_size);
-
- // VideoDecodeEngine will perform initialization on the message loop
- // given to it so it doesn't matter on which thread we are calling this.
- decode_engine_->Initialize(ChildProcess::current()->io_message_loop(), this,
- decode_context_.get(), config);
-}
-
-const media::MediaFormat& IpcVideoDecoder::media_format() {
- return media_format_;
-}
-
-void IpcVideoDecoder::Stop(media::FilterCallback* callback) {
- stop_callback_.reset(callback);
- decode_engine_->Uninitialize();
-}
-
-void IpcVideoDecoder::Pause(media::FilterCallback* callback) {
-  // TODO(hclam): It looks like pause is not necessary, so implement this
-  // later.
- callback->Run();
- delete callback;
-}
-
-void IpcVideoDecoder::Flush(media::FilterCallback* callback) {
- flush_callback_.reset(callback);
- decode_engine_->Flush();
-}
-
-void IpcVideoDecoder::Seek(base::TimeDelta time,
- const media::FilterStatusCB& cb) {
- seek_cb_ = cb;
- decode_engine_->Seek();
-}
-
-void IpcVideoDecoder::OnInitializeComplete(const media::VideoCodecInfo& info) {
- DCHECK_EQ(ChildProcess::current()->io_message_loop(), MessageLoop::current());
-
- if (info.success) {
- media_format_.SetAsInteger(media::MediaFormat::kSurfaceType,
- media::VideoFrame::TYPE_GL_TEXTURE);
- media_format_.SetAsInteger(media::MediaFormat::kSurfaceFormat,
- info.stream_info.surface_format);
- media_format_.SetAsInteger(media::MediaFormat::kWidth,
- info.stream_info.surface_width);
- media_format_.SetAsInteger(media::MediaFormat::kHeight,
- info.stream_info.surface_height);
- media_format_.SetAsInteger(
- media::MediaFormat::kSurfaceType,
- static_cast<int>(media::VideoFrame::TYPE_GL_TEXTURE));
- } else {
- LOG(ERROR) << "IpcVideoDecoder initialization failed!";
- host()->SetError(media::PIPELINE_ERROR_DECODE);
- }
-
- initialize_callback_->Run();
- initialize_callback_.reset();
-}
-
-void IpcVideoDecoder::OnUninitializeComplete() {
- DCHECK_EQ(ChildProcess::current()->io_message_loop(), MessageLoop::current());
-
-  // After the decode engine is uninitialized it is safe to destroy the decode
-  // context. The task adds a reference to this object, so we don't need to
-  // worry about object lifetime.
- decode_context_->Destroy(
- NewRunnableMethod(this, &IpcVideoDecoder::OnDestroyComplete));
-
-  // We don't need to wait for destruction of the decode context to complete
-  // because it can happen asynchronously. This object and the decode context
-  // will live until the destruction task is called.
- stop_callback_->Run();
- stop_callback_.reset();
-}
-
-void IpcVideoDecoder::OnFlushComplete() {
- DCHECK_EQ(ChildProcess::current()->io_message_loop(), MessageLoop::current());
- flush_callback_->Run();
- flush_callback_.reset();
-}
-
-void IpcVideoDecoder::OnSeekComplete() {
- DCHECK_EQ(ChildProcess::current()->io_message_loop(), MessageLoop::current());
- ResetAndRunCB(&seek_cb_, media::PIPELINE_OK);
-}
-
-void IpcVideoDecoder::OnError() {
- DCHECK_EQ(ChildProcess::current()->io_message_loop(), MessageLoop::current());
- host()->SetError(media::PIPELINE_ERROR_DECODE);
-}
-
-// This method is called by the Demuxer after a demuxed packet is produced.
-void IpcVideoDecoder::OnReadComplete(media::Buffer* buffer) {
- decode_engine_->ConsumeVideoSample(buffer);
-}
-
-void IpcVideoDecoder::OnDestroyComplete() {
- // We don't need to do anything in this method. Destruction of objects will
- // occur as soon as refcount goes to 0.
-}
-
-// This method is called by VideoRenderer. We delegate the method call to
-// VideoDecodeEngine.
-void IpcVideoDecoder::ProduceVideoFrame(
- scoped_refptr<media::VideoFrame> video_frame) {
- decode_engine_->ProduceVideoFrame(video_frame);
-}
-
-bool IpcVideoDecoder::ProvidesBuffer() {
- return true;
-}
-
-// This method is called by VideoDecodeEngine when a video frame is produced.
-// This is then passed to VideoRenderer.
-void IpcVideoDecoder::ConsumeVideoFrame(
- scoped_refptr<media::VideoFrame> video_frame,
- const media::PipelineStatistics& statistics) {
- DCHECK(video_frame);
- statistics_callback_->Run(statistics);
-
- VideoFrameReady(video_frame);
-}
-
-// This method is called by VideoDecodeEngine to request a video frame. The
-// request is passed to demuxer.
-void IpcVideoDecoder::ProduceVideoSample(scoped_refptr<media::Buffer> buffer) {
- demuxer_stream_->Read(base::Bind(&IpcVideoDecoder::OnReadComplete, this));
-}
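IpcVideoDecoder holds each pending FilterCallback in a scoped_ptr and pairs every Run() with a reset(), so a completion can never fire the same callback twice. The same run-once idea expressed with std::function; PendingCallback is an illustrative name, not part of the media:: API.

// Sketch of the run-once callback handling used by OnInitializeComplete(),
// OnUninitializeComplete() and OnFlushComplete() above, expressed with
// std::function instead of media::FilterCallback.
#include <functional>
#include <utility>

class PendingCallback {
 public:
  void Set(std::function<void()> cb) { cb_ = std::move(cb); }

  // Runs and clears the callback so a second completion is a no-op,
  // mirroring the Run()-then-reset() pairs in IpcVideoDecoder.
  void RunAndReset() {
    std::function<void()> cb = std::move(cb_);
    cb_ = nullptr;
    if (cb)
      cb();
  }

 private:
  std::function<void()> cb_;
};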
diff --git a/content/renderer/media/ipc_video_decoder.h b/content/renderer/media/ipc_video_decoder.h
deleted file mode 100644
index 280014b..0000000
--- a/content/renderer/media/ipc_video_decoder.h
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CONTENT_RENDERER_MEDIA_IPC_VIDEO_DECODER_H_
-#define CONTENT_RENDERER_MEDIA_IPC_VIDEO_DECODER_H_
-
-#include "base/time.h"
-#include "media/base/pts_heap.h"
-#include "media/base/video_frame.h"
-#include "media/filters/decoder_base.h"
-#include "media/video/video_decode_engine.h"
-#include "media/video/video_decode_context.h"
-
-struct AVRational;
-class RendererGLContext;
-
-class IpcVideoDecoder : public media::VideoDecoder,
- public media::VideoDecodeEngine::EventHandler {
- public:
- IpcVideoDecoder(MessageLoop* message_loop, RendererGLContext* gl_context);
- virtual ~IpcVideoDecoder();
-
- // media::Filter implementation.
- virtual void Stop(media::FilterCallback* callback);
- virtual void Seek(base::TimeDelta time, const media::FilterStatusCB& cb);
- virtual void Pause(media::FilterCallback* callback);
- virtual void Flush(media::FilterCallback* callback);
-
- // media::VideoDecoder implementation.
- virtual void Initialize(media::DemuxerStream* demuxer_stream,
- media::FilterCallback* callback,
- media::StatisticsCallback* statsCallback);
- virtual const media::MediaFormat& media_format();
- virtual void ProduceVideoFrame(scoped_refptr<media::VideoFrame> video_frame);
-
- // TODO(hclam): Remove this method.
- virtual bool ProvidesBuffer();
-
- // media::VideoDecodeEngine::EventHandler implementation.
- virtual void OnInitializeComplete(const media::VideoCodecInfo& info);
- virtual void OnUninitializeComplete();
- virtual void OnFlushComplete();
- virtual void OnSeekComplete();
- virtual void OnError();
-
- // TODO(hclam): Remove this method.
- virtual void OnFormatChange(media::VideoStreamInfo stream_info) {}
- virtual void ProduceVideoSample(scoped_refptr<media::Buffer> buffer);
- virtual void ConsumeVideoFrame(scoped_refptr<media::VideoFrame> frame,
- const media::PipelineStatistics& statistics);
-
- private:
- void OnReadComplete(media::Buffer* buffer);
- void OnDestroyComplete();
-
- media::MediaFormat media_format_;
-
- scoped_ptr<media::FilterCallback> flush_callback_;
- media::FilterStatusCB seek_cb_;
- scoped_ptr<media::FilterCallback> initialize_callback_;
- scoped_ptr<media::FilterCallback> stop_callback_;
- scoped_ptr<media::StatisticsCallback> statistics_callback_;
-
- // Pointer to the demuxer stream that will feed us compressed buffers.
- scoped_refptr<media::DemuxerStream> demuxer_stream_;
-
- // This is the message loop that we should assign to VideoDecodeContext.
- MessageLoop* decode_context_message_loop_;
-
- // A context for allocating textures and issuing GLES2 commands.
-  // TODO(hclam): A RendererGLContext lives on the Render Thread while this
-  // object lives on the Video Decoder Thread; we need to take care of context
-  // loss and destruction of the context.
- RendererGLContext* gl_context_;
-
-  // This VideoDecodeEngine translates our requests into IPC commands to the
-  // GPU process.
-  // VideoDecodeEngine should run on the IO Thread instead of the Render Thread
-  // to avoid deadlock during teardown of the media pipeline.
- scoped_ptr<media::VideoDecodeEngine> decode_engine_;
-
- // Decoding context to be used by VideoDecodeEngine.
- scoped_ptr<media::VideoDecodeContext> decode_context_;
-
- DISALLOW_COPY_AND_ASSIGN(IpcVideoDecoder);
-};
-
-#endif // CONTENT_RENDERER_MEDIA_IPC_VIDEO_DECODER_H_
diff --git a/content/renderer/render_view.cc b/content/renderer/render_view.cc
index 96514d4..048da27 100644
--- a/content/renderer/render_view.cc
+++ b/content/renderer/render_view.cc
@@ -47,7 +47,6 @@
#include "content/renderer/gpu/webgraphicscontext3d_command_buffer_impl.h"
#include "content/renderer/load_progress_tracker.h"
#include "content/renderer/media/audio_renderer_impl.h"
-#include "content/renderer/media/ipc_video_decoder.h"
#include "content/renderer/navigation_state.h"
#include "content/renderer/notification_provider.h"
#include "content/renderer/p2p/socket_dispatcher.h"
@@ -1885,23 +1884,6 @@ WebMediaPlayer* RenderView::createMediaPlayer(
collection->AddAudioRenderer(new AudioRendererImpl(audio_message_filter()));
}
- if (cmd_line->HasSwitch(switches::kEnableAcceleratedDecoding) &&
- !cmd_line->HasSwitch(switches::kDisableAcceleratedCompositing)) {
- WebGraphicsContext3DCommandBufferImpl* context =
- static_cast<WebGraphicsContext3DCommandBufferImpl*>(
- frame->view()->graphicsContext3D());
- if (!context)
- return NULL;
-
- // Add the hardware video decoder factory.
- // TODO(hclam): This will cause the renderer process to crash on context
-  // loss.
- bool ret = context->makeContextCurrent();
- CHECK(ret) << "Failed to switch context";
- collection->AddVideoDecoder(new IpcVideoDecoder(
- MessageLoop::current(), context->context()));
- }
-
scoped_refptr<webkit_glue::WebVideoRenderer> video_renderer;
bool pts_logging = cmd_line->HasSwitch(switches::kEnableVideoLogging);
scoped_refptr<webkit_glue::VideoRendererImpl> renderer(