author     scherkus@chromium.org <scherkus@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2011-06-03 20:06:02 +0000
committer  scherkus@chromium.org <scherkus@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2011-06-03 20:06:02 +0000
commit     d0cef8b4da166d1835f7e4caf6021cce978e2ddd (patch)
tree       c26c6b2d093e580f536029163cf6956f97efc541 /content/renderer/media
parent     f2fcf4df7b83cba250b64877116038eb0d285c0a (diff)
download   chromium_src-d0cef8b4da166d1835f7e4caf6021cce978e2ddd.zip
           chromium_src-d0cef8b4da166d1835f7e4caf6021cce978e2ddd.tar.gz
           chromium_src-d0cef8b4da166d1835f7e4caf6021cce978e2ddd.tar.bz2
Removing defunct GpuVideoDecoder and IpcVideoDecoder.
These haven't been used in quite some time and have been replaced by the newer
VideoDecoderAccelerator set of classes.

BUG=none
TEST=the world still compiles

Review URL: http://codereview.chromium.org/6993016

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@87841 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'content/renderer/media')
-rw-r--r--  content/renderer/media/gles2_video_decode_context.cc  122
-rw-r--r--  content/renderer/media/gles2_video_decode_context.h   112
-rw-r--r--  content/renderer/media/ipc_video_decoder.cc           207
-rw-r--r--  content/renderer/media/ipc_video_decoder.h             89
4 files changed, 0 insertions, 530 deletions
diff --git a/content/renderer/media/gles2_video_decode_context.cc b/content/renderer/media/gles2_video_decode_context.cc
deleted file mode 100644
index 8b74e23..0000000
--- a/content/renderer/media/gles2_video_decode_context.cc
+++ /dev/null
@@ -1,122 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <GLES2/gl2.h>
-
-#include "base/message_loop.h"
-#include "content/renderer/gpu/renderer_gl_context.h"
-#include "content/renderer/media/gles2_video_decode_context.h"
-
-Gles2VideoDecodeContext::Gles2VideoDecodeContext(
- MessageLoop* message_loop, bool memory_mapped, RendererGLContext* context)
- : message_loop_(message_loop),
- memory_mapped_(memory_mapped),
- context_(context) {
-}
-
-Gles2VideoDecodeContext::~Gles2VideoDecodeContext() {
-}
-
-void* Gles2VideoDecodeContext::GetDevice() {
- // This decode context is used inside the renderer, so a hardware decoder
- // device handle should not be used.
- return NULL;
-}
-
-void Gles2VideoDecodeContext::AllocateVideoFrames(
- int num_frames, size_t width, size_t height,
- media::VideoFrame::Format format,
- std::vector<scoped_refptr<media::VideoFrame> >* frames_out, Task* task) {
- if (MessageLoop::current() != message_loop_) {
- message_loop_->PostTask(
- FROM_HERE,
- NewRunnableMethod(this, &Gles2VideoDecodeContext::AllocateVideoFrames,
- num_frames, width, height, format, frames_out,
- task));
- return;
- }
-
- // In this method we need to make the context current and then generate
- // textures for each video frame. We also need to allocate memory for each
- // texture generated.
- bool ret = RendererGLContext::MakeCurrent(context_);
- CHECK(ret) << "Failed to switch context";
-
- frames_.resize(num_frames);
- for (int i = 0; i < num_frames; ++i) {
- int planes = media::VideoFrame::GetNumberOfPlanes(format);
- media::VideoFrame::GlTexture textures[media::VideoFrame::kMaxPlanes];
-
- // Set the color format of the textures.
- DCHECK(format == media::VideoFrame::RGBA ||
- format == media::VideoFrame::YV12);
- int gl_format = format == media::VideoFrame::RGBA ? GL_RGBA : GL_LUMINANCE;
-
- glGenTextures(planes, textures);
- for (int j = 0; j < planes; ++j) {
- glBindTexture(GL_TEXTURE_2D, textures[j]);
- glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
- glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
- glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
- glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
- glTexImage2D(GL_TEXTURE_2D, 0, gl_format, width, height, 0, gl_format,
- GL_UNSIGNED_BYTE, NULL);
- }
- glFlush();
-
- scoped_refptr<media::VideoFrame> frame;
- media::VideoFrame::CreateFrameGlTexture(format, width, height, textures,
- &frame);
- frames_[i] = frame;
- }
- *frames_out = frames_;
-
- task->Run();
- delete task;
-}
-
-void Gles2VideoDecodeContext::ReleaseAllVideoFrames() {
- if (MessageLoop::current() != message_loop_) {
- message_loop_->PostTask(
- FROM_HERE,
- NewRunnableMethod(this,
- &Gles2VideoDecodeContext::ReleaseAllVideoFrames));
- return;
- }
-
- // Make the context current and then release the video frames.
- bool ret = RendererGLContext::MakeCurrent(context_);
- CHECK(ret) << "Failed to switch context";
-
- for (size_t i = 0; i < frames_.size(); ++i) {
- for (size_t j = 0; j < frames_[i]->planes(); ++j) {
- media::VideoFrame::GlTexture texture = frames_[i]->gl_texture(j);
- glDeleteTextures(1, &texture);
- }
- }
- frames_.clear();
-}
-
-void Gles2VideoDecodeContext::ConvertToVideoFrame(
- void* buffer, scoped_refptr<media::VideoFrame> frame, Task* task) {
- DCHECK(memory_mapped_);
- // TODO(hclam): Implement.
-}
-
-void Gles2VideoDecodeContext::Destroy(Task* task) {
- if (MessageLoop::current() != message_loop_) {
- message_loop_->PostTask(
- FROM_HERE,
- NewRunnableMethod(this, &Gles2VideoDecodeContext::Destroy, task));
- return;
- }
-
- ReleaseAllVideoFrames();
- DCHECK_EQ(0u, frames_.size());
-
- task->Run();
- delete task;
-}
-
-DISABLE_RUNNABLE_METHOD_REFCOUNT(Gles2VideoDecodeContext);
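
For reference, the allocation loop removed above reduces to choosing a GL pixel format from the
VideoFrame format and then creating one texture per plane. The sketch below shows that pattern in
isolation; it uses plain GLES2, the Format enum and AllocatePlaneTextures() name are hypothetical
stand-ins for the Chromium types, and it assumes a GL context is already current on the calling
thread (which the removed code guaranteed via RendererGLContext::MakeCurrent).

// Sketch only: mirrors the per-plane texture setup in the removed
// AllocateVideoFrames(). Format and AllocatePlaneTextures() are hypothetical
// stand-ins; a GLES2 context must already be current on this thread.
#include <GLES2/gl2.h>
#include <vector>

enum class Format { kRGBA, kYV12 };

std::vector<GLuint> AllocatePlaneTextures(Format format, GLsizei width,
                                          GLsizei height) {
  // RGBA frames use a single four-channel plane; YV12 uses three
  // single-channel planes (the removed code sized every plane identically).
  const int planes = (format == Format::kRGBA) ? 1 : 3;
  const GLenum gl_format = (format == Format::kRGBA) ? GL_RGBA : GL_LUMINANCE;

  std::vector<GLuint> textures(planes, 0);
  glGenTextures(planes, textures.data());
  for (GLuint texture : textures) {
    glBindTexture(GL_TEXTURE_2D, texture);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    // Allocate texture storage without uploading any pixel data yet.
    glTexImage2D(GL_TEXTURE_2D, 0, gl_format, width, height, 0, gl_format,
                 GL_UNSIGNED_BYTE, nullptr);
  }
  glFlush();
  return textures;
}
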
diff --git a/content/renderer/media/gles2_video_decode_context.h b/content/renderer/media/gles2_video_decode_context.h
deleted file mode 100644
index 70b7f71..0000000
--- a/content/renderer/media/gles2_video_decode_context.h
+++ /dev/null
@@ -1,112 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CONTENT_RENDERER_MEDIA_GLES2_VIDEO_DECODE_CONTEXT_H_
-#define CONTENT_RENDERER_MEDIA_GLES2_VIDEO_DECODE_CONTEXT_H_
-
-#include <vector>
-
-#include "media/video/video_decode_context.h"
-
-class MessageLoop;
-class RendererGLContext;
-
-// FUNCTIONS
-//
-// This is a class that provides a video decode context using a
-// RendererGLContext backend.
-//
-// It provides resources for a VideoDecodeEngine to store decoded video frames.
-//
-// This class is aware of the command buffer implementation of GLES2 inside the
-// Chrome renderer and keeps a reference to a RendererGLContext. It might use
-// GLES2 commands specific to Chrome's renderer process to provide needed
-// resources.
-//
-// There are two different kinds of video frame storage provided by this class:
-// 1. Memory mapped textures (aka software decoding mode).
-// Each texture is memory mapped and appears to the VideoDecodeEngine as
-// system memory.
-//
-// The VideoDecodeEngine performs software video decoding and uses these
-// textures as if they were allocated in plain system memory (in fact they
-// are allocated in system memory and shared with the GPU process). An
-// additional step of uploading the content to video memory is therefore
-// needed. Since VideoDecodeEngine is unaware of the video memory, this
-// upload operation is performed by calling
-// ConvertToVideoFrame().
-//
-// After the content is uploaded to video memory, WebKit will see the video
-// frame as textures and will perform the necessary operations for
-// rendering.
-//
-// 2. Opaque textures (aka hardware decoding mode).
-// In this mode of operation each video frame is backed by some opaque
-// textures. This is used only when hardware video decoding is used. The
-// textures need to be generated and allocated inside the renderer process
-// first. This establishes a mapping between texture IDs in the renderer
-// process and the GPU process.
-//
-// The generated texture ID is used by IpcVideoDecodeEngine only to be sent
-// to the GPU process. Inside the GPU process the texture ID is translated to
-// a real texture ID inside the actual context. The real texture ID is then
-// assigned to the hardware video decoder for storing the video frame.
-//
-// WebKit will see the video frame as normal textures and perform the
-// necessary render operations.
-//
-// In both operation modes, the objective is to have WebKit see the video frames
-// as regular textures.
-//
-// THREAD SEMANTICS
-//
-// All methods of this class can be called on any thread. The GLES2 context and
-// all OpenGL calls are accessed on the Render Thread. As a result, all Tasks
-// given to this object are executed on the Render Thread.
-//
-// Since this class is not refcounted, it is important to destroy objects of
-// this class only when the Task given to Destroy() is called.
-//
-class Gles2VideoDecodeContext : public media::VideoDecodeContext {
- public:
- // |message_loop| is the message loop of the Render Thread.
- // |memory_mapped| determines if textures allocated are memory mapped.
- // |context| is the graphics context for generating textures.
- Gles2VideoDecodeContext(MessageLoop* message_loop,
- bool memory_mapped,
- RendererGLContext* context);
- virtual ~Gles2VideoDecodeContext();
-
- // media::VideoDecodeContext implementation.
- virtual void* GetDevice();
- virtual void AllocateVideoFrames(
- int frames_num, size_t width, size_t height,
- media::VideoFrame::Format format,
- std::vector<scoped_refptr<media::VideoFrame> >* frames_out, Task* task);
- virtual void ReleaseAllVideoFrames();
- virtual void ConvertToVideoFrame(void* buffer,
- scoped_refptr<media::VideoFrame> frame,
- Task* task);
- virtual void Destroy(Task* task);
-
- // Accessor of the current mode of this decode context.
- bool IsMemoryMapped() const { return memory_mapped_; }
-
- private:
- // Message loop for Render Thread.
- MessageLoop* message_loop_;
-
- // Type of storage provided by this class.
- bool memory_mapped_;
-
- // Pointer to the GLES2 context.
- RendererGLContext* context_;
-
- // VideoFrames allocated.
- std::vector<scoped_refptr<media::VideoFrame> > frames_;
-
- DISALLOW_COPY_AND_ASSIGN(Gles2VideoDecodeContext);
-};
-
-#endif // CONTENT_RENDERER_MEDIA_GLES2_VIDEO_DECODE_CONTEXT_H_
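
The thread semantics described in the header comment above are enforced in the .cc file by a
re-posting idiom: each public method checks whether it is already running on the owning message
loop and, if not, posts itself there and returns. The sketch below shows that idiom with only
standard C++; TaskRunner and DecodeContext are hypothetical stand-ins for Chromium's MessageLoop
and Gles2VideoDecodeContext, not their real APIs.

// Sketch only: the "hop to the owning thread" idiom used by
// Gles2VideoDecodeContext, with a toy single-thread task queue in place of
// Chromium's MessageLoop. All names here are illustrative.
#include <condition_variable>
#include <deque>
#include <functional>
#include <future>
#include <iostream>
#include <mutex>
#include <thread>

class TaskRunner {
 public:
  TaskRunner() : worker_([this] { Loop(); }) {}
  ~TaskRunner() {
    Post(nullptr);  // An empty task is the quit signal.
    worker_.join();
  }

  void Post(std::function<void()> task) {
    std::lock_guard<std::mutex> lock(mutex_);
    tasks_.push_back(std::move(task));
    cv_.notify_one();
  }

  bool RunsTasksOnCurrentThread() const {
    return std::this_thread::get_id() == worker_.get_id();
  }

 private:
  void Loop() {
    for (;;) {
      std::function<void()> task;
      {
        std::unique_lock<std::mutex> lock(mutex_);
        cv_.wait(lock, [this] { return !tasks_.empty(); });
        task = std::move(tasks_.front());
        tasks_.pop_front();
      }
      if (!task) return;
      task();
    }
  }

  std::mutex mutex_;
  std::condition_variable cv_;
  std::deque<std::function<void()>> tasks_;
  std::thread worker_;  // Declared last so the queue exists before the thread starts.
};

class DecodeContext {
 public:
  explicit DecodeContext(TaskRunner* runner) : runner_(runner) {}

  // If called off the owning thread, bounce the call over and return early,
  // just like ReleaseAllVideoFrames() in the removed code.
  void ReleaseAllFrames() {
    if (!runner_->RunsTasksOnCurrentThread()) {
      runner_->Post([this] { ReleaseAllFrames(); });
      return;
    }
    std::cout << "releasing frames on the owning thread\n";
  }

 private:
  TaskRunner* runner_;
};

int main() {
  TaskRunner runner;
  DecodeContext context(&runner);
  context.ReleaseAllFrames();  // Called off-thread; hops to the worker.

  // Block until the reposted task has run so |context| outlives it.
  std::promise<void> done;
  runner.Post([&done] { done.set_value(); });
  done.get_future().wait();
}
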
diff --git a/content/renderer/media/ipc_video_decoder.cc b/content/renderer/media/ipc_video_decoder.cc
deleted file mode 100644
index 7f71312..0000000
--- a/content/renderer/media/ipc_video_decoder.cc
+++ /dev/null
@@ -1,207 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "content/renderer/media/ipc_video_decoder.h"
-
-#include "base/task.h"
-#include "content/common/child_process.h"
-#include "content/renderer/gpu/renderer_gl_context.h"
-#include "media/base/callback.h"
-#include "media/base/filters.h"
-#include "media/base/filter_host.h"
-#include "media/base/limits.h"
-#include "media/base/media_format.h"
-#include "media/base/video_frame.h"
-#include "media/ffmpeg/ffmpeg_common.h"
-#include "media/video/video_decode_engine.h"
-
-IpcVideoDecoder::IpcVideoDecoder(MessageLoop* message_loop,
- RendererGLContext* gl_context)
- : decode_context_message_loop_(message_loop),
- gl_context_(gl_context) {
-}
-
-IpcVideoDecoder::~IpcVideoDecoder() {
-}
-
-void IpcVideoDecoder::Initialize(media::DemuxerStream* demuxer_stream,
- media::FilterCallback* callback,
- media::StatisticsCallback* statsCallback) {
- // It doesn't matter on which thread we perform initialization, because
- // all this method does is create objects and delegate the initialize
- // message.
-
- DCHECK(!demuxer_stream_);
- demuxer_stream_ = demuxer_stream;
- initialize_callback_.reset(callback);
- statistics_callback_.reset(statsCallback);
-
- // We require a bitstream converter for the hardware decoder.
- demuxer_stream->EnableBitstreamConverter();
-
- AVStream* av_stream = demuxer_stream->GetAVStream();
- if (!av_stream) {
- media::VideoCodecInfo info = {0};
- OnInitializeComplete(info);
- return;
- }
-
- int width = av_stream->codec->coded_width;
- int height = av_stream->codec->coded_height;
-
- int surface_width = media::GetSurfaceWidth(av_stream);
- int surface_height = media::GetSurfaceHeight(av_stream);
-
- if (surface_width > media::Limits::kMaxDimension ||
- surface_height > media::Limits::kMaxDimension ||
- (surface_width * surface_height) > media::Limits::kMaxCanvas) {
- media::VideoCodecInfo info = {0};
- OnInitializeComplete(info);
- return;
- }
-
- // Create a video decode context that is associated with the graphics
- // context.
- decode_context_.reset(
- gl_context_->CreateVideoDecodeContext(
- decode_context_message_loop_, true));
-
- // Create a hardware video decoder handle.
- decode_engine_.reset(gl_context_->CreateVideoDecodeEngine());
-
- media::VideoDecoderConfig config(
- media::CodecIDToVideoCodec(av_stream->codec->codec_id),
- width, height,
- surface_width, surface_height,
- av_stream->r_frame_rate.num,
- av_stream->r_frame_rate.den,
- av_stream->codec->extradata,
- av_stream->codec->extradata_size);
-
- // VideoDecodeEngine will perform initialization on the message loop
- // given to it so it doesn't matter on which thread we are calling this.
- decode_engine_->Initialize(ChildProcess::current()->io_message_loop(), this,
- decode_context_.get(), config);
-}
-
-const media::MediaFormat& IpcVideoDecoder::media_format() {
- return media_format_;
-}
-
-void IpcVideoDecoder::Stop(media::FilterCallback* callback) {
- stop_callback_.reset(callback);
- decode_engine_->Uninitialize();
-}
-
-void IpcVideoDecoder::Pause(media::FilterCallback* callback) {
- // TODO(hclam): It looks like pause is not necessary, so implement this
- // later.
- callback->Run();
- delete callback;
-}
-
-void IpcVideoDecoder::Flush(media::FilterCallback* callback) {
- flush_callback_.reset(callback);
- decode_engine_->Flush();
-}
-
-void IpcVideoDecoder::Seek(base::TimeDelta time,
- const media::FilterStatusCB& cb) {
- seek_cb_ = cb;
- decode_engine_->Seek();
-}
-
-void IpcVideoDecoder::OnInitializeComplete(const media::VideoCodecInfo& info) {
- DCHECK_EQ(ChildProcess::current()->io_message_loop(), MessageLoop::current());
-
- if (info.success) {
- media_format_.SetAsInteger(media::MediaFormat::kSurfaceType,
- media::VideoFrame::TYPE_GL_TEXTURE);
- media_format_.SetAsInteger(media::MediaFormat::kSurfaceFormat,
- info.stream_info.surface_format);
- media_format_.SetAsInteger(media::MediaFormat::kWidth,
- info.stream_info.surface_width);
- media_format_.SetAsInteger(media::MediaFormat::kHeight,
- info.stream_info.surface_height);
- media_format_.SetAsInteger(
- media::MediaFormat::kSurfaceType,
- static_cast<int>(media::VideoFrame::TYPE_GL_TEXTURE));
- } else {
- LOG(ERROR) << "IpcVideoDecoder initialization failed!";
- host()->SetError(media::PIPELINE_ERROR_DECODE);
- }
-
- initialize_callback_->Run();
- initialize_callback_.reset();
-}
-
-void IpcVideoDecoder::OnUninitializeComplete() {
- DCHECK_EQ(ChildProcess::current()->io_message_loop(), MessageLoop::current());
-
- // After the decode engine is uninitialized it is safe to destroy the decode
- // context. The task will add a refcount to this object, so we don't need to
- // worry about object lifetime.
- decode_context_->Destroy(
- NewRunnableMethod(this, &IpcVideoDecoder::OnDestroyComplete));
-
- // We don't need to wait for destruction of the decode context to complete
- // because it can happen asynchronously. This object and the decode context
- // will live until the destruction task has run.
- stop_callback_->Run();
- stop_callback_.reset();
-}
-
-void IpcVideoDecoder::OnFlushComplete() {
- DCHECK_EQ(ChildProcess::current()->io_message_loop(), MessageLoop::current());
- flush_callback_->Run();
- flush_callback_.reset();
-}
-
-void IpcVideoDecoder::OnSeekComplete() {
- DCHECK_EQ(ChildProcess::current()->io_message_loop(), MessageLoop::current());
- ResetAndRunCB(&seek_cb_, media::PIPELINE_OK);
-}
-
-void IpcVideoDecoder::OnError() {
- DCHECK_EQ(ChildProcess::current()->io_message_loop(), MessageLoop::current());
- host()->SetError(media::PIPELINE_ERROR_DECODE);
-}
-
-// This method is called by the Demuxer after a demuxed packet is produced.
-void IpcVideoDecoder::OnReadComplete(media::Buffer* buffer) {
- decode_engine_->ConsumeVideoSample(buffer);
-}
-
-void IpcVideoDecoder::OnDestroyComplete() {
- // We don't need to do anything in this method. Destruction of objects will
- // occur as soon as refcount goes to 0.
-}
-
-// This method is called by VideoRenderer. We delegate the method call to
-// VideoDecodeEngine.
-void IpcVideoDecoder::ProduceVideoFrame(
- scoped_refptr<media::VideoFrame> video_frame) {
- decode_engine_->ProduceVideoFrame(video_frame);
-}
-
-bool IpcVideoDecoder::ProvidesBuffer() {
- return true;
-}
-
-// This method is called by VideoDecodeEngine when a video frame is produced.
-// The frame is then passed to VideoRenderer.
-void IpcVideoDecoder::ConsumeVideoFrame(
- scoped_refptr<media::VideoFrame> video_frame,
- const media::PipelineStatistics& statistics) {
- DCHECK(video_frame);
- statistics_callback_->Run(statistics);
-
- VideoFrameReady(video_frame);
-}
-
-// This method is called by VideoDecodeEngine to request a video frame. The
-// request is passed to the demuxer.
-void IpcVideoDecoder::ProduceVideoSample(scoped_refptr<media::Buffer> buffer) {
- demuxer_stream_->Read(base::Bind(&IpcVideoDecoder::OnReadComplete, this));
-}
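
One detail worth noting in the removed Initialize() above is that it validates the surface size
against media::Limits before creating any GPU-side objects. A standalone sketch of that guard
follows; the limit values are illustrative assumptions, not Chromium's actual constants.

// Sketch only: mirrors the dimension guard in the removed
// IpcVideoDecoder::Initialize(). The limits are assumed values, not the real
// media::Limits constants.
#include <cstdint>

namespace limits {
constexpr int kMaxDimension = 1 << 15;   // Per-axis cap (assumed).
constexpr int64_t kMaxCanvas = 1 << 26;  // Total-pixel cap (assumed).
}  // namespace limits

// Returns true if a decoder surface of |width| x |height| is acceptable.
bool IsSurfaceSizeSupported(int width, int height) {
  if (width <= 0 || height <= 0)
    return false;
  if (width > limits::kMaxDimension || height > limits::kMaxDimension)
    return false;
  // Use 64-bit math so the area check cannot overflow.
  return static_cast<int64_t>(width) * height <= limits::kMaxCanvas;
}
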
diff --git a/content/renderer/media/ipc_video_decoder.h b/content/renderer/media/ipc_video_decoder.h
deleted file mode 100644
index 280014b..0000000
--- a/content/renderer/media/ipc_video_decoder.h
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CONTENT_RENDERER_MEDIA_IPC_VIDEO_DECODER_H_
-#define CONTENT_RENDERER_MEDIA_IPC_VIDEO_DECODER_H_
-
-#include "base/time.h"
-#include "media/base/pts_heap.h"
-#include "media/base/video_frame.h"
-#include "media/filters/decoder_base.h"
-#include "media/video/video_decode_engine.h"
-#include "media/video/video_decode_context.h"
-
-struct AVRational;
-class RendererGLContext;
-
-class IpcVideoDecoder : public media::VideoDecoder,
- public media::VideoDecodeEngine::EventHandler {
- public:
- IpcVideoDecoder(MessageLoop* message_loop, RendererGLContext* gl_context);
- virtual ~IpcVideoDecoder();
-
- // media::Filter implementation.
- virtual void Stop(media::FilterCallback* callback);
- virtual void Seek(base::TimeDelta time, const media::FilterStatusCB& cb);
- virtual void Pause(media::FilterCallback* callback);
- virtual void Flush(media::FilterCallback* callback);
-
- // media::VideoDecoder implementation.
- virtual void Initialize(media::DemuxerStream* demuxer_stream,
- media::FilterCallback* callback,
- media::StatisticsCallback* statsCallback);
- virtual const media::MediaFormat& media_format();
- virtual void ProduceVideoFrame(scoped_refptr<media::VideoFrame> video_frame);
-
- // TODO(hclam): Remove this method.
- virtual bool ProvidesBuffer();
-
- // media::VideoDecodeEngine::EventHandler implementation.
- virtual void OnInitializeComplete(const media::VideoCodecInfo& info);
- virtual void OnUninitializeComplete();
- virtual void OnFlushComplete();
- virtual void OnSeekComplete();
- virtual void OnError();
-
- // TODO(hclam): Remove this method.
- virtual void OnFormatChange(media::VideoStreamInfo stream_info) {}
- virtual void ProduceVideoSample(scoped_refptr<media::Buffer> buffer);
- virtual void ConsumeVideoFrame(scoped_refptr<media::VideoFrame> frame,
- const media::PipelineStatistics& statistics);
-
- private:
- void OnReadComplete(media::Buffer* buffer);
- void OnDestroyComplete();
-
- media::MediaFormat media_format_;
-
- scoped_ptr<media::FilterCallback> flush_callback_;
- media::FilterStatusCB seek_cb_;
- scoped_ptr<media::FilterCallback> initialize_callback_;
- scoped_ptr<media::FilterCallback> stop_callback_;
- scoped_ptr<media::StatisticsCallback> statistics_callback_;
-
- // Pointer to the demuxer stream that will feed us compressed buffers.
- scoped_refptr<media::DemuxerStream> demuxer_stream_;
-
- // This is the message loop that we should assign to VideoDecodeContext.
- MessageLoop* decode_context_message_loop_;
-
- // A context for allocating textures and issuing GLES2 commands.
- // TODO(hclam): A RendererGLContext lives on the Render Thread while this
- // object lives on the Video Decoder Thread; we need to take care of context
- // loss and destruction of the context.
- RendererGLContext* gl_context_;
-
- // This VideoDecodeEngine translates our requests into IPC commands to the
- // GPU process.
- // VideoDecodeEngine should run on the IO Thread instead of the Render Thread
- // to avoid deadlock during teardown of the media pipeline.
- scoped_ptr<media::VideoDecodeEngine> decode_engine_;
-
- // Decoding context to be used by VideoDecodeEngine.
- scoped_ptr<media::VideoDecodeContext> decode_context_;
-
- DISALLOW_COPY_AND_ASSIGN(IpcVideoDecoder);
-};
-
-#endif // CONTENT_RENDERER_MEDIA_IPC_VIDEO_DECODER_H_
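
A recurring pattern in the removed decoder is the handling of one-shot completion callbacks:
Flush()/Stop()/Initialize() store the caller's callback, and the matching On*Complete() event runs
it once and then drops it. The sketch below expresses that pattern with std::function rather than
media::FilterCallback; the class and member names are hypothetical.

// Sketch only: the "store once, run once, then drop" callback handling used
// for flush/stop/initialize completion, expressed with std::function instead
// of media::FilterCallback. Names are illustrative.
#include <cassert>
#include <functional>
#include <iostream>
#include <utility>

class FlushingDecoder {
 public:
  using Callback = std::function<void()>;

  // The caller requests a flush and provides a completion callback.
  void Flush(Callback done) {
    assert(!flush_done_);  // Only one flush may be in flight at a time.
    flush_done_ = std::move(done);
    // ... kick off the asynchronous engine flush here ...
  }

  // Invoked later, when the decode engine reports that the flush finished.
  void OnFlushComplete() {
    if (!flush_done_)
      return;
    // Release ownership before running so a re-entrant Flush() is legal.
    Callback done = std::move(flush_done_);
    flush_done_ = nullptr;
    done();
  }

 private:
  Callback flush_done_;
};

int main() {
  FlushingDecoder decoder;
  decoder.Flush([] { std::cout << "flush finished\n"; });
  decoder.OnFlushComplete();  // Simulate the engine signaling completion.
}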