Diffstat (limited to 'media/video')
-rw-r--r--  media/video/ffmpeg_video_decode_engine.cc            1
-rw-r--r--  media/video/ffmpeg_video_decode_engine.h              1
-rw-r--r--  media/video/ffmpeg_video_decode_engine_unittest.cc    8
-rw-r--r--  media/video/omx_video_decode_engine.cc                1
-rw-r--r--  media/video/omx_video_decode_engine.h                 1
-rw-r--r--  media/video/video_decode_context.h                   63
-rw-r--r--  media/video/video_decode_engine.h                    17
7 files changed, 68 insertions(+), 24 deletions(-)
diff --git a/media/video/ffmpeg_video_decode_engine.cc b/media/video/ffmpeg_video_decode_engine.cc
index 75be752..7a8181b 100644
--- a/media/video/ffmpeg_video_decode_engine.cc
+++ b/media/video/ffmpeg_video_decode_engine.cc
@@ -35,6 +35,7 @@ FFmpegVideoDecodeEngine::~FFmpegVideoDecodeEngine() {
void FFmpegVideoDecodeEngine::Initialize(
MessageLoop* message_loop,
VideoDecodeEngine::EventHandler* event_handler,
+ VideoDecodeContext* context,
const VideoCodecConfig& config) {
allocator_.reset(new FFmpegVideoAllocator());
diff --git a/media/video/ffmpeg_video_decode_engine.h b/media/video/ffmpeg_video_decode_engine.h
index bc1e033..a7ce4e4 100644
--- a/media/video/ffmpeg_video_decode_engine.h
+++ b/media/video/ffmpeg_video_decode_engine.h
@@ -28,6 +28,7 @@ class FFmpegVideoDecodeEngine : public VideoDecodeEngine {
// Implementation of the VideoDecodeEngine Interface.
virtual void Initialize(MessageLoop* message_loop,
VideoDecodeEngine::EventHandler* event_handler,
+ VideoDecodeContext* context,
const VideoCodecConfig& config);
virtual void ConsumeVideoSample(scoped_refptr<Buffer> buffer);
virtual void ProduceVideoFrame(scoped_refptr<VideoFrame> frame);
diff --git a/media/video/ffmpeg_video_decode_engine_unittest.cc b/media/video/ffmpeg_video_decode_engine_unittest.cc
index 7db2f55..9f737ba 100644
--- a/media/video/ffmpeg_video_decode_engine_unittest.cc
+++ b/media/video/ffmpeg_video_decode_engine_unittest.cc
@@ -91,7 +91,7 @@ class FFmpegVideoDecodeEngineTest : public testing::Test,
config_.height = kHeight;
EXPECT_CALL(*this, OnInitializeComplete(_))
.WillOnce(SaveInitializeResult(this));
- test_engine_->Initialize(MessageLoop::current(), this, config_);
+ test_engine_->Initialize(MessageLoop::current(), this, NULL, config_);
EXPECT_TRUE(info_.success);
}
@@ -143,7 +143,7 @@ TEST_F(FFmpegVideoDecodeEngineTest, Initialize_FindDecoderFails) {
config_.height = kHeight;
EXPECT_CALL(*this, OnInitializeComplete(_))
.WillOnce(SaveInitializeResult(this));
- test_engine_->Initialize(MessageLoop::current(), this, config_);
+ test_engine_->Initialize(MessageLoop::current(), this, NULL, config_);
EXPECT_FALSE(info_.success);
}
@@ -165,7 +165,7 @@ TEST_F(FFmpegVideoDecodeEngineTest, Initialize_InitThreadFails) {
config_.height = kHeight;
EXPECT_CALL(*this, OnInitializeComplete(_))
.WillOnce(SaveInitializeResult(this));
- test_engine_->Initialize(MessageLoop::current(), this, config_);
+ test_engine_->Initialize(MessageLoop::current(), this, NULL, config_);
EXPECT_FALSE(info_.success);
}
@@ -188,7 +188,7 @@ TEST_F(FFmpegVideoDecodeEngineTest, Initialize_OpenDecoderFails) {
config_.height = kHeight;
EXPECT_CALL(*this, OnInitializeComplete(_))
.WillOnce(SaveInitializeResult(this));
- test_engine_->Initialize(MessageLoop::current(), this, config_);
+ test_engine_->Initialize(MessageLoop::current(), this, NULL, config_);
EXPECT_FALSE(info_.success);
}
diff --git a/media/video/omx_video_decode_engine.cc b/media/video/omx_video_decode_engine.cc
index e1bcc7f..0df0f1e 100644
--- a/media/video/omx_video_decode_engine.cc
+++ b/media/video/omx_video_decode_engine.cc
@@ -82,6 +82,7 @@ static void ResetParamHeader(const OmxVideoDecodeEngine& dec, T* param) {
void OmxVideoDecodeEngine::Initialize(
MessageLoop* message_loop,
VideoDecodeEngine::EventHandler* event_handler,
+ VideoDecodeContext* context,
const VideoCodecConfig& config) {
DCHECK_EQ(message_loop, MessageLoop::current());
diff --git a/media/video/omx_video_decode_engine.h b/media/video/omx_video_decode_engine.h
index 8347eed..c5b3882 100644
--- a/media/video/omx_video_decode_engine.h
+++ b/media/video/omx_video_decode_engine.h
@@ -28,6 +28,7 @@ class OmxVideoDecodeEngine : public VideoDecodeEngine {
// Implementation of the VideoDecodeEngine Interface.
virtual void Initialize(MessageLoop* message_loop,
VideoDecodeEngine::EventHandler* event_handler,
+ VideoDecodeContext* context,
const VideoCodecConfig& config);
virtual void ConsumeVideoSample(scoped_refptr<Buffer> buffer);
virtual void ProduceVideoFrame(scoped_refptr<VideoFrame> frame);
diff --git a/media/video/video_decode_context.h b/media/video/video_decode_context.h
index 0bea382..f768a0a 100644
--- a/media/video/video_decode_context.h
+++ b/media/video/video_decode_context.h
@@ -5,20 +5,30 @@
#ifndef MEDIA_VIDEO_VIDEO_DECODE_CONTEXT_H_
#define MEDIA_VIDEO_VIDEO_DECODE_CONTEXT_H_
-#include "base/callback.h"
+#include <vector>
+
+#include "base/task.h"
+#include "media/base/video_frame.h"
namespace media {
class VideoFrame;
-// A VideoDecodeContext provides resources like output video frame storage and
-// hardware decoder handle to a VideoDecodeEngine, it hides all the platform and
-// subsystem details from the decode engine.
+// A VideoDecodeContext is used by a VideoDecodeEngine to provide the
+// following functions:
+//
+// 1. Provide access to the hardware video decoding device.
+// 2. Allocate VideoFrame objects that are used to carry the decoded video
+//    frames.
+// 3. Upload a device specific buffer to some common VideoFrame storage types.
+//    In many cases a VideoDecodeEngine provides its own buffers; these
+//    buffers are usually device specific and a conversion step is needed.
+//    Instead of handling all these cases in the renderer, a
+//    VideoDecodeContext is used to convert the device specific buffer to a
+//    common storage format, e.g. GL textures or system memory. This way the
+//    device specific code is kept in the VideoDecodeEngine and
+//    VideoDecodeContext pair.
class VideoDecodeContext {
public:
- typedef Callback2<int, VideoFrame*[]>::Type AllocationCompleteCallback;
- typedef Callback0::Type DestructionCompleteCallback;
-
virtual ~VideoDecodeContext() {};
// Obtain a handle to the hardware video decoder device. The type of the
@@ -28,22 +38,45 @@ class VideoDecodeContext {
// If a hardware device is not needed this method should return NULL.
virtual void* GetDevice() = 0;
- // Allocate |n| video frames with dimension |width| and |height|. |callback|
+ // Allocate |n| video frames with dimensions |width| and |height|. |task|
// is called when allocation has completed.
- virtual void AllocateVideoFrames(int n, size_t width, size_t height,
- AllocationCompleteCallback* callback) = 0;
+ //
+ // |frames| is the output parameter for the VideoFrame(s) allocated.
+ virtual void AllocateVideoFrames(
+ int n, size_t width, size_t height, VideoFrame::Format format,
+ std::vector<scoped_refptr<VideoFrame> >* frames,
+ Task* task) = 0;
- // Release video frames allocated by the context. After making this call
+ // Release all video frames allocated by the context. After making this call
// VideoDecodeEngine should not use the VideoFrame allocated because they
// could be destroyed.
- virtual void ReleaseVideoFrames(int n, VideoFrame* frames) = 0;
+ virtual void ReleaseAllVideoFrames() = 0;
+
+ // Upload a device specific buffer to a video frame. The video frame was
+ // allocated via AllocateVideoFrames().
+ // This method is used if a VideoDecodeEngine cannot write directly to a
+ // VideoFrame, e.g. the upload has to be done on a different thread or the
+ // subsystem requires special treatment to generate a VideoFrame. The goal is
+ // to keep VideoDecodeEngine a reusable component and to adapt it to a
+ // different system by providing a different VideoDecodeContext.
+ //
+ // |frame| is a VideoFrame allocated via AllocateVideoFrames().
+ //
+ // |buffer| is of type void*; it is an internal type of the VideoDecodeEngine
+ // and points to the buffer that contains the video frame.
+ // The implementor should know how to handle it.
+ //
+ // |task| is executed if the operation was completed successfully.
+ // TODO(hclam): Rename this to ConvertToVideoFrame().
+ virtual void UploadToVideoFrame(void* buffer, scoped_refptr<VideoFrame> frame,
+ Task* task) = 0;
- // Destroy this context asynchronously. When the operation is done |callback|
+ // Destroy this context asynchronously. When the operation is done |task|
// is called.
//
// ReleaseVideoFrames() need to be called with all the video frames allocated
- // before making this call.
- virtual void Destroy(DestructionCompleteCallback* callback) = 0;
+ // before making this call.
+ virtual void Destroy(Task* task) = 0;
};
} // namespace media
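
For orientation while reviewing the new interface above, the following is a rough sketch of what a trivial, system-memory-backed implementation of the updated VideoDecodeContext might look like. It is not part of this change: the class name SystemMemoryVideoDecodeContext is invented, and the exact VideoFrame::CreateFrame() signature and the Run()-then-delete handling of the completion Task are assumptions about the Chromium APIs of this period.

// Hypothetical example only, not part of this patch: a VideoDecodeContext
// that backs every frame with system memory and exposes no hardware device.
#include <vector>

#include "base/task.h"
#include "base/time.h"
#include "media/video/video_decode_context.h"

class SystemMemoryVideoDecodeContext : public media::VideoDecodeContext {
 public:
  // No hardware decoding device is involved, so there is no handle to expose.
  virtual void* GetDevice() { return NULL; }

  virtual void AllocateVideoFrames(
      int n, size_t width, size_t height, media::VideoFrame::Format format,
      std::vector<scoped_refptr<media::VideoFrame> >* frames,
      Task* task) {
    // Assumed factory method: VideoFrame::CreateFrame() allocates a
    // system memory frame of the requested format and size.
    for (int i = 0; i < n; ++i) {
      scoped_refptr<media::VideoFrame> frame;
      media::VideoFrame::CreateFrame(format, width, height,
                                     base::TimeDelta(), base::TimeDelta(),
                                     &frame);
      frames->push_back(frame);
    }
    frames_ = *frames;
    // Allocation is synchronous here, so signal completion right away.
    task->Run();
    delete task;
  }

  virtual void ReleaseAllVideoFrames() { frames_.clear(); }

  virtual void UploadToVideoFrame(void* buffer,
                                  scoped_refptr<media::VideoFrame> frame,
                                  Task* task) {
    // A real context would convert |buffer| (an engine-internal type) into
    // |frame| here; for plain system memory there is nothing to do.
    task->Run();
    delete task;
  }

  virtual void Destroy(Task* task) {
    ReleaseAllVideoFrames();
    task->Run();
    delete task;
  }

 private:
  std::vector<scoped_refptr<media::VideoFrame> > frames_;
};
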
diff --git a/media/video/video_decode_engine.h b/media/video/video_decode_engine.h
index 8736890..f9381a2 100644
--- a/media/video/video_decode_engine.h
+++ b/media/video/video_decode_engine.h
@@ -12,6 +12,7 @@
namespace media {
class Buffer;
+class VideoDecodeContext;
enum VideoCodec {
kCodecH264,
@@ -116,13 +117,19 @@ class VideoDecodeEngine {
virtual ~VideoDecodeEngine() {}
- // Initialized the engine with specified configuration. |message_loop| could
- // be NULL if every operation is synchronous. Engine should call the
- // EventHandler::OnInitializeDone() no matter finished successfully or not.
- // TODO(jiesun): remove message_loop and create thread inside openmax engine?
- // or create thread in GpuVideoDecoder and pass message loop here?
+ // Initialize the engine with specified configuration.
+ //
+ // |context| is used for the allocation of VideoFrames.
+ // It is important that |context| is used only on |message_loop|.
+ //
+ // TODO(hclam): Currently refactoring code to use VideoDecodeContext so
+ // |context| may be NULL in some cases.
+ //
+ // The engine should call EventHandler::OnInitializeComplete() whether the
+ // initialization operation finished successfully or not.
virtual void Initialize(MessageLoop* message_loop,
EventHandler* event_handler,
+ VideoDecodeContext* context,
const VideoCodecConfig& config) = 0;
// Uninitialize the engine. Engine should destroy all resources and call
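
Since the hunk above only changes the declaration, here is a hypothetical sketch of how an engine implementation could use the |context| it now receives in Initialize(). SomeVideoDecodeEngine, kNumFrames and OnFramesAllocated() are invented for illustration, other pure virtual members of VideoDecodeEngine are omitted, and it assumes the engine is reference counted so that NewRunnableMethod() from base/task.h can build the completion Task; none of this is implied by the patch itself.

// Hypothetical engine-side use of the VideoDecodeContext passed to
// Initialize(); illustration only, not part of this patch.
#include <vector>

#include "base/ref_counted.h"
#include "base/task.h"
#include "media/video/video_decode_context.h"
#include "media/video/video_decode_engine.h"

class SomeVideoDecodeEngine
    : public media::VideoDecodeEngine,
      public base::RefCountedThreadSafe<SomeVideoDecodeEngine> {
 public:
  virtual void Initialize(MessageLoop* message_loop,
                          EventHandler* event_handler,
                          media::VideoDecodeContext* context,
                          const media::VideoCodecConfig& config) {
    event_handler_ = event_handler;
    context_ = context;

    // Per the TODO above, |context| may still be NULL while the refactoring
    // is in progress, so a real engine keeps its old allocation path around.
    if (!context_) {
      // ... fall back to engine-internal frame allocation ...
      return;
    }

    // Ask the context to allocate the output frames. OnFramesAllocated() is
    // run once |frames_| has been filled in; |context_| is only ever used on
    // |message_loop| as required by the comment above.
    context_->AllocateVideoFrames(
        kNumFrames, config.width, config.height, media::VideoFrame::YV12,
        &frames_,
        NewRunnableMethod(this, &SomeVideoDecodeEngine::OnFramesAllocated));
  }

  // ... ConsumeVideoSample(), ProduceVideoFrame(), etc. omitted ...

 private:
  void OnFramesAllocated() {
    // Report the outcome of initialization back to the owner, as the
    // contract above requires.
    media::VideoCodecInfo info;
    info.success = true;
    event_handler_->OnInitializeComplete(info);
  }

  static const int kNumFrames = 4;
  EventHandler* event_handler_;
  media::VideoDecodeContext* context_;
  std::vector<scoped_refptr<media::VideoFrame> > frames_;
};
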