summaryrefslogtreecommitdiffstats
path: root/media
diff options
context:
space:
mode:
authorhclam@chromium.org <hclam@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2010-09-13 15:53:30 +0000
committerhclam@chromium.org <hclam@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2010-09-13 15:53:30 +0000
commit768708171bb6c8dec34509c11460d3bc355a3e81 (patch)
tree0759c4e4c4fc9bd6d115e5e4ade74f81225756ff /media
parent8f8b6b856f85ec92504ca3e48ea4e0d0886ac7b2 (diff)
downloadchromium_src-768708171bb6c8dec34509c11460d3bc355a3e81.zip
chromium_src-768708171bb6c8dec34509c11460d3bc355a3e81.tar.gz
chromium_src-768708171bb6c8dec34509c11460d3bc355a3e81.tar.bz2
Added FakeGlVideoDecodeEngine to exercise the IPC protocol for hardware video decoding
There are several things done in this patch: 1. Added FakeGlVideoDecodeEngine 2. Fixed style problem in VideoDecodeEngine and gpu_video_common.h 3. Added route to pass texture from gpu process to WebKit BUG=53714 TEST=Tree is green Review URL: http://codereview.chromium.org/3335014 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@59223 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'media')
-rw-r--r--media/base/video_frame.cc68
-rw-r--r--media/base/video_frame.h40
-rw-r--r--media/filters/ffmpeg_video_decoder.cc28
-rw-r--r--media/filters/ffmpeg_video_decoder_unittest.cc2
-rw-r--r--media/filters/omx_video_decoder.cc26
-rw-r--r--media/mf/mft_h264_decoder.cc36
-rw-r--r--media/mf/mft_h264_decoder_example.cc6
-rw-r--r--media/mf/test/mft_h264_decoder_unittest.cc62
-rw-r--r--media/tools/omx_test/omx_test.cc16
-rw-r--r--media/video/ffmpeg_video_decode_engine.cc20
-rw-r--r--media/video/ffmpeg_video_decode_engine_unittest.cc40
-rw-r--r--media/video/omx_video_decode_engine.cc16
-rw-r--r--media/video/video_decode_engine.h42
13 files changed, 256 insertions, 146 deletions
diff --git a/media/base/video_frame.cc b/media/base/video_frame.cc
index b4fb5cf..8e3b245 100644
--- a/media/base/video_frame.cc
+++ b/media/base/video_frame.cc
@@ -8,6 +8,25 @@
namespace media {
+static size_t GetNumberOfPlanes(VideoFrame::Format format) {
+ switch (format) {
+ case VideoFrame::RGB555:
+ case VideoFrame::RGB565:
+ case VideoFrame::RGB24:
+ case VideoFrame::RGB32:
+ case VideoFrame::RGBA:
+ case VideoFrame::ASCII:
+ return VideoFrame::kNumRGBPlanes;
+ case VideoFrame::YV12:
+ case VideoFrame::YV16:
+ return VideoFrame::kNumYUVPlanes;
+ case VideoFrame::NV12:
+ return VideoFrame::kNumNV12Planes;
+ default:
+ return 0;
+ }
+}
+
// static
void VideoFrame::CreateFrame(VideoFrame::Format format,
size_t width,
@@ -52,6 +71,7 @@ void VideoFrame::CreateFrame(VideoFrame::Format format,
*frame_out = alloc_worked ? frame : NULL;
}
+// static
void VideoFrame::CreateFrameExternal(SurfaceType type,
Format format,
size_t width,
@@ -81,6 +101,52 @@ void VideoFrame::CreateFrameExternal(SurfaceType type,
}
// static
+void VideoFrame::CreateFrameGlTexture(Format format,
+ size_t width,
+ size_t height,
+ GlTexture const textures[kMaxPlanes],
+ base::TimeDelta timestamp,
+ base::TimeDelta duration,
+ scoped_refptr<VideoFrame>* frame_out) {
+ DCHECK(frame_out);
+ scoped_refptr<VideoFrame> frame =
+ new VideoFrame(TYPE_GL_TEXTURE, format, width, height);
+ if (frame) {
+ frame->SetTimestamp(timestamp);
+ frame->SetDuration(duration);
+ frame->external_memory_ = true;
+ frame->planes_ = GetNumberOfPlanes(format);
+ for (size_t i = 0; i < kMaxPlanes; ++i) {
+ frame->gl_textures_[i] = textures[i];
+ }
+ }
+ *frame_out = frame;
+}
+
+// static
+void VideoFrame::CreateFrameD3dTexture(Format format,
+ size_t width,
+ size_t height,
+ D3dTexture const textures[kMaxPlanes],
+ base::TimeDelta timestamp,
+ base::TimeDelta duration,
+ scoped_refptr<VideoFrame>* frame_out) {
+ DCHECK(frame_out);
+ scoped_refptr<VideoFrame> frame =
+ new VideoFrame(TYPE_D3D_TEXTURE, format, width, height);
+ if (frame) {
+ frame->SetTimestamp(timestamp);
+ frame->SetDuration(duration);
+ frame->external_memory_ = true;
+ frame->planes_ = GetNumberOfPlanes(format);
+ for (size_t i = 0; i < kMaxPlanes; ++i) {
+ frame->d3d_textures_[i] = textures[i];
+ }
+ }
+ *frame_out = frame;
+}
+
+// static
void VideoFrame::CreateEmptyFrame(scoped_refptr<VideoFrame>* frame_out) {
*frame_out = new VideoFrame(VideoFrame::TYPE_SYSTEM_MEMORY,
VideoFrame::EMPTY, 0, 0);
@@ -192,6 +258,8 @@ VideoFrame::VideoFrame(VideoFrame::SurfaceType type,
planes_ = 0;
memset(&strides_, 0, sizeof(strides_));
memset(&data_, 0, sizeof(data_));
+ memset(&gl_textures_, 0, sizeof(gl_textures_));
+ memset(&d3d_textures_, 0, sizeof(d3d_textures_));
external_memory_ = false;
private_buffer_ = NULL;
}
diff --git a/media/base/video_frame.h b/media/base/video_frame.h
index 0eb38fb..88c11dd 100644
--- a/media/base/video_frame.h
+++ b/media/base/video_frame.h
@@ -17,6 +17,7 @@ class VideoFrame : public StreamSample {
static const size_t kRGBPlane = 0;
static const size_t kNumYUVPlanes = 3;
+ static const size_t kNumNV12Planes = 2;
static const size_t kYPlane = 0;
static const size_t kUPlane = 1;
static const size_t kVPlane = 2;
@@ -50,7 +51,13 @@ class VideoFrame : public StreamSample {
TYPE_D3D_TEXTURE,
};
- public:
+ // Defines a new type for GL texture so we don't include OpenGL headers.
+ typedef unsigned int GlTexture;
+
+ // Defines a new type for D3D texture so we don't include D3D headers and
+ // don't need to bind to a specific version of D3D.
+ typedef void* D3dTexture;
+
// Creates a new frame in system memory with given parameters. Buffers for
// the frame are allocated but not initialized.
static void CreateFrame(Format format,
@@ -75,6 +82,24 @@ class VideoFrame : public StreamSample {
void* private_buffer,
scoped_refptr<VideoFrame>* frame_out);
+ // Creates a new frame with GL textures.
+ static void CreateFrameGlTexture(Format format,
+ size_t width,
+ size_t height,
+ GlTexture const textures[kMaxPlanes],
+ base::TimeDelta timestamp,
+ base::TimeDelta duration,
+ scoped_refptr<VideoFrame>* frame_out);
+
+ // Creates a new frame with D3d textures.
+ static void CreateFrameD3dTexture(Format format,
+ size_t width,
+ size_t height,
+ D3dTexture const textures[kMaxPlanes],
+ base::TimeDelta timestamp,
+ base::TimeDelta duration,
+ scoped_refptr<VideoFrame>* frame_out);
+
// Creates a frame with format equals to VideoFrame::EMPTY, width, height
// timestamp and duration are all 0.
static void CreateEmptyFrame(scoped_refptr<VideoFrame>* frame_out);
@@ -98,8 +123,15 @@ class VideoFrame : public StreamSample {
// Returns pointer to the buffer for a given plane. The memory is owned by
// VideoFrame object and must not be freed by the caller.
+ // TODO(hclam): Use union together with |gl_texture| and |d3d_texture|.
uint8* data(size_t plane) const { return data_[plane]; }
+ // Returns the GL texture for a given plane.
+ GlTexture gl_texture(size_t plane) const { return gl_textures_[plane]; }
+
+ // Returns the D3D texture for a given plane.
+ D3dTexture d3d_texture(size_t plane) const { return d3d_textures_[plane]; }
+
void* private_buffer() const { return private_buffer_; }
// StreamSample interface.
@@ -140,6 +172,12 @@ class VideoFrame : public StreamSample {
// Array of data pointers to each plane.
uint8* data_[kMaxPlanes];
+ // Array for GL textures.
+ GlTexture gl_textures_[kMaxPlanes];
+
+ // Array for D3D textures.
+ D3dTexture d3d_textures_[kMaxPlanes];
+
// True if memory referenced by |data_| is provided externally and shouldn't
// be deleted.
bool external_memory_;
diff --git a/media/filters/ffmpeg_video_decoder.cc b/media/filters/ffmpeg_video_decoder.cc
index ca509f9..6faa410e 100644
--- a/media/filters/ffmpeg_video_decoder.cc
+++ b/media/filters/ffmpeg_video_decoder.cc
@@ -78,23 +78,23 @@ void FFmpegVideoDecoder::Initialize(DemuxerStream* demuxer_stream,
VideoCodecConfig config;
switch (av_stream->codec->codec_id) {
case CODEC_ID_VC1:
- config.codec_ = kCodecVC1; break;
+ config.codec = kCodecVC1; break;
case CODEC_ID_H264:
- config.codec_ = kCodecH264; break;
+ config.codec = kCodecH264; break;
case CODEC_ID_THEORA:
- config.codec_ = kCodecTheora; break;
+ config.codec = kCodecTheora; break;
case CODEC_ID_MPEG2VIDEO:
- config.codec_ = kCodecMPEG2; break;
+ config.codec = kCodecMPEG2; break;
case CODEC_ID_MPEG4:
- config.codec_ = kCodecMPEG4; break;
+ config.codec = kCodecMPEG4; break;
case CODEC_ID_VP8:
- config.codec_ = kCodecVP8; break;
+ config.codec = kCodecVP8; break;
default:
NOTREACHED();
}
- config.opaque_context_ = av_stream;
- config.width_ = width_;
- config.height_ = height_;
+ config.opaque_context = av_stream;
+ config.width = width_;
+ config.height = height_;
decode_engine_->Initialize(message_loop(), this, config);
}
@@ -104,17 +104,17 @@ void FFmpegVideoDecoder::OnInitializeComplete(const VideoCodecInfo& info) {
info_ = info; // Save a copy.
- if (info.success_) {
+ if (info.success) {
media_format_.SetAsString(MediaFormat::kMimeType,
mime_type::kUncompressedVideo);
media_format_.SetAsInteger(MediaFormat::kWidth, width_);
media_format_.SetAsInteger(MediaFormat::kHeight, height_);
media_format_.SetAsInteger(
MediaFormat::kSurfaceType,
- static_cast<int>(info.stream_info_.surface_type_));
+ static_cast<int>(info.stream_info.surface_type));
media_format_.SetAsInteger(
MediaFormat::kSurfaceFormat,
- static_cast<int>(info.stream_info_.surface_format_));
+ static_cast<int>(info.stream_info.surface_format));
state_ = kNormal;
} else {
host()->SetError(PIPELINE_ERROR_DECODE);
@@ -410,8 +410,8 @@ FFmpegVideoDecoder::TimeTuple FFmpegVideoDecoder::FindPtsAndDuration(
}
bool FFmpegVideoDecoder::ProvidesBuffer() {
- DCHECK(info_.success_);
- return info_.provides_buffers_;
+ DCHECK(info_.success);
+ return info_.provides_buffers;
}
void FFmpegVideoDecoder::FlushBuffers() {
diff --git a/media/filters/ffmpeg_video_decoder_unittest.cc b/media/filters/ffmpeg_video_decoder_unittest.cc
index 381b277..6f2eb95 100644
--- a/media/filters/ffmpeg_video_decoder_unittest.cc
+++ b/media/filters/ffmpeg_video_decoder_unittest.cc
@@ -87,7 +87,7 @@ class DecoderPrivateMock : public FFmpegVideoDecoder {
ACTION_P2(EngineInitialize, engine, success) {
engine->event_handler_ = arg1;
- engine->info_.success_ = success;
+ engine->info_.success = success;
engine->event_handler_->OnInitializeComplete(engine->info_);
}
diff --git a/media/filters/omx_video_decoder.cc b/media/filters/omx_video_decoder.cc
index adb2c9c..80a5a13 100644
--- a/media/filters/omx_video_decoder.cc
+++ b/media/filters/omx_video_decoder.cc
@@ -95,21 +95,21 @@ void OmxVideoDecoder::Initialize(DemuxerStream* demuxer_stream,
VideoCodecConfig config;
switch (av_stream->codec->codec_id) {
case CODEC_ID_VC1:
- config.codec_ = kCodecVC1; break;
+ config.codec = kCodecVC1; break;
case CODEC_ID_H264:
- config.codec_ = kCodecH264; break;
+ config.codec = kCodecH264; break;
case CODEC_ID_THEORA:
- config.codec_ = kCodecTheora; break;
+ config.codec = kCodecTheora; break;
case CODEC_ID_MPEG2VIDEO:
- config.codec_ = kCodecMPEG2; break;
+ config.codec = kCodecMPEG2; break;
case CODEC_ID_MPEG4:
- config.codec_ = kCodecMPEG4; break;
+ config.codec = kCodecMPEG4; break;
default:
NOTREACHED();
}
- config.opaque_context_ = NULL;
- config.width_ = width_;
- config.height_ = height_;
+ config.opaque_context = NULL;
+ config.width = width_;
+ config.height = height_;
omx_engine_->Initialize(message_loop(), this, config);
}
@@ -120,17 +120,17 @@ void OmxVideoDecoder::OnInitializeComplete(const VideoCodecInfo& info) {
info_ = info; // Save a copy.
AutoCallbackRunner done_runner(initialize_callback_.release());
- if (info.success_) {
+ if (info.success) {
media_format_.SetAsString(MediaFormat::kMimeType,
mime_type::kUncompressedVideo);
media_format_.SetAsInteger(MediaFormat::kWidth, width_);
media_format_.SetAsInteger(MediaFormat::kHeight, height_);
media_format_.SetAsInteger(
MediaFormat::kSurfaceType,
- static_cast<int>(info.stream_info_.surface_type_));
+ static_cast<int>(info.stream_info.surface_type));
media_format_.SetAsInteger(
MediaFormat::kSurfaceFormat,
- static_cast<int>(info.stream_info_.surface_format_));
+ static_cast<int>(info.stream_info.surface_format));
} else {
host()->SetError(PIPELINE_ERROR_DECODE);
}
@@ -236,8 +236,8 @@ void OmxVideoDecoder::ProduceVideoFrame(scoped_refptr<VideoFrame> frame) {
}
bool OmxVideoDecoder::ProvidesBuffer() {
- DCHECK(info_.success_);
- return info_.provides_buffers_;
+ DCHECK(info_.success);
+ return info_.provides_buffers;
}
void OmxVideoDecoder::DemuxCompleteTask(Buffer* buffer) {
diff --git a/media/mf/mft_h264_decoder.cc b/media/mf/mft_h264_decoder.cc
index 667645c..68a42bb 100644
--- a/media/mf/mft_h264_decoder.cc
+++ b/media/mf/mft_h264_decoder.cc
@@ -172,22 +172,22 @@ void MftH264Decoder::Initialize(
config_ = config;
event_handler_ = event_handler;
- info_.provides_buffers_ = true;
+ info_.provides_buffers = true;
// TODO(jiesun): Actually it is more likely an NV12 D3DSuface9.
// Until we had hardware composition working.
if (use_dxva_) {
- info_.stream_info_.surface_format_ = VideoFrame::YV12;
- info_.stream_info_.surface_type_ = VideoFrame::TYPE_SYSTEM_MEMORY;
+ info_.stream_info.surface_format = VideoFrame::YV12;
+ info_.stream_info.surface_type = VideoFrame::TYPE_SYSTEM_MEMORY;
} else {
- info_.stream_info_.surface_format_ = VideoFrame::YV12;
- info_.stream_info_.surface_type_ = VideoFrame::TYPE_SYSTEM_MEMORY;
+ info_.stream_info.surface_format = VideoFrame::YV12;
+ info_.stream_info.surface_type = VideoFrame::TYPE_SYSTEM_MEMORY;
}
// codec_info.stream_info_.surface_width_/height_ are initialized
// in InitInternal().
- info_.success_ = InitInternal();
- if (info_.success_) {
+ info_.success = InitInternal();
+ if (info_.success) {
state_ = kNormal;
event_handler_->OnInitializeComplete(info_);
} else {
@@ -491,10 +491,10 @@ bool MftH264Decoder::SetDecoderOutputMediaType(const GUID subtype) {
if (out_subtype == subtype) {
hr = decoder_->SetOutputType(0, out_media_type, 0); // No flags
hr = MFGetAttributeSize(out_media_type, MF_MT_FRAME_SIZE,
- reinterpret_cast<UINT32*>(&info_.stream_info_.surface_width_),
- reinterpret_cast<UINT32*>(&info_.stream_info_.surface_height_));
- config_.width_ = info_.stream_info_.surface_width_;
- config_.height_ = info_.stream_info_.surface_height_;
+ reinterpret_cast<UINT32*>(&info_.stream_info.surface_width),
+ reinterpret_cast<UINT32*>(&info_.stream_info.surface_height));
+ config_.width = info_.stream_info.surface_width;
+ config_.height = info_.stream_info.surface_height;
if (FAILED(hr)) {
LOG(ERROR) << "Failed to SetOutputType to |subtype| or obtain "
<< "width/height " << std::hex << hr;
@@ -595,7 +595,7 @@ bool MftH264Decoder::DoDecode() {
hr = SetDecoderOutputMediaType(use_dxva_ ? MFVideoFormat_NV12
: MFVideoFormat_YV12);
if (SUCCEEDED(hr)) {
- event_handler_->OnFormatChange(info_.stream_info_);
+ event_handler_->OnFormatChange(info_.stream_info);
return true;
} else {
event_handler_->OnError();
@@ -656,9 +656,9 @@ bool MftH264Decoder::DoDecode() {
return true;
}
- VideoFrame::CreateFrame(info_.stream_info_.surface_format_,
- info_.stream_info_.surface_width_,
- info_.stream_info_.surface_height_,
+ VideoFrame::CreateFrame(info_.stream_info.surface_format,
+ info_.stream_info.surface_width,
+ info_.stream_info.surface_height,
base::TimeDelta::FromMicroseconds(timestamp),
base::TimeDelta::FromMicroseconds(duration),
&frame);
@@ -696,15 +696,15 @@ bool MftH264Decoder::DoDecode() {
}
uint32 src_stride = d3dlocked_rect.Pitch;
- uint32 dst_stride = config_.width_;
+ uint32 dst_stride = config_.width;
uint8* src_y = static_cast<uint8*>(d3dlocked_rect.pBits);
uint8* src_uv = src_y + src_stride * desc.Height;
uint8* dst_y = static_cast<uint8*>(frame->data(VideoFrame::kYPlane));
uint8* dst_u = static_cast<uint8*>(frame->data(VideoFrame::kVPlane));
uint8* dst_v = static_cast<uint8*>(frame->data(VideoFrame::kUPlane));
- for (int y = 0; y < config_.height_; ++y) {
- for (int x = 0; x < config_.width_; ++x) {
+ for (int y = 0; y < config_.height; ++y) {
+ for (int x = 0; x < config_.width; ++x) {
dst_y[x] = src_y[x];
if (!(y & 1)) {
if (x & 1)
diff --git a/media/mf/mft_h264_decoder_example.cc b/media/mf/mft_h264_decoder_example.cc
index 0a10e07..e1cc790 100644
--- a/media/mf/mft_h264_decoder_example.cc
+++ b/media/mf/mft_h264_decoder_example.cc
@@ -148,7 +148,7 @@ class MftH264DecoderHandler
virtual void OnSeekComplete() {}
virtual void OnError() {}
virtual void OnFormatChange(VideoStreamInfo stream_info) {
- info_.stream_info_ = stream_info;
+ info_.stream_info = stream_info;
}
virtual void ProduceVideoSample(scoped_refptr<Buffer> buffer) {
if (reader_ && decoder_) {
@@ -284,8 +284,8 @@ static int Run(bool use_dxva, bool render, const std::string& input_file) {
LOG(WARNING) << "Failed to get width/height from reader";
}
VideoCodecConfig config;
- config.width_ = width;
- config.height_ = height;
+ config.width = width;
+ config.height = height;
HWND window = NULL;
if (render) {
window = CreateDrawWindow(width, height);
diff --git a/media/mf/test/mft_h264_decoder_unittest.cc b/media/mf/test/mft_h264_decoder_unittest.cc
index 80ea7b1..562e7e4 100644
--- a/media/mf/test/mft_h264_decoder_unittest.cc
+++ b/media/mf/test/mft_h264_decoder_unittest.cc
@@ -125,7 +125,7 @@ class SimpleMftH264DecoderHandler : public VideoDecodeEngine::EventHandler {
virtual void OnError() {}
virtual void OnFormatChange(VideoStreamInfo stream_info) {
format_change_count_++;
- info_.stream_info_ = stream_info;
+ info_.stream_info = stream_info;
}
virtual void ProduceVideoSample(scoped_refptr<Buffer> buffer) {
if (reader_.get() && decoder_) {
@@ -172,8 +172,8 @@ TEST_F(MftH264DecoderTest, DecoderUninitializedAtFirst) {
TEST_F(MftH264DecoderTest, DecoderInitMissingArgs) {
VideoCodecConfig config;
- config.width_ = 800;
- config.height_ = 600;
+ config.width = 800;
+ config.height = 600;
scoped_ptr<MftH264Decoder> decoder(new MftH264Decoder(false));
ASSERT_TRUE(decoder.get());
decoder->Initialize(NULL, NULL, config);
@@ -184,8 +184,8 @@ TEST_F(MftH264DecoderTest, DecoderInitNoDxva) {
MessageLoop loop;
SimpleMftH264DecoderHandler handler;
VideoCodecConfig config;
- config.width_ = 800;
- config.height_ = 600;
+ config.width = 800;
+ config.height = 600;
scoped_ptr<MftH264Decoder> decoder(new MftH264Decoder(false));
ASSERT_TRUE(decoder.get());
decoder->Initialize(&loop, &handler, config);
@@ -198,8 +198,8 @@ TEST_F(MftH264DecoderTest, DecoderInitDxva) {
MessageLoop loop;
SimpleMftH264DecoderHandler handler;
VideoCodecConfig config;
- config.width_ = 800;
- config.height_ = 600;
+ config.width = 800;
+ config.height = 600;
scoped_ptr<MftH264Decoder> decoder(new MftH264Decoder(true));
ASSERT_TRUE(decoder.get());
decoder->Initialize(&loop, &handler, config);
@@ -212,8 +212,8 @@ TEST_F(MftH264DecoderTest, DecoderUninit) {
MessageLoop loop;
SimpleMftH264DecoderHandler handler;
VideoCodecConfig config;
- config.width_ = 800;
- config.height_ = 600;
+ config.width = 800;
+ config.height = 600;
scoped_ptr<MftH264Decoder> decoder(new MftH264Decoder(false));
ASSERT_TRUE(decoder.get());
decoder->Initialize(&loop, &handler, config);
@@ -227,8 +227,8 @@ TEST_F(MftH264DecoderTest, UninitBeforeInit) {
MessageLoop loop;
SimpleMftH264DecoderHandler handler;
VideoCodecConfig config;
- config.width_ = 800;
- config.height_ = 600;
+ config.width = 800;
+ config.height = 600;
scoped_ptr<MftH264Decoder> decoder(new MftH264Decoder(false));
ASSERT_TRUE(decoder.get());
decoder->Uninitialize();
@@ -239,14 +239,14 @@ TEST_F(MftH264DecoderTest, InitWithNegativeDimensions) {
MessageLoop loop;
SimpleMftH264DecoderHandler handler;
VideoCodecConfig config;
- config.width_ = -123;
- config.height_ = -456;
+ config.width = -123;
+ config.height = -456;
scoped_ptr<MftH264Decoder> decoder(new MftH264Decoder(false));
ASSERT_TRUE(decoder.get());
decoder->Initialize(&loop, &handler, config);
EXPECT_EQ(MftH264Decoder::kNormal, decoder->state());
- EXPECT_EQ(kDecoderMaxWidth, handler.info_.stream_info_.surface_width_);
- EXPECT_EQ(kDecoderMaxHeight, handler.info_.stream_info_.surface_height_);
+ EXPECT_EQ(kDecoderMaxWidth, handler.info_.stream_info.surface_width);
+ EXPECT_EQ(kDecoderMaxHeight, handler.info_.stream_info.surface_height);
decoder->Uninitialize();
}
@@ -254,14 +254,14 @@ TEST_F(MftH264DecoderTest, InitWithTooHighDimensions) {
MessageLoop loop;
SimpleMftH264DecoderHandler handler;
VideoCodecConfig config;
- config.width_ = kDecoderMaxWidth + 1;
- config.height_ = kDecoderMaxHeight + 1;
+ config.width = kDecoderMaxWidth + 1;
+ config.height = kDecoderMaxHeight + 1;
scoped_ptr<MftH264Decoder> decoder(new MftH264Decoder(false));
ASSERT_TRUE(decoder.get());
decoder->Initialize(&loop, &handler, config);
EXPECT_EQ(MftH264Decoder::kNormal, decoder->state());
- EXPECT_EQ(kDecoderMaxWidth, handler.info_.stream_info_.surface_width_);
- EXPECT_EQ(kDecoderMaxHeight, handler.info_.stream_info_.surface_height_);
+ EXPECT_EQ(kDecoderMaxWidth, handler.info_.stream_info.surface_width);
+ EXPECT_EQ(kDecoderMaxHeight, handler.info_.stream_info.surface_height);
decoder->Uninitialize();
}
@@ -269,8 +269,8 @@ TEST_F(MftH264DecoderTest, DrainOnEmptyBuffer) {
MessageLoop loop;
SimpleMftH264DecoderHandler handler;
VideoCodecConfig config;
- config.width_ = 1024;
- config.height_ = 768;
+ config.width = 1024;
+ config.height = 768;
scoped_ptr<MftH264Decoder> decoder(new MftH264Decoder(false));
ASSERT_TRUE(decoder.get());
decoder->Initialize(&loop, &handler, config);
@@ -298,8 +298,8 @@ TEST_F(MftH264DecoderTest, NoOutputOnGarbageInput) {
MessageLoop loop;
SimpleMftH264DecoderHandler handler;
VideoCodecConfig config;
- config.width_ = 1024;
- config.height_ = 768;
+ config.width = 1024;
+ config.height = 768;
scoped_ptr<MftH264Decoder> decoder(new MftH264Decoder(false));
ASSERT_TRUE(decoder.get());
decoder->Initialize(&loop, &handler, config);
@@ -326,8 +326,8 @@ TEST_F(MftH264DecoderTest, FlushAtStart) {
MessageLoop loop;
SimpleMftH264DecoderHandler handler;
VideoCodecConfig config;
- config.width_ = 1024;
- config.height_ = 768;
+ config.width = 1024;
+ config.height = 768;
scoped_ptr<MftH264Decoder> decoder(new MftH264Decoder(false));
ASSERT_TRUE(decoder.get());
decoder->Initialize(&loop, &handler, config);
@@ -346,8 +346,8 @@ TEST_F(MftH264DecoderTest, NoFlushAtStopped) {
MessageLoop loop;
SimpleMftH264DecoderHandler handler;
VideoCodecConfig config;
- config.width_ = 1024;
- config.height_ = 768;
+ config.width = 1024;
+ config.height = 768;
scoped_ptr<MftH264Decoder> decoder(new MftH264Decoder(false));
ASSERT_TRUE(decoder.get());
decoder->Initialize(&loop, &handler, config);
@@ -389,8 +389,8 @@ void DecodeValidVideo(const std::string& filename, int num_frames, bool dxva) {
MessageLoop loop;
SimpleMftH264DecoderHandler handler;
VideoCodecConfig config;
- config.width_ = 1;
- config.height_ = 1;
+ config.width = 1;
+ config.height = 1;
scoped_ptr<MftH264Decoder> decoder(new MftH264Decoder(dxva));
ASSERT_TRUE(decoder.get());
decoder->Initialize(&loop, &handler, config);
@@ -405,8 +405,8 @@ void DecodeValidVideo(const std::string& filename, int num_frames, bool dxva) {
// We expect a format change when decoder receives enough data to determine
// the actual frame width/height.
EXPECT_GT(handler.format_change_count_, 0);
- EXPECT_EQ(actual_width, handler.info_.stream_info_.surface_width_);
- EXPECT_EQ(actual_height, handler.info_.stream_info_.surface_height_);
+ EXPECT_EQ(actual_width, handler.info_.stream_info.surface_width);
+ EXPECT_EQ(actual_height, handler.info_.stream_info.surface_height);
EXPECT_GE(handler.empty_buffer_callback_count_, num_frames);
EXPECT_EQ(num_frames, handler.fill_buffer_callback_count_ - 1);
decoder->Uninitialize();
diff --git a/media/tools/omx_test/omx_test.cc b/media/tools/omx_test/omx_test.cc
index 693c7be..44b728f 100644
--- a/media/tools/omx_test/omx_test.cc
+++ b/media/tools/omx_test/omx_test.cc
@@ -173,21 +173,21 @@ class TestApp : public base::RefCountedThreadSafe<TestApp>,
media::VideoCodecConfig config;
switch (av_stream_->codec->codec_id) {
case CODEC_ID_VC1:
- config.codec_ = media::kCodecVC1; break;
+ config.codec = media::kCodecVC1; break;
case CODEC_ID_H264:
- config.codec_ = media::kCodecH264; break;
+ config.codec = media::kCodecH264; break;
case CODEC_ID_THEORA:
- config.codec_ = media::kCodecTheora; break;
+ config.codec = media::kCodecTheora; break;
case CODEC_ID_MPEG2VIDEO:
- config.codec_ = media::kCodecMPEG2; break;
+ config.codec = media::kCodecMPEG2; break;
case CODEC_ID_MPEG4:
- config.codec_ = media::kCodecMPEG4; break;
+ config.codec = media::kCodecMPEG4; break;
default:
NOTREACHED(); break;
}
- config.opaque_context_ = NULL;
- config.width_ = av_stream_->codec->width;
- config.height_ = av_stream_->codec->height;
+ config.opaque_context = NULL;
+ config.width = av_stream_->codec->width;
+ config.height = av_stream_->codec->height;
engine_.reset(new OmxVideoDecodeEngine());
engine_->Initialize(&message_loop_, this, config);
diff --git a/media/video/ffmpeg_video_decode_engine.cc b/media/video/ffmpeg_video_decode_engine.cc
index 2d35fdc..75be752 100644
--- a/media/video/ffmpeg_video_decode_engine.cc
+++ b/media/video/ffmpeg_video_decode_engine.cc
@@ -50,7 +50,7 @@ void FFmpegVideoDecodeEngine::Initialize(
static const int kDecodeThreads = 2;
static const int kMaxDecodeThreads = 16;
- av_stream_ = static_cast<AVStream*>(config.opaque_context_);
+ av_stream_ = static_cast<AVStream*>(config.opaque_context);
codec_context_ = av_stream_->codec;
// Enable motion vector search (potentially slow), strong deblocking filter
// for damaged macroblocks, and set our error detection sensitivity.
@@ -87,12 +87,12 @@ void FFmpegVideoDecodeEngine::Initialize(
// to let FFmpeg allocate the structure via avcodec_alloc_frame().
av_frame_.reset(avcodec_alloc_frame());
VideoCodecInfo info;
- info.success_ = false;
- info.provides_buffers_ = true;
- info.stream_info_.surface_type_ = VideoFrame::TYPE_SYSTEM_MEMORY;
- info.stream_info_.surface_format_ = GetSurfaceFormat();
- info.stream_info_.surface_width_ = config.width_;
- info.stream_info_.surface_height_ = config.height_;
+ info.success = false;
+ info.provides_buffers = true;
+ info.stream_info.surface_type = VideoFrame::TYPE_SYSTEM_MEMORY;
+ info.stream_info.surface_format = GetSurfaceFormat();
+ info.stream_info.surface_width = config.width;
+ info.stream_info.surface_height = config.height;
// If we do not have enough buffers, we will report error too.
bool buffer_allocated = true;
@@ -102,8 +102,8 @@ void FFmpegVideoDecodeEngine::Initialize(
for (size_t i = 0; i < Limits::kMaxVideoFrames; ++i) {
scoped_refptr<VideoFrame> video_frame;
VideoFrame::CreateFrame(VideoFrame::YV12,
- config.width_,
- config.height_,
+ config.width,
+ config.height,
StreamSample::kInvalidTimestamp,
StreamSample::kInvalidTimestamp,
&video_frame);
@@ -120,7 +120,7 @@ void FFmpegVideoDecodeEngine::Initialize(
avcodec_open(codec_context_, codec) >= 0 &&
av_frame_.get() &&
buffer_allocated) {
- info.success_ = true;
+ info.success = true;
}
event_handler_ = event_handler;
event_handler_->OnInitializeComplete(info);
diff --git a/media/video/ffmpeg_video_decode_engine_unittest.cc b/media/video/ffmpeg_video_decode_engine_unittest.cc
index 704b251..7db2f55 100644
--- a/media/video/ffmpeg_video_decode_engine_unittest.cc
+++ b/media/video/ffmpeg_video_decode_engine_unittest.cc
@@ -85,14 +85,14 @@ class FFmpegVideoDecodeEngineTest : public testing::Test,
EXPECT_CALL(*MockFFmpeg::get(), AVFree(&yuv_frame_))
.Times(1);
- config_.codec_ = kCodecH264;
- config_.opaque_context_ = &stream_;
- config_.width_ = kWidth;
- config_.height_ = kHeight;
+ config_.codec = kCodecH264;
+ config_.opaque_context = &stream_;
+ config_.width = kWidth;
+ config_.height = kHeight;
EXPECT_CALL(*this, OnInitializeComplete(_))
.WillOnce(SaveInitializeResult(this));
test_engine_->Initialize(MessageLoop::current(), this, config_);
- EXPECT_TRUE(info_.success_);
+ EXPECT_TRUE(info_.success);
}
public:
@@ -137,14 +137,14 @@ TEST_F(FFmpegVideoDecodeEngineTest, Initialize_FindDecoderFails) {
EXPECT_CALL(*MockFFmpeg::get(), AVFree(&yuv_frame_))
.Times(1);
- config_.codec_ = kCodecH264;
- config_.opaque_context_ = &stream_;
- config_.width_ = kWidth;
- config_.height_ = kHeight;
+ config_.codec = kCodecH264;
+ config_.opaque_context = &stream_;
+ config_.width = kWidth;
+ config_.height = kHeight;
EXPECT_CALL(*this, OnInitializeComplete(_))
.WillOnce(SaveInitializeResult(this));
test_engine_->Initialize(MessageLoop::current(), this, config_);
- EXPECT_FALSE(info_.success_);
+ EXPECT_FALSE(info_.success);
}
// Note There are 2 threads for FFmpeg-mt.
@@ -159,14 +159,14 @@ TEST_F(FFmpegVideoDecodeEngineTest, Initialize_InitThreadFails) {
EXPECT_CALL(*MockFFmpeg::get(), AVFree(&yuv_frame_))
.Times(1);
- config_.codec_ = kCodecH264;
- config_.opaque_context_ = &stream_;
- config_.width_ = kWidth;
- config_.height_ = kHeight;
+ config_.codec = kCodecH264;
+ config_.opaque_context = &stream_;
+ config_.width = kWidth;
+ config_.height = kHeight;
EXPECT_CALL(*this, OnInitializeComplete(_))
.WillOnce(SaveInitializeResult(this));
test_engine_->Initialize(MessageLoop::current(), this, config_);
- EXPECT_FALSE(info_.success_);
+ EXPECT_FALSE(info_.success);
}
TEST_F(FFmpegVideoDecodeEngineTest, Initialize_OpenDecoderFails) {
@@ -182,14 +182,14 @@ TEST_F(FFmpegVideoDecodeEngineTest, Initialize_OpenDecoderFails) {
EXPECT_CALL(*MockFFmpeg::get(), AVFree(&yuv_frame_))
.Times(1);
- config_.codec_ = kCodecH264;
- config_.opaque_context_ = &stream_;
- config_.width_ = kWidth;
- config_.height_ = kHeight;
+ config_.codec = kCodecH264;
+ config_.opaque_context = &stream_;
+ config_.width = kWidth;
+ config_.height = kHeight;
EXPECT_CALL(*this, OnInitializeComplete(_))
.WillOnce(SaveInitializeResult(this));
test_engine_->Initialize(MessageLoop::current(), this, config_);
- EXPECT_FALSE(info_.success_);
+ EXPECT_FALSE(info_.success);
}
ACTION_P2(DemuxComplete, engine, buffer) {
diff --git a/media/video/omx_video_decode_engine.cc b/media/video/omx_video_decode_engine.cc
index b953cfa..e1bcc7f 100644
--- a/media/video/omx_video_decode_engine.cc
+++ b/media/video/omx_video_decode_engine.cc
@@ -88,8 +88,8 @@ void OmxVideoDecodeEngine::Initialize(
message_loop_ = message_loop;
event_handler_ = event_handler;
- width_ = config.width_;
- height_ = config.height_;
+ width_ = config.width;
+ height_ = config.height;
// TODO(wjia): Find the right way to determine the codec type.
OmxConfigurator::MediaFormat input_format, output_format;
@@ -107,14 +107,14 @@ void OmxVideoDecodeEngine::Initialize(
VideoCodecInfo info;
// TODO(jiesun): ridiculous, we never fail initialization?
- info.success_ = true;
- info.provides_buffers_ = !uses_egl_image_;
- info.stream_info_.surface_type_ =
+ info.success = true;
+ info.provides_buffers = !uses_egl_image_;
+ info.stream_info.surface_type =
uses_egl_image_ ? VideoFrame::TYPE_GL_TEXTURE
: VideoFrame::TYPE_SYSTEM_MEMORY;
- info.stream_info_.surface_format_ = GetSurfaceFormat();
- info.stream_info_.surface_width_ = config.width_;
- info.stream_info_.surface_height_ = config.height_;
+ info.stream_info.surface_format = GetSurfaceFormat();
+ info.stream_info.surface_width = config.width;
+ info.stream_info.surface_height = config.height;
event_handler_->OnInitializeComplete(info);
}
diff --git a/media/video/video_decode_engine.h b/media/video/video_decode_engine.h
index df14522..8736890 100644
--- a/media/video/video_decode_engine.h
+++ b/media/video/video_decode_engine.h
@@ -26,46 +26,50 @@ static const uint32 kProfileDoNotCare = static_cast<uint32>(-1);
static const uint32 kLevelDoNotCare = static_cast<uint32>(-1);
struct VideoCodecConfig {
- VideoCodecConfig() : codec_(kCodecH264),
- profile_(kProfileDoNotCare),
- level_(kLevelDoNotCare),
- width_(0),
- height_(0),
- opaque_context_(NULL) {}
+ VideoCodecConfig() : codec(kCodecH264),
+ profile(kProfileDoNotCare),
+ level(kLevelDoNotCare),
+ width(0),
+ height(0),
+ opaque_context(NULL) {}
- VideoCodec codec_;
+ VideoCodec codec;
// TODO(jiesun): video profile and level are specific to individual codec.
// Define enum to.
- uint32 profile_;
- uint32 level_;
+ uint32 profile;
+ uint32 level;
// Container's concept of width and height of this video.
- int32 width_;
- int32 height_; // TODO(jiesun): Do we allow height to be negative to
+ int32 width;
+ int32 height; // TODO(jiesun): Do we allow height to be negative to
// indicate output is upside-down?
// FFMPEG's will use this to pass AVStream. Otherwise, we should remove this.
- void* opaque_context_;
+ void* opaque_context;
};
struct VideoStreamInfo {
- VideoFrame::Format surface_format_;
- VideoFrame::SurfaceType surface_type_;
- uint32 surface_width_; // Can be different with container's value.
- uint32 surface_height_; // Can be different with container's value.
+ VideoFrame::Format surface_format;
+ VideoFrame::SurfaceType surface_type;
+
+ // Can be different from the container's value.
+ uint32 surface_width;
+
+ // Can be different from the container's value.
+ uint32 surface_height;
};
struct VideoCodecInfo {
// Other parameter is only meaningful when this is true.
- bool success_;
+ bool success;
// Whether decoder provides output buffer pool.
- bool provides_buffers_;
+ bool provides_buffers;
// Initial Stream Info. Only part of them could be valid.
// If they are not valid, Engine should update with OnFormatChange.
- VideoStreamInfo stream_info_;
+ VideoStreamInfo stream_info;
};
class VideoDecodeEngine {