summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorimcheng@chromium.org <imcheng@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2010-08-12 19:50:58 +0000
committerimcheng@chromium.org <imcheng@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2010-08-12 19:50:58 +0000
commitdf9a4de661636de81fccb6cfa552de94e84efa50 (patch)
tree9e336b8a9a0059524d79ffa0686d92f5c7f9ed50
parentbb639038078bfd16f820c026efeb1a9d21395e60 (diff)
downloadchromium_src-df9a4de661636de81fccb6cfa552de94e84efa50.zip
chromium_src-df9a4de661636de81fccb6cfa552de94e84efa50.tar.gz
chromium_src-df9a4de661636de81fccb6cfa552de94e84efa50.tar.bz2
Added error callbacks. Decoder no longer explicitly quits the message loop on end of output. Also added a seeking functionality for rendering to window that goes to the first keyframe after X seconds. Both of these require a minor change in the decoder API.
BUG=none TEST=none Review URL: http://codereview.chromium.org/3146011 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@55917 0039d316-1c4b-4281-b951-d872f2087c98
-rw-r--r--media/mf/basic_renderer.cc13
-rw-r--r--media/mf/basic_renderer.h6
-rw-r--r--media/mf/file_reader_util.cc94
-rw-r--r--media/mf/file_reader_util.h23
-rw-r--r--media/mf/mft_h264_decoder.cc164
-rw-r--r--media/mf/mft_h264_decoder.h39
-rw-r--r--media/mf/mft_h264_decoder_example.cc38
-rw-r--r--media/mf/test/mft_h264_decoder_unittest.cc166
8 files changed, 371 insertions, 172 deletions
diff --git a/media/mf/basic_renderer.cc b/media/mf/basic_renderer.cc
index b85afd5..d1eddb9 100644
--- a/media/mf/basic_renderer.cc
+++ b/media/mf/basic_renderer.cc
@@ -10,7 +10,6 @@
#include "base/message_loop.h"
#include "base/scoped_comptr_win.h"
-#include "base/time.h"
#include "media/base/yuv_convert.h"
// For MFGetService and MF_BUFFER_SERVICE (getting D3D surface from buffer)
@@ -173,7 +172,7 @@ void NullRenderer::StartPlayback() {
&MftH264Decoder::GetOutput));
}
-void NullRenderer::StopPlayback() {
+void NullRenderer::OnDecodeError(MftH264Decoder::Error error) {
MessageLoop::current()->Quit();
}
@@ -189,6 +188,10 @@ BasicRenderer::BasicRenderer(MftH264Decoder* decoder,
BasicRenderer::~BasicRenderer() {}
void BasicRenderer::ProcessFrame(scoped_refptr<VideoFrame> frame) {
+ MessageLoopForUI::current()->PostDelayedTask(
+ FROM_HERE, NewRunnableMethod(decoder_.get(),
+ &MftH264Decoder::GetOutput),
+ frame->GetDuration().InMilliseconds());
if (device_ != NULL) {
if (!PaintD3D9BufferOntoWindow(device_,
static_cast<IMFMediaBuffer*>(frame->private_buffer()))) {
@@ -202,10 +205,6 @@ void BasicRenderer::ProcessFrame(scoped_refptr<VideoFrame> frame) {
}
}
ReleaseOutputBuffer(frame);
- MessageLoopForUI::current()->PostDelayedTask(
- FROM_HERE, NewRunnableMethod(decoder_.get(),
- &MftH264Decoder::GetOutput),
- frame->GetDuration().InMilliseconds());
}
void BasicRenderer::StartPlayback() {
@@ -214,7 +213,7 @@ void BasicRenderer::StartPlayback() {
&MftH264Decoder::GetOutput));
}
-void BasicRenderer::StopPlayback() {
+void BasicRenderer::OnDecodeError(MftH264Decoder::Error error) {
MessageLoopForUI::current()->Quit();
}
diff --git a/media/mf/basic_renderer.h b/media/mf/basic_renderer.h
index 97711c2..367446d 100644
--- a/media/mf/basic_renderer.h
+++ b/media/mf/basic_renderer.h
@@ -23,7 +23,7 @@ class MftRenderer : public base::RefCountedThreadSafe<MftRenderer> {
virtual ~MftRenderer() {}
virtual void ProcessFrame(scoped_refptr<VideoFrame> frame) = 0;
virtual void StartPlayback() = 0;
- virtual void StopPlayback() = 0;
+ virtual void OnDecodeError(MftH264Decoder::Error error) = 0;
protected:
scoped_refptr<MftH264Decoder> decoder_;
@@ -36,7 +36,7 @@ class NullRenderer : public MftRenderer {
virtual ~NullRenderer();
virtual void ProcessFrame(scoped_refptr<VideoFrame> frame);
virtual void StartPlayback();
- virtual void StopPlayback();
+ virtual void OnDecodeError(MftH264Decoder::Error error);
};
// This renderer does a basic playback by drawing to |window_|. It tries to
@@ -48,7 +48,7 @@ class BasicRenderer : public MftRenderer {
virtual ~BasicRenderer();
virtual void ProcessFrame(scoped_refptr<VideoFrame> frame);
virtual void StartPlayback();
- virtual void StopPlayback();
+ virtual void OnDecodeError(MftH264Decoder::Error error);
private:
HWND window_;
diff --git a/media/mf/file_reader_util.cc b/media/mf/file_reader_util.cc
index c4a9873..dcfc114 100644
--- a/media/mf/file_reader_util.cc
+++ b/media/mf/file_reader_util.cc
@@ -11,8 +11,8 @@
#include <algorithm>
-#include "base/scoped_comptr_win.h"
#include "base/logging.h"
+#include "media/base/data_buffer.h"
#include "media/ffmpeg/ffmpeg_common.h"
#include "media/filters/bitstream_converter.h"
@@ -26,7 +26,7 @@ FFmpegFileReader::FFmpegFileReader(const std::string& filename)
codec_context_(NULL),
target_stream_(-1),
converter_(NULL),
- end_of_stream_(false) {
+ last_timestamp_(0) {
}
FFmpegFileReader::~FFmpegFileReader() {
@@ -92,15 +92,9 @@ bool FFmpegFileReader::Initialize() {
return true;
}
-void FFmpegFileReader::Read(uint8** output, int* size) {
- Read2(output, size, NULL, NULL);
-}
-
-void FFmpegFileReader::Read2(uint8** output, int* size, int64* timestamp,
- int64* duration) {
+void FFmpegFileReader::Read(scoped_refptr<DataBuffer>* output) {
if (!format_context_ || !codec_context_ || target_stream_ == -1) {
- *size = 0;
- *output = NULL;
+ *output = new DataBuffer(0);
return;
}
AVPacket packet;
@@ -108,49 +102,34 @@ void FFmpegFileReader::Read2(uint8** output, int* size, int64* timestamp,
while (!found) {
int result = av_read_frame(format_context_, &packet);
if (result < 0) {
- *output = NULL;
- *size = 0;
- end_of_stream_ = true;
+ *output = new DataBuffer(0);
return;
}
if (packet.stream_index == target_stream_) {
if (converter_.get() && !converter_->ConvertPacket(&packet)) {
LOG(ERROR) << "failed to convert AVPacket";
}
- *output = new uint8[packet.size];
- if (*output == NULL) {
- LOG(ERROR) << "Failed to allocate buffer for annex b stream";
- *size = 0;
- return;
- }
- *size = packet.size;
- memcpy(*output, packet.data, packet.size);
- if (duration) {
- if (packet.duration == 0) {
- LOG(WARNING) << "Packet duration not known";
- }
- // This is in AVCodecContext::time_base units
- *duration = ConvertFFmpegTimeBaseTo100Ns(packet.duration);
- }
- if (timestamp) {
- if (packet.pts == AV_NOPTS_VALUE) {
- LOG(ERROR) << "Packet presentation time not known";
- *timestamp = 0L;
- } else {
- // This is in AVCodecContext::time_base units
- *timestamp = ConvertFFmpegTimeBaseTo100Ns(packet.pts);
- }
- }
+ last_timestamp_ = std::max(last_timestamp_, packet.pts);
+ CopyPacketToBuffer(&packet, output);
found = true;
}
av_free_packet(&packet);
}
}
+bool FFmpegFileReader::SeekForward(int64 seek_amount_us) {
+ if (!format_context_ || !codec_context_ || target_stream_ == -1) {
+ return false;
+ }
+ int64 new_us = TimeBaseToMicroseconds(last_timestamp_) + seek_amount_us;
+ int64 new_timestamp = MicrosecondsToTimeBase(new_us);
+ last_timestamp_ = new_timestamp;
+ return av_seek_frame(format_context_, target_stream_, new_timestamp, 0) >= 0;
+}
+
bool FFmpegFileReader::GetFrameRate(int* num, int* denom) const {
if (!codec_context_)
return false;
-
*denom = codec_context_->time_base.num;
*num = codec_context_->time_base.den;
if (*denom == 0) {
@@ -185,16 +164,49 @@ bool FFmpegFileReader::GetAspectRatio(int* num, int* denom) const {
return true;
}
-int64 FFmpegFileReader::ConvertFFmpegTimeBaseTo100Ns(
+int64 FFmpegFileReader::TimeBaseToMicroseconds(
int64 time_base_unit) const {
// FFmpeg units after time base conversion seems to be actually given in
// milliseconds (instead of seconds...) so we need to multiply it by a factor
- // of 10,000 to convert it into units compatible with MF.
+ // of 1,000.
// Note we need to double this because the frame rate is doubled in
// ffmpeg.
CHECK(codec_context_) << "Codec context needs to be initialized";
- return time_base_unit * 20000 * codec_context_->time_base.num /
+ return time_base_unit * 2000 * codec_context_->time_base.num /
codec_context_->time_base.den;
}
+int64 FFmpegFileReader::MicrosecondsToTimeBase(
+ int64 time_base_unit) const {
+ // ffmpeg.
+ CHECK(codec_context_) << "Codec context needs to be initialized";
+ return time_base_unit * codec_context_->time_base.den / 2000 /
+ codec_context_->time_base.num;
+}
+
+void FFmpegFileReader::CopyPacketToBuffer(AVPacket* packet,
+ scoped_refptr<DataBuffer>* output) {
+ uint8* buffer = new uint8[packet->size];
+ if (buffer == NULL) {
+ LOG(ERROR) << "Failed to allocate buffer for annex b stream";
+ *output = NULL;
+ return;
+ }
+ memcpy(buffer, packet->data, packet->size);
+ *output = new DataBuffer(buffer, packet->size);
+ if (packet->pts != AV_NOPTS_VALUE) {
+ (*output)->SetTimestamp(
+ base::TimeDelta::FromMicroseconds(
+ TimeBaseToMicroseconds(packet->pts)));
+ } else {
+ (*output)->SetTimestamp(StreamSample::kInvalidTimestamp);
+ }
+ if (packet->duration == 0) {
+ LOG(WARNING) << "Packet duration not known";
+ }
+ (*output)->SetDuration(
+ base::TimeDelta::FromMicroseconds(
+ TimeBaseToMicroseconds(packet->duration)));
+}
+
} // namespace media
diff --git a/media/mf/file_reader_util.h b/media/mf/file_reader_util.h
index a1426e0..a5fa9ec 100644
--- a/media/mf/file_reader_util.h
+++ b/media/mf/file_reader_util.h
@@ -12,15 +12,17 @@
#include <string>
#include "base/basictypes.h"
-#include "base/scoped_handle.h"
+#include "base/ref_counted.h"
#include "base/scoped_ptr.h"
struct AVCodecContext;
struct AVFormatContext;
+struct AVPacket;
namespace media {
class BitstreamConverter;
+class DataBuffer;
// A class to help reading and parsing input file for use in omx_test.
class FileReader {
@@ -32,7 +34,7 @@ class FileReader {
// Read the file into |output|, and output the number of bytes read to
// |size|.
- virtual void Read(uint8** output, int* size) = 0;
+ virtual void Read(scoped_refptr<DataBuffer>* output) = 0;
};
class FFmpegFileReader : public FileReader {
@@ -40,26 +42,25 @@ class FFmpegFileReader : public FileReader {
explicit FFmpegFileReader(const std::string& filename);
virtual ~FFmpegFileReader();
virtual bool Initialize();
- virtual void Read(uint8** output, int* size);
+ virtual void Read(scoped_refptr<DataBuffer>* output);
+ virtual bool SeekForward(int64 seek_amount_us);
- // Reads a video packet, converts it into Annex B stream, and allocates a
- // buffer to |*output| and copies the contents into it. Timestamp and
- // duration are given in 100-ns units.
- void Read2(uint8** output, int* size, int64* timestamp, int64* duration);
bool GetFrameRate(int* num, int* denom) const;
bool GetWidth(int* width) const;
bool GetHeight(int* height) const;
bool GetAspectRatio(int* num, int* denom) const;
- int64 ConvertFFmpegTimeBaseTo100Ns(int64 time_base_unit) const;
- bool end_of_stream() const { return end_of_stream_; }
+ int64 TimeBaseToMicroseconds(int64 time_base_unit) const;
+ int64 MicrosecondsToTimeBase(int64 time_base_unit) const;
private:
+ void CopyPacketToBuffer(AVPacket* packet, scoped_refptr<DataBuffer>* output);
+
std::string filename_;
AVFormatContext* format_context_;
AVCodecContext* codec_context_;
int target_stream_;
- scoped_ptr<media::BitstreamConverter> converter_;
- bool end_of_stream_;
+ scoped_ptr<BitstreamConverter> converter_;
+ int64 last_timestamp_;
DISALLOW_COPY_AND_ASSIGN(FFmpegFileReader);
};
diff --git a/media/mf/mft_h264_decoder.cc b/media/mf/mft_h264_decoder.cc
index ffbed15..3762c27 100644
--- a/media/mf/mft_h264_decoder.cc
+++ b/media/mf/mft_h264_decoder.cc
@@ -20,6 +20,7 @@
#include "base/logging.h"
#include "base/message_loop.h"
#include "base/scoped_comptr_win.h"
+#include "media/base/data_buffer.h"
#include "media/base/video_frame.h"
#pragma comment(lib, "d3d9.lib")
@@ -93,7 +94,7 @@ static IMFSample* CreateEmptySampleWithBuffer(int buffer_length, int align) {
// the decoder for input). The times here should be given in 100ns units.
// |alignment| specifies the buffer in the sample to be aligned. If no
// alignment is required, provide 0 or 1.
-static IMFSample* CreateInputSample(uint8* stream, int size,
+static IMFSample* CreateInputSample(const uint8* stream, int size,
int64 timestamp, int64 duration,
int min_size, int alignment) {
CHECK(stream);
@@ -151,10 +152,12 @@ static IMFSample* CreateInputSample(uint8* stream, int size,
MftH264Decoder::MftH264Decoder(bool use_dxva)
: read_input_callback_(NULL),
output_avail_callback_(NULL),
+ output_error_callback_(NULL),
decoder_(NULL),
initialized_(false),
use_dxva_(use_dxva),
drain_message_sent_(false),
+ next_frame_discontinuous_(false),
in_buffer_size_(0),
in_buffer_alignment_(0),
out_buffer_size_(0),
@@ -182,15 +185,17 @@ bool MftH264Decoder::Init(IDirect3DDeviceManager9* dev_manager,
int width, int height,
int aspect_num, int aspect_denom,
ReadInputCallback* read_input_cb,
- OutputReadyCallback* output_avail_cb) {
+ OutputReadyCallback* output_avail_cb,
+ OutputErrorCallback* output_error_cb) {
if (initialized_)
return true;
- if (!read_input_cb || !output_avail_cb) {
- LOG(ERROR) << "No callback provided";
+ if (!read_input_cb || !output_avail_cb || !output_error_cb) {
+ LOG(ERROR) << "Callbacks missing in Init";
return false;
}
read_input_callback_.reset(read_input_cb);
output_avail_callback_.reset(output_avail_cb);
+ output_error_callback_.reset(output_error_cb);
if (!InitComMfLibraries())
return false;
if (!InitDecoder(dev_manager, frame_rate_num, frame_rate_denom,
@@ -204,32 +209,6 @@ bool MftH264Decoder::Init(IDirect3DDeviceManager9* dev_manager,
return true;
}
-bool MftH264Decoder::SendInput(uint8* data, int size, int64 timestamp,
- int64 duration) {
- CHECK(initialized_);
- CHECK(data);
- CHECK_GT(size, 0);
- if (drain_message_sent_) {
- LOG(ERROR) << "Drain message was already sent, but trying to send more "
- << "input to decoder";
- return false;
- }
- ScopedComPtr<IMFSample> sample;
- sample.Attach(CreateInputSample(data, size, timestamp, duration,
- in_buffer_size_, in_buffer_alignment_));
- if (!sample.get()) {
- LOG(ERROR) << "Failed to convert input stream to sample";
- return false;
- }
- HRESULT hr = decoder_->ProcessInput(0, sample.get(), 0);
- if (FAILED(hr)) {
- LOG(ERROR) << "Failed to ProcessInput, hr = " << std::hex << hr;
- return false;
- }
- frames_read_++;
- return true;
-}
-
static const char* const ProcessOutputStatusToCString(HRESULT hr) {
if (hr == MF_E_TRANSFORM_STREAM_CHANGE)
return "media stream change occurred, need to set output type";
@@ -239,7 +218,7 @@ static const char* const ProcessOutputStatusToCString(HRESULT hr) {
return "unhandled error from ProcessOutput";
}
-MftH264Decoder::DecoderOutputState MftH264Decoder::GetOutput() {
+void MftH264Decoder::GetOutput() {
CHECK(initialized_);
ScopedComPtr<IMFSample> output_sample;
@@ -249,7 +228,8 @@ MftH264Decoder::DecoderOutputState MftH264Decoder::GetOutput() {
out_buffer_alignment_));
if (!output_sample.get()) {
LOG(ERROR) << "GetSample: failed to create empty output sample";
- return kNoMemory;
+ output_error_callback_->Run(kNoMemory);
+ return;
}
}
MFT_OUTPUT_DATA_BUFFER output_data_buffer;
@@ -275,8 +255,8 @@ MftH264Decoder::DecoderOutputState MftH264Decoder::GetOutput() {
if (hr == MF_E_TRANSFORM_STREAM_CHANGE) {
if (!SetDecoderOutputMediaType(output_format_)) {
LOG(ERROR) << "Failed to reset output type";
- MessageLoop::current()->Quit();
- return kResetOutputStreamFailed;
+ output_error_callback_->Run(kResetOutputStreamFailed);
+ return;
} else {
LOG(INFO) << "Reset output type done";
continue;
@@ -287,22 +267,22 @@ MftH264Decoder::DecoderOutputState MftH264Decoder::GetOutput() {
// anymore output then we know the decoder has processed everything.
if (drain_message_sent_) {
LOG(INFO) << "Drain message was already sent + no output => done";
- MessageLoop::current()->Quit();
- return kNoMoreOutput;
+ output_error_callback_->Run(kNoMoreOutput);
+ return;
} else {
- if (!ReadAndProcessInput()) {
+ if (!ReadInput()) {
LOG(INFO) << "Failed to read/process input. Sending drain message";
- if (!SendDrainMessage()) {
+ if (!SendEndOfStreamMessage() || !SendDrainMessage()) {
LOG(ERROR) << "Failed to send drain message";
- MessageLoop::current()->Quit();
- return kNoMoreOutput;
+ output_error_callback_->Run(kNoMoreOutput);
+ return;
}
}
continue;
}
} else {
- MessageLoop::current()->Quit();
- return kUnspecifiedError;
+ output_error_callback_->Run(kUnspecifiedError);
+ return;
}
} else {
// A decoded sample was successfully obtained.
@@ -314,8 +294,8 @@ MftH264Decoder::DecoderOutputState MftH264Decoder::GetOutput() {
if (!output_sample.get()) {
LOG(ERROR) << "Output sample using DXVA is NULL - ProcessOutput did "
<< "not provide it!";
- MessageLoop::current()->Quit();
- return kOutputSampleError;
+ output_error_callback_->Run(kOutputSampleError);
+ return;
}
}
int64 timestamp, duration;
@@ -324,8 +304,8 @@ MftH264Decoder::DecoderOutputState MftH264Decoder::GetOutput() {
if (FAILED(hr)) {
LOG(ERROR) << "Failed to get sample duration or timestamp "
<< std::hex << hr;
- MessageLoop::current()->Quit();
- return kOutputSampleError;
+ output_error_callback_->Run(kOutputSampleError);
+ return;
}
// The duration and timestamps are in 100-ns units, so divide by 10
@@ -338,20 +318,20 @@ MftH264Decoder::DecoderOutputState MftH264Decoder::GetOutput() {
hr = output_sample->GetBufferCount(&buf_count);
if (FAILED(hr)) {
LOG(ERROR) << "Failed to get buff count, hr = " << std::hex << hr;
- MessageLoop::current()->Quit();
- return kOutputSampleError;
+ output_error_callback_->Run(kOutputSampleError);
+ return;
}
if (buf_count == 0) {
LOG(ERROR) << "buf_count is 0, dropping sample";
- MessageLoop::current()->Quit();
- return kOutputSampleError;
+ output_error_callback_->Run(kOutputSampleError);
+ return;
}
ScopedComPtr<IMFMediaBuffer> out_buffer;
hr = output_sample->GetBufferByIndex(0, out_buffer.Receive());
if (FAILED(hr)) {
LOG(ERROR) << "Failed to get decoded output buffer";
- MessageLoop::current()->Quit();
- return kOutputSampleError;
+ output_error_callback_->Run(kOutputSampleError);
+ return;
}
// To obtain the data, the caller should call the Lock() method instead
@@ -380,11 +360,22 @@ MftH264Decoder::DecoderOutputState MftH264Decoder::GetOutput() {
CHECK(decoded_frame.get());
frames_decoded_++;
output_avail_callback_->Run(decoded_frame);
- return kOutputOk;
+ return;
}
}
}
+bool MftH264Decoder::Flush() {
+ CHECK(initialized_);
+ HRESULT hr = decoder_->ProcessMessage(MFT_MESSAGE_COMMAND_FLUSH, NULL);
+ if (FAILED(hr)) {
+ LOG(ERROR) << "Failed to send the flush message to decoder";
+ return false;
+ }
+ next_frame_discontinuous_ = true;
+ return true;
+}
+
// Private methods
bool MftH264Decoder::InitComMfLibraries() {
@@ -565,7 +556,7 @@ bool MftH264Decoder::SetDecoderOutputMediaType(const GUID subtype) {
bool MftH264Decoder::SendStartMessage() {
HRESULT hr;
- hr = decoder_->ProcessMessage(MFT_MESSAGE_NOTIFY_START_OF_STREAM, NULL);
+ hr = decoder_->ProcessMessage(MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, NULL);
if (FAILED(hr)) {
LOG(ERROR) << "Process start message failed, hr = "
<< std::hex << std::showbase << hr;
@@ -627,22 +618,69 @@ bool MftH264Decoder::GetStreamsInfoAndBufferReqs() {
return true;
}
-bool MftH264Decoder::ReadAndProcessInput() {
- uint8* input_stream_dummy;
- int size;
- int64 duration;
- int64 timestamp;
- read_input_callback_->Run(&input_stream_dummy, &size, &timestamp, &duration);
- scoped_array<uint8> input_stream(input_stream_dummy);
- if (!input_stream.get()) {
+bool MftH264Decoder::ReadInput() {
+ scoped_refptr<DataBuffer> input;
+ read_input_callback_->Run(&input);
+ if (!input.get() || input->IsEndOfStream()) {
LOG(INFO) << "No more input";
return false;
} else {
// We read an input stream, we can feed it into the decoder.
- return SendInput(input_stream.get(), size, timestamp, duration);
+ return SendInput(input->GetData(), input->GetDataSize(),
+ input->GetTimestamp().InMicroseconds() * 10,
+ input->GetDuration().InMicroseconds() * 10);
}
}
+bool MftH264Decoder::SendInput(const uint8* data, int size, int64 timestamp,
+ int64 duration) {
+ CHECK(initialized_);
+ CHECK(data);
+ CHECK_GT(size, 0);
+
+ bool current_frame_discontinuous = next_frame_discontinuous_;
+ next_frame_discontinuous_ = true;
+
+ if (drain_message_sent_) {
+ LOG(ERROR) << "Drain message was already sent, but trying to send more "
+ << "input to decoder";
+ return false;
+ }
+ ScopedComPtr<IMFSample> sample;
+ sample.Attach(CreateInputSample(data, size, timestamp, duration,
+ in_buffer_size_, in_buffer_alignment_));
+ if (!sample.get()) {
+ LOG(ERROR) << "Failed to convert input stream to sample";
+ return false;
+ }
+ HRESULT hr;
+ if (current_frame_discontinuous) {
+ hr = sample->SetUINT32(MFSampleExtension_Discontinuity, TRUE);
+ if (FAILED(hr)) {
+ LOG(ERROR) << "Failed to set sample discontinuity " << std::hex << hr;
+ }
+ }
+ hr = decoder_->ProcessInput(0, sample.get(), 0);
+ if (FAILED(hr)) {
+ LOG(ERROR) << "Failed to ProcessInput, hr = " << std::hex << hr;
+ return false;
+ }
+ frames_read_++;
+ next_frame_discontinuous_ = false;
+ return true;
+}
+
+bool MftH264Decoder::SendEndOfStreamMessage() {
+ CHECK(initialized_);
+ // Send the eos message with no parameters.
+ HRESULT hr = decoder_->ProcessMessage(MFT_MESSAGE_NOTIFY_END_OF_STREAM, 0);
+ if (FAILED(hr)) {
+ LOG(ERROR) << "Failed to send the drain message to decoder";
+ return false;
+ }
+ return true;
+}
+
bool MftH264Decoder::SendDrainMessage() {
CHECK(initialized_);
if (drain_message_sent_) {
diff --git a/media/mf/mft_h264_decoder.h b/media/mf/mft_h264_decoder.h
index 1590331..750875a 100644
--- a/media/mf/mft_h264_decoder.h
+++ b/media/mf/mft_h264_decoder.h
@@ -18,7 +18,6 @@
#include "base/basictypes.h"
#include "base/callback.h"
#include "base/scoped_ptr.h"
-#include "base/scoped_comptr_win.h"
#include "testing/gtest/include/gtest/gtest_prod.h"
struct IDirect3DDeviceManager9;
@@ -26,21 +25,23 @@ struct IMFTransform;
namespace media {
+class DataBuffer;
class VideoFrame;
// A decoder that takes samples of Annex B streams then outputs decoded frames.
class MftH264Decoder : public base::RefCountedThreadSafe<MftH264Decoder> {
public:
- enum DecoderOutputState {
- kOutputOk = 0,
- kResetOutputStreamFailed,
+ enum Error {
+ kResetOutputStreamFailed = 0,
kNoMoreOutput,
kUnspecifiedError,
kNoMemory,
kOutputSampleError
};
- typedef Callback4<uint8**, int*, int64*, int64*>::Type ReadInputCallback;
+ typedef Callback1<scoped_refptr<DataBuffer>*>::Type
+ ReadInputCallback;
typedef Callback1<scoped_refptr<VideoFrame> >::Type OutputReadyCallback;
+ typedef Callback1<Error>::Type OutputErrorCallback;
explicit MftH264Decoder(bool use_dxva);
~MftH264Decoder();
@@ -58,18 +59,14 @@ class MftH264Decoder : public base::RefCountedThreadSafe<MftH264Decoder> {
int width, int height,
int aspect_num, int aspect_denom,
ReadInputCallback* read_input_cb,
- OutputReadyCallback* output_avail_cb);
-
- // Sends an Annex B stream to the decoder. The times here should be given
- // in 100ns units. This creates a IMFSample, copies the stream over to the
- // sample, and sends the sample to the decoder.
- // Returns: true if the sample was sent successfully.
- bool SendInput(uint8* data, int size, int64 timestamp, int64 duration);
+ OutputReadyCallback* output_avail_cb,
+ OutputErrorCallback* output_error_cb);
// Tries to get an output sample from the decoder, and if successful, calls
- // the callback with the sample.
- // Returns: status of the decoder.
- DecoderOutputState GetOutput();
+ // the callback with the sample, or status of the decoder if an error
+ // occurred.
+ void GetOutput();
+ bool Flush();
bool initialized() const { return initialized_; }
bool use_dxva() const { return use_dxva_; }
@@ -103,7 +100,15 @@ class MftH264Decoder : public base::RefCountedThreadSafe<MftH264Decoder> {
bool SetDecoderOutputMediaType(const GUID subtype);
bool SendStartMessage();
bool GetStreamsInfoAndBufferReqs();
- bool ReadAndProcessInput();
+ bool ReadInput();
+
+ // Sends an Annex B stream to the decoder. The times here should be given
+ // in 100ns units. This creates a IMFSample, copies the stream over to the
+ // sample, and sends the sample to the decoder.
+ // Returns: true if the sample was sent successfully.
+ bool SendInput(const uint8* data, int size, int64 timestamp, int64 duration);
+
+ bool SendEndOfStreamMessage();
// Sends a drain message to the decoder to indicate no more input will be
// sent. SendInput() should not be called after calling this method.
@@ -113,10 +118,12 @@ class MftH264Decoder : public base::RefCountedThreadSafe<MftH264Decoder> {
// |output_error_callback_| should stop the message loop.
scoped_ptr<ReadInputCallback> read_input_callback_;
scoped_ptr<OutputReadyCallback> output_avail_callback_;
+ scoped_ptr<OutputErrorCallback> output_error_callback_;
IMFTransform* decoder_;
bool initialized_;
bool use_dxva_;
bool drain_message_sent_;
+ bool next_frame_discontinuous_;
// Minimum input and output buffer sizes/alignment required by the decoder.
// If |buffer_alignment_| is zero, then the buffer needs not be aligned.
diff --git a/media/mf/mft_h264_decoder_example.cc b/media/mf/mft_h264_decoder_example.cc
index a323e62..6292957 100644
--- a/media/mf/mft_h264_decoder_example.cc
+++ b/media/mf/mft_h264_decoder_example.cc
@@ -50,6 +50,7 @@ void usage() {
"Usage: mft_h264_decoder [--enable-dxva] [--render] --input-file=FILE\n"
"enable-dxva: Enables hardware accelerated decoding\n"
"render: Render to window\n"
+ "During rendering, press spacebar to skip forward at least 5 seconds.\n"
"To display this message: mft_h264_decoder --help";
fprintf(stderr, "%s\n", usage_msg);
}
@@ -105,6 +106,31 @@ static HWND CreateDrawWindow(int width, int height) {
return window;
}
+class WindowObserver : public base::MessagePumpWin::Observer {
+ public:
+ WindowObserver(FFmpegFileReader* reader, MftH264Decoder* decoder)
+ : reader_(reader),
+ decoder_(decoder) {
+ }
+
+ virtual void WillProcessMessage(const MSG& msg) {
+ if (msg.message == WM_CHAR && msg.wParam == ' ') {
+ if (!decoder_->Flush()) {
+ LOG(ERROR) << "Flush failed";
+ }
+ // Seek forward 5 seconds.
+ reader_->SeekForward(5000000);
+ }
+ }
+
+ virtual void DidProcessMessage(const MSG& msg) {
+ }
+
+ private:
+ FFmpegFileReader* reader_;
+ MftH264Decoder* decoder_;
+};
+
static int Run(bool use_dxva, bool render, const std::string& input_file) {
// If we are not rendering, we need a window anyway to create a D3D device,
// so we will just use the desktop window. (?)
@@ -147,7 +173,7 @@ static int Run(bool use_dxva, bool render, const std::string& input_file) {
}
scoped_refptr<MftH264Decoder> mft(new MftH264Decoder(use_dxva));
scoped_refptr<MftRenderer> renderer;
- if (render) {
+ if (render) {
renderer = new BasicRenderer(mft.get(), window, device);
} else {
renderer = new NullRenderer(mft.get());
@@ -160,11 +186,14 @@ static int Run(bool use_dxva, bool render, const std::string& input_file) {
frame_rate_num, frame_rate_denom,
width, height,
aspect_ratio_num, aspect_ratio_denom,
- NewCallback(reader.get(), &FFmpegFileReader::Read2),
- NewCallback(renderer.get(), &MftRenderer::ProcessFrame))) {
+ NewCallback(reader.get(), &FFmpegFileReader::Read),
+ NewCallback(renderer.get(), &MftRenderer::ProcessFrame),
+ NewCallback(renderer.get(),
+ &MftRenderer::OnDecodeError))) {
LOG(ERROR) << "Failed to initialize mft";
return -1;
}
+ scoped_ptr<WindowObserver> observer;
// If rendering, resize the window to fit the video frames.
if (render) {
RECT rect;
@@ -177,6 +206,8 @@ static int Run(bool use_dxva, bool render, const std::string& input_file) {
rect.bottom - rect.top, TRUE)) {
LOG(WARNING) << "Warning: Failed to resize window";
}
+ observer.reset(new WindowObserver(reader.get(), mft.get()));
+ MessageLoopForUI::current()->AddObserver(observer.get());
}
if (use_dxva) {
// Reset the device's back buffer dimensions to match the window's
@@ -190,6 +221,7 @@ static int Run(bool use_dxva, bool render, const std::string& input_file) {
}
}
Time decode_start(Time::Now());
+
MessageLoopForUI::current()->PostTask(FROM_HERE,
NewRunnableMethod(renderer.get(), &MftRenderer::StartPlayback));
MessageLoopForUI::current()->Run(NULL);
diff --git a/media/mf/test/mft_h264_decoder_unittest.cc b/media/mf/test/mft_h264_decoder_unittest.cc
index 8bc7b41..daf2c66 100644
--- a/media/mf/test/mft_h264_decoder_unittest.cc
+++ b/media/mf/test/mft_h264_decoder_unittest.cc
@@ -10,8 +10,10 @@
#include "base/file_util.h"
#include "base/message_loop.h"
#include "base/path_service.h"
+#include "base/scoped_comptr_win.h"
#include "base/scoped_ptr.h"
#include "base/string_util.h"
+#include "media/base/data_buffer.h"
#include "media/base/video_frame.h"
#include "media/mf/d3d_util.h"
#include "media/mf/file_reader_util.h"
@@ -30,18 +32,20 @@ class FakeMftReader {
~FakeMftReader() {}
// Provides garbage input to the decoder.
- void ReadCallback(uint8** buf, int* sz, int64* ts, int64* dur) {
+ void ReadCallback(scoped_refptr<DataBuffer>* input) {
if (frames_remaining_ > 0) {
- *sz = 4096;
- *buf = new uint8[*sz];
- memset(*buf, 42, *sz);
- *ts = 50000000 - frames_remaining_ * 10000;
- *dur = 5000;
+ int sz = 4096;
+ uint8* buf = new uint8[sz];
+ memset(buf, 42, sz);
+ *input = new DataBuffer(buf, sz);
+ (*input)->SetDuration(base::TimeDelta::FromMicroseconds(5000));
+ (*input)->SetTimestamp(
+ base::TimeDelta::FromMicroseconds(
+ 50000000 - frames_remaining_ * 10000));
--frames_remaining_;
} else {
// Emulate end of stream on the last "frame".
- *buf = NULL;
- *sz = 0;
+ *input = new DataBuffer(0);
}
}
int frames_remaining() const { return frames_remaining_; }
@@ -54,30 +58,45 @@ class FakeMftRenderer : public base::RefCountedThreadSafe<FakeMftRenderer> {
public:
explicit FakeMftRenderer(scoped_refptr<MftH264Decoder> decoder)
: decoder_(decoder),
- count_(0) {
+ count_(0),
+ flush_countdown_(0) {
}
- ~FakeMftRenderer() {}
+ virtual ~FakeMftRenderer() {}
- void WriteCallback(scoped_refptr<VideoFrame> frame) {
+ virtual void WriteCallback(scoped_refptr<VideoFrame> frame) {
static_cast<IMFMediaBuffer*>(frame->private_buffer())->Release();
++count_;
+ if (flush_countdown_ > 0) {
+ if (--flush_countdown_ == 0) {
+ decoder_->Flush();
+ }
+ }
MessageLoop::current()->PostTask(
FROM_HERE,
NewRunnableMethod(decoder_.get(), &MftH264Decoder::GetOutput));
}
- void Start() {
+ virtual void Start() {
MessageLoop::current()->PostTask(
FROM_HERE,
NewRunnableMethod(decoder_.get(), &MftH264Decoder::GetOutput));
}
+ virtual void OnDecodeError(MftH264Decoder::Error error) {
+ MessageLoop::current()->Quit();
+ }
+
+ virtual void SetFlushCountdown(int countdown) {
+ flush_countdown_ = countdown;
+ }
+
int count() const { return count_; }
- private:
+ protected:
scoped_refptr<MftH264Decoder> decoder_;
int count_;
+ int flush_countdown_;
};
class MftH264DecoderTest : public testing::Test {
@@ -99,7 +118,7 @@ TEST_F(MftH264DecoderTest, SimpleInit) {
CoUninitialize();
}
-TEST_F(MftH264DecoderTest, InitWithDxvaButNoD3DDevice) {
+TEST_F(MftH264DecoderTest, InitWithDxvaButNoD3dDevice) {
scoped_refptr<MftH264Decoder> decoder(new MftH264Decoder(true));
ASSERT_TRUE(decoder.get() != NULL);
FakeMftReader reader;
@@ -108,13 +127,15 @@ TEST_F(MftH264DecoderTest, InitWithDxvaButNoD3DDevice) {
decoder->Init(NULL, 6, 7, 111, 222, 3, 1,
NewCallback(&reader, &FakeMftReader::ReadCallback),
NewCallback(renderer.get(),
- &FakeMftRenderer::WriteCallback)));
+ &FakeMftRenderer::WriteCallback),
+ NewCallback(renderer.get(),
+ &FakeMftRenderer::OnDecodeError)));
}
TEST_F(MftH264DecoderTest, InitMissingCallbacks) {
scoped_refptr<MftH264Decoder> decoder(new MftH264Decoder(false));
ASSERT_TRUE(decoder.get() != NULL);
- EXPECT_FALSE(decoder->Init(NULL, 1, 3, 111, 222, 56, 34, NULL, NULL));
+ EXPECT_FALSE(decoder->Init(NULL, 1, 3, 111, 222, 56, 34, NULL, NULL, NULL));
}
TEST_F(MftH264DecoderTest, InitWithNegativeDimensions) {
@@ -125,7 +146,9 @@ TEST_F(MftH264DecoderTest, InitWithNegativeDimensions) {
EXPECT_TRUE(decoder->Init(NULL, 0, 6, -123, -456, 22, 4787,
NewCallback(&reader, &FakeMftReader::ReadCallback),
NewCallback(renderer.get(),
- &FakeMftRenderer::WriteCallback)));
+ &FakeMftRenderer::WriteCallback),
+ NewCallback(renderer.get(),
+ &FakeMftRenderer::OnDecodeError)));
// By default, decoder should "guess" the dimensions to be the maximum.
EXPECT_EQ(kDecoderMaxWidth, decoder->width());
@@ -142,7 +165,9 @@ TEST_F(MftH264DecoderTest, InitWithTooHighDimensions) {
0, 0,
NewCallback(&reader, &FakeMftReader::ReadCallback),
NewCallback(renderer.get(),
- &FakeMftRenderer::WriteCallback)));
+ &FakeMftRenderer::WriteCallback),
+ NewCallback(renderer.get(),
+ &FakeMftRenderer::OnDecodeError)));
// Decoder should truncate the dimensions to the maximum supported.
EXPECT_EQ(kDecoderMaxWidth, decoder->width());
@@ -158,7 +183,9 @@ TEST_F(MftH264DecoderTest, InitWithNormalDimensions) {
EXPECT_TRUE(decoder->Init(NULL, 0, 0, width, height, 0, 0,
NewCallback(&reader, &FakeMftReader::ReadCallback),
NewCallback(renderer.get(),
- &FakeMftRenderer::WriteCallback)));
+ &FakeMftRenderer::WriteCallback),
+ NewCallback(renderer.get(),
+ &FakeMftRenderer::OnDecodeError)));
EXPECT_EQ(width, decoder->width());
EXPECT_EQ(height, decoder->height());
@@ -181,7 +208,9 @@ TEST_F(MftH264DecoderTest, SendDrainMessageAtInit) {
ASSERT_TRUE(decoder->Init(NULL, 0, 0, 111, 222, 0, 0,
NewCallback(&reader, &FakeMftReader::ReadCallback),
NewCallback(renderer.get(),
- &FakeMftRenderer::WriteCallback)));
+ &FakeMftRenderer::WriteCallback),
+ NewCallback(renderer.get(),
+ &FakeMftRenderer::OnDecodeError)));
EXPECT_TRUE(decoder->SendDrainMessage());
EXPECT_TRUE(decoder->drain_message_sent_);
}
@@ -197,7 +226,9 @@ TEST_F(MftH264DecoderTest, DrainOnEndOfInputStream) {
ASSERT_TRUE(decoder->Init(NULL, 0, 0, 111, 222, 0, 0,
NewCallback(&reader, &FakeMftReader::ReadCallback),
NewCallback(renderer.get(),
- &FakeMftRenderer::WriteCallback)));
+ &FakeMftRenderer::WriteCallback),
+ NewCallback(renderer.get(),
+ &FakeMftRenderer::OnDecodeError)));
MessageLoop::current()->PostTask(
FROM_HERE,
NewRunnableMethod(renderer.get(), &FakeMftRenderer::Start));
@@ -217,7 +248,9 @@ TEST_F(MftH264DecoderTest, NoOutputOnGarbageInput) {
ASSERT_TRUE(decoder->Init(NULL, 0, 0, 111, 222, 0, 0,
NewCallback(&reader, &FakeMftReader::ReadCallback),
NewCallback(renderer.get(),
- &FakeMftRenderer::WriteCallback)));
+ &FakeMftRenderer::WriteCallback),
+ NewCallback(renderer.get(),
+ &FakeMftRenderer::OnDecodeError)));
MessageLoop::current()->PostTask(
FROM_HERE, NewRunnableMethod(renderer.get(), &FakeMftRenderer::Start));
MessageLoop::current()->Run();
@@ -230,7 +263,7 @@ TEST_F(MftH264DecoderTest, NoOutputOnGarbageInput) {
EXPECT_EQ(0, renderer->count());
}
-FilePath GetBearVideoFilePath(const std::string& file_name) {
+FilePath GetVideoFilePath(const std::string& file_name) {
FilePath path;
PathService::Get(base::DIR_SOURCE_ROOT, &path);
path = path.AppendASCII("media")
@@ -244,7 +277,7 @@ FilePath GetBearVideoFilePath(const std::string& file_name) {
// H.264 video.
TEST_F(MftH264DecoderTest, DecodeValidVideoDxva) {
MessageLoop loop;
- FilePath path = GetBearVideoFilePath("bear.1280x720.mp4");
+ FilePath path = GetVideoFilePath("bear.1280x720.mp4");
ASSERT_TRUE(file_util::PathExists(path));
ScopedComPtr<IDirect3D9> d3d9;
@@ -257,13 +290,16 @@ TEST_F(MftH264DecoderTest, DecodeValidVideoDxva) {
scoped_refptr<MftH264Decoder> decoder(new MftH264Decoder(true));
ASSERT_TRUE(decoder.get() != NULL);
- FFmpegFileReader reader(WideToASCII(path.value()));
- ASSERT_TRUE(reader.Initialize());
+ scoped_ptr<FFmpegFileReader> reader(
+ new FFmpegFileReader(WideToASCII(path.value())));
+ ASSERT_TRUE(reader->Initialize());
scoped_refptr<FakeMftRenderer> renderer(new FakeMftRenderer(decoder));
ASSERT_TRUE(decoder->Init(dev_manager.get(), 0, 0, 111, 222, 0, 0,
- NewCallback(&reader, &FFmpegFileReader::Read2),
+ NewCallback(reader.get(), &FFmpegFileReader::Read),
+ NewCallback(renderer.get(),
+ &FakeMftRenderer::WriteCallback),
NewCallback(renderer.get(),
- &FakeMftRenderer::WriteCallback)));
+ &FakeMftRenderer::OnDecodeError)));
MessageLoop::current()->PostTask(
FROM_HERE,
NewRunnableMethod(renderer.get(), &FakeMftRenderer::Start));
@@ -275,4 +311,78 @@ TEST_F(MftH264DecoderTest, DecodeValidVideoDxva) {
EXPECT_LE(decoder->frames_read() - decoder->frames_decoded(), 1);
}
+TEST_F(MftH264DecoderTest, FlushAtInit) {
+ scoped_refptr<MftH264Decoder> decoder(new MftH264Decoder(false));
+ ASSERT_TRUE(decoder.get() != NULL);
+ FakeMftReader reader;
+ scoped_refptr<FakeMftRenderer> renderer(new FakeMftRenderer(decoder));
+ ASSERT_TRUE(decoder->Init(NULL, 0, 0, 111, 222, 0, 0,
+ NewCallback(&reader, &FakeMftReader::ReadCallback),
+ NewCallback(renderer.get(),
+ &FakeMftRenderer::WriteCallback),
+ NewCallback(renderer.get(),
+ &FakeMftRenderer::OnDecodeError)));
+ EXPECT_TRUE(decoder->Flush());
+}
+
+TEST_F(MftH264DecoderTest, FlushAtEnd) {
+ MessageLoop loop;
+ scoped_refptr<MftH264Decoder> decoder(new MftH264Decoder(false));
+ ASSERT_TRUE(decoder.get() != NULL);
+
+  // Zero frames: the reader outputs a NULL buffer indicating end-of-stream.
+ FakeMftReader reader(0);
+ scoped_refptr<FakeMftRenderer> renderer(new FakeMftRenderer(decoder));
+ ASSERT_TRUE(decoder->Init(NULL, 0, 0, 111, 222, 0, 0,
+ NewCallback(&reader, &FakeMftReader::ReadCallback),
+ NewCallback(renderer.get(),
+ &FakeMftRenderer::WriteCallback),
+ NewCallback(renderer.get(),
+ &FakeMftRenderer::OnDecodeError)));
+ MessageLoop::current()->PostTask(
+ FROM_HERE,
+ NewRunnableMethod(renderer.get(), &FakeMftRenderer::Start));
+ MessageLoop::current()->Run();
+ EXPECT_TRUE(decoder->Flush());
+}
+
+TEST_F(MftH264DecoderTest, FlushAtMiddle) {
+ MessageLoop loop;
+ FilePath path = GetVideoFilePath("bear.1280x720.mp4");
+ ASSERT_TRUE(file_util::PathExists(path));
+
+ ScopedComPtr<IDirect3D9> d3d9;
+ ScopedComPtr<IDirect3DDevice9> device;
+ ScopedComPtr<IDirect3DDeviceManager9> dev_manager;
+ dev_manager.Attach(CreateD3DDevManager(GetDesktopWindow(),
+ d3d9.Receive(),
+ device.Receive()));
+ ASSERT_TRUE(dev_manager.get() != NULL);
+
+ scoped_refptr<MftH264Decoder> decoder(new MftH264Decoder(true));
+ ASSERT_TRUE(decoder.get() != NULL);
+ scoped_ptr<FFmpegFileReader> reader(
+ new FFmpegFileReader(WideToASCII(path.value())));
+ ASSERT_TRUE(reader->Initialize());
+ scoped_refptr<FakeMftRenderer> renderer(new FakeMftRenderer(decoder));
+ ASSERT_TRUE(renderer.get());
+
+  // Flush after decoding 40 frames. There are no more key frames after the
+  // first one, so we expect no further output frames after the flush.
+ int flush_at_nth_decoded_frame = 40;
+ renderer->SetFlushCountdown(flush_at_nth_decoded_frame);
+ ASSERT_TRUE(decoder->Init(dev_manager.get(), 0, 0, 111, 222, 0, 0,
+ NewCallback(reader.get(), &FFmpegFileReader::Read),
+ NewCallback(renderer.get(),
+ &FakeMftRenderer::WriteCallback),
+ NewCallback(renderer.get(),
+ &FakeMftRenderer::OnDecodeError)));
+ MessageLoop::current()->PostTask(
+ FROM_HERE,
+ NewRunnableMethod(renderer.get(), &FakeMftRenderer::Start));
+ MessageLoop::current()->Run();
+ EXPECT_EQ(82, decoder->frames_read());
+ EXPECT_EQ(decoder->frames_decoded(), flush_at_nth_decoded_frame);
+}
+
} // namespace media