author    pkasting@chromium.org <pkasting@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>    2010-10-19 23:49:42 +0000
committer pkasting@chromium.org <pkasting@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>    2010-10-19 23:49:42 +0000
commit    5f2d7017a397fef6245981de1a27c72c8dc89084 (patch)
tree      4db716c2e459ed85af20e06ad3875fdc9c2810ac /media
parent    7d9664e3f23011ded7a44aafe87b3e4c63b1e013 (diff)
Convert LOG(INFO) to VLOG(1) - media/.
Also, remove some extra {}s, remove "else" after "return", eliminate a "using".

BUG=none
TEST=none
Review URL: http://codereview.chromium.org/3912001

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@63140 0039d316-1c4b-4281-b951-d872f2087c98
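The three cleanups in this CL are mechanical. A minimal before/after sketch of the pattern, using hypothetical names loosely modeled on the alsa_output.cc and pipeline_impl.cc hunks below (not actual code from this CL):

    // Before: logged at INFO severity in all builds; redundant {}s around a
    // single-statement body; "else" following an unconditional "return".
    if (playback_handle) {
      LOG(INFO) << "Auto-selected device: " << device_name;
    }
    if (!factory) {
      return false;
    } else {
      StartPipeline(factory);
    }

    // After: VLOG(1) is silent unless verbose logging is enabled at run time;
    // braces dropped; "else" removed because "return" already exits.
    if (playback_handle)
      VLOG(1) << "Auto-selected device: " << device_name;
    if (!factory)
      return false;
    StartPipeline(factory);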
Diffstat (limited to 'media')
-rw-r--r--  media/audio/linux/alsa_output.cc           5
-rw-r--r--  media/audio/simple_sources_unittest.cc     5
-rw-r--r--  media/base/pipeline_impl.cc                13
-rw-r--r--  media/base/seekable_buffer_unittest.cc     2
-rw-r--r--  media/ffmpeg/ffmpeg_unittest.cc            6
-rw-r--r--  media/filters/ffmpeg_audio_decoder.cc      10
-rw-r--r--  media/filters/ffmpeg_demuxer.cc            4
-rw-r--r--  media/filters/video_renderer_base.cc       2
-rw-r--r--  media/tools/mfdecoder/main.cc              32
-rw-r--r--  media/tools/mfdecoder/mfdecoder.cc         26
-rw-r--r--  media/tools/scaler_bench/scaler_bench.cc   7
-rw-r--r--  media/video/ffmpeg_video_decode_engine.cc  12
-rw-r--r--  media/video/mft_h264_decode_engine.cc      93
13 files changed, 98 insertions, 119 deletions
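Why this matters: LOG(INFO) always writes to the log, while VLOG(n) messages are emitted only when verbose logging is enabled at run time, so this CL silences the converted messages by default. A sketch of the semantics, assuming Chromium's standard base/logging.h macros and its --v / --vmodule switches (ComputeExpensiveHistogram is a hypothetical helper, for illustration only):

    #include <string>
    #include "base/logging.h"

    // Hypothetical expensive helper, defined trivially so the sketch links.
    std::string ComputeExpensiveHistogram() { return "(elided)"; }

    void ReportStats(int num_frames) {
      // Always emitted at INFO severity.
      LOG(INFO) << "Frames processed: " << num_frames;

      // Emitted only when the effective verbosity for this file is >= 1,
      // e.g. --v=1 or --vmodule=report_stats=1; the stream operands are
      // not evaluated otherwise.
      VLOG(1) << "Frames processed: " << num_frames;

      // DVLOG(1) is additionally compiled out of release builds.
      DVLOG(1) << "Frames processed: " << num_frames;

      // Guard genuinely expensive logging explicitly.
      if (VLOG_IS_ON(2))
        VLOG(2) << "Frame histogram: " << ComputeExpensiveHistogram();
    }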
diff --git a/media/audio/linux/alsa_output.cc b/media/audio/linux/alsa_output.cc
index 3125775..410ed15f 100644
--- a/media/audio/linux/alsa_output.cc
+++ b/media/audio/linux/alsa_output.cc
@@ -369,9 +369,8 @@ void AlsaPcmOutputStream::OpenTask(uint32 packet_size) {
micros_per_packet_ * 2);
if (requested_device_name_ == kAutoSelectDevice) {
playback_handle_ = AutoSelectDevice(latency_micros_);
- if (playback_handle_) {
- LOG(INFO) << "Auto-selected device: " << device_name_;
- }
+ if (playback_handle_)
+ VLOG(1) << "Auto-selected device: " << device_name_;
} else {
device_name_ = requested_device_name_;
playback_handle_ = alsa_util::OpenPlaybackDevice(wrapper_,
diff --git a/media/audio/simple_sources_unittest.cc b/media/audio/simple_sources_unittest.cc
index 2737cd0..bbcbff2 100644
--- a/media/audio/simple_sources_unittest.cc
+++ b/media/audio/simple_sources_unittest.cc
@@ -19,12 +19,11 @@ void GenerateRandomData(char* buffer, uint32 len) {
called = true;
int seed = static_cast<int>(base::Time::Now().ToInternalValue());
srand(seed);
- LOG(INFO) << "Random seed: " << seed;
+ VLOG(1) << "Random seed: " << seed;
}
- for (uint32 i = 0; i < len; i++) {
+ for (uint32 i = 0; i < len; i++)
buffer[i] = static_cast<char>(rand());
- }
}
} // namespace
diff --git a/media/base/pipeline_impl.cc b/media/base/pipeline_impl.cc
index 5d7759c..64a1ecd 100644
--- a/media/base/pipeline_impl.cc
+++ b/media/base/pipeline_impl.cc
@@ -84,12 +84,11 @@ bool PipelineImpl::Start(FilterFactory* factory,
DCHECK(factory);
scoped_ptr<PipelineCallback> callback(start_callback);
if (running_) {
- LOG(INFO) << "Media pipeline is already running";
+ VLOG(1) << "Media pipeline is already running";
return false;
}
- if (!factory) {
+ if (!factory)
return false;
- }
// Kick off initialization!
running_ = true;
@@ -107,7 +106,7 @@ void PipelineImpl::Stop(PipelineCallback* stop_callback) {
AutoLock auto_lock(lock_);
scoped_ptr<PipelineCallback> callback(stop_callback);
if (!running_) {
- LOG(INFO) << "Media pipeline has already stopped";
+ VLOG(1) << "Media pipeline has already stopped";
return;
}
@@ -121,7 +120,7 @@ void PipelineImpl::Seek(base::TimeDelta time,
AutoLock auto_lock(lock_);
scoped_ptr<PipelineCallback> callback(seek_callback);
if (!running_) {
- LOG(INFO) << "Media pipeline must be running";
+ VLOG(1) << "Media pipeline must be running";
return;
}
@@ -438,7 +437,7 @@ PipelineImpl::State PipelineImpl::FindNextState(State current) {
void PipelineImpl::SetError(PipelineError error) {
DCHECK(IsRunning());
DCHECK(error != PIPELINE_OK) << "PIPELINE_OK isn't an error!";
- LOG(INFO) << "Media pipeline error: " << error;
+ VLOG(1) << "Media pipeline error: " << error;
message_loop_->PostTask(FROM_HERE,
NewRunnableMethod(this, &PipelineImpl::ErrorChangedTask, error));
@@ -764,7 +763,7 @@ void PipelineImpl::SeekTask(base::TimeDelta time,
if (state_ != kStarted && state_ != kEnded) {
// TODO(scherkus): should we run the callback? I'm tempted to say the API
// will only execute the first Seek() request.
- LOG(INFO) << "Media pipeline has not started, ignoring seek to "
+ VLOG(1) << "Media pipeline has not started, ignoring seek to "
<< time.InMicroseconds();
delete seek_callback;
return;
diff --git a/media/base/seekable_buffer_unittest.cc b/media/base/seekable_buffer_unittest.cc
index 286580b..50f5d15 100644
--- a/media/base/seekable_buffer_unittest.cc
+++ b/media/base/seekable_buffer_unittest.cc
@@ -25,7 +25,7 @@ class SeekableBufferTest : public testing::Test {
// Setup seed.
size_t seed = static_cast<int32>(base::Time::Now().ToInternalValue());
srand(seed);
- LOG(INFO) << "Random seed: " << seed;
+ VLOG(1) << "Random seed: " << seed;
// Creates a test data.
for (size_t i = 0; i < kDataSize; i++)
diff --git a/media/ffmpeg/ffmpeg_unittest.cc b/media/ffmpeg/ffmpeg_unittest.cc
index 343486e..970b27c 100644
--- a/media/ffmpeg/ffmpeg_unittest.cc
+++ b/media/ffmpeg/ffmpeg_unittest.cc
@@ -636,17 +636,17 @@ TEST_F(FFmpegTest, VideoPlayedCollapse) {
SeekTo(0.5);
ReadRemainingFile();
EXPECT_TRUE(StepDecodeVideo());
- LOG(INFO) << decoded_video_time();
+ VLOG(1) << decoded_video_time();
SeekTo(2.83);
ReadRemainingFile();
EXPECT_TRUE(StepDecodeVideo());
- LOG(INFO) << decoded_video_time();
+ VLOG(1) << decoded_video_time();
SeekTo(0.4);
ReadRemainingFile();
EXPECT_TRUE(StepDecodeVideo());
- LOG(INFO) << decoded_video_time();
+ VLOG(1) << decoded_video_time();
CloseCodecs();
CloseFile();
diff --git a/media/filters/ffmpeg_audio_decoder.cc b/media/filters/ffmpeg_audio_decoder.cc
index ba3b5d2..178504e 100644
--- a/media/filters/ffmpeg_audio_decoder.cc
+++ b/media/filters/ffmpeg_audio_decoder.cc
@@ -186,12 +186,10 @@ void FFmpegAudioDecoder::DoDecode(Buffer* input) {
if (result < 0 ||
output_buffer_size < 0 ||
static_cast<size_t>(output_buffer_size) > kOutputBufferSize) {
- LOG(INFO) << "Error decoding an audio frame with timestamp: "
- << input->GetTimestamp().InMicroseconds() << " us"
- << " , duration: "
- << input->GetDuration().InMicroseconds() << " us"
- << " , packet size: "
- << input->GetDataSize() << " bytes";
+ VLOG(1) << "Error decoding an audio frame with timestamp: "
+ << input->GetTimestamp().InMicroseconds() << " us, duration: "
+ << input->GetDuration().InMicroseconds() << " us, packet size: "
+ << input->GetDataSize() << " bytes";
DecoderBase<AudioDecoder, Buffer>::OnDecodeComplete();
return;
}
diff --git a/media/filters/ffmpeg_demuxer.cc b/media/filters/ffmpeg_demuxer.cc
index 5869e3a..493108a 100644
--- a/media/filters/ffmpeg_demuxer.cc
+++ b/media/filters/ffmpeg_demuxer.cc
@@ -495,10 +495,10 @@ void FFmpegDemuxer::SeekTask(base::TimeDelta time, FilterCallback* callback) {
// will attempt to use the lowest-index video stream, if present, followed by
// the lowest-index audio stream.
if (av_seek_frame(format_context_, -1, time.InMicroseconds(), flags) < 0) {
- // Use LOG(INFO) instead of NOTIMPLEMENTED() to prevent the message being
+ // Use VLOG(1) instead of NOTIMPLEMENTED() to prevent the message being
// captured from stdout and contaminating testing.
// TODO(scherkus): Implement this properly and signal error (BUG=23447).
- LOG(INFO) << "Not implemented";
+ VLOG(1) << "Not implemented";
}
// Notify we're finished seeking.
diff --git a/media/filters/video_renderer_base.cc b/media/filters/video_renderer_base.cc
index 950020c..b399f58 100644
--- a/media/filters/video_renderer_base.cc
+++ b/media/filters/video_renderer_base.cc
@@ -273,7 +273,7 @@ void VideoRendererBase::ThreadMain() {
scoped_refptr<VideoFrame> next_frame = frames_queue_ready_.front();
if (next_frame->IsEndOfStream()) {
state_ = kEnded;
- DLOG(INFO) << "Video render gets EOS";
+ DVLOG(1) << "Video render gets EOS";
host()->NotifyEnded();
continue;
}
diff --git a/media/tools/mfdecoder/main.cc b/media/tools/mfdecoder/main.cc
index 1ede4af..ed9220b 100644
--- a/media/tools/mfdecoder/main.cc
+++ b/media/tools/mfdecoder/main.cc
@@ -446,7 +446,7 @@ int main(int argc, char** argv) {
usage();
return -1;
}
- LOG(INFO) << "use_dxva2: " << use_dxva2;
+ VLOG(1) << "use_dxva2: " << use_dxva2;
g_render_to_window = false;
g_render_asap = false;
@@ -462,8 +462,8 @@ int main(int argc, char** argv) {
usage();
return -1;
}
- LOG(INFO) << "g_render_to_window: " << g_render_to_window;
- LOG(INFO) << "g_render_asap: " << g_render_asap;
+ VLOG(1) << "g_render_to_window: " << g_render_to_window
+ << "\ng_render_asap: " << g_render_asap;
scoped_array<wchar_t> file_name(ConvertASCIIStringToUnicode(argv[argc-1]));
if (file_name.get() == NULL) {
@@ -528,8 +528,8 @@ int main(int argc, char** argv) {
}
base::Time start(base::Time::Now());
printf("Decoding started\n");
- LOG(INFO) << "Decoding " << file_name.get()
- << " started at " << start.ToTimeT();
+ VLOG(1) << "Decoding " << file_name.get()
+ << " started at " << start.ToTimeT();
base::AtExitManager exit_manager;
MessageLoopForUI message_loop;
@@ -544,19 +544,15 @@ int main(int argc, char** argv) {
printf("Decoding finished\n");
base::Time end(base::Time::Now());
- LOG(INFO) << "Decoding finished at " << end.ToTimeT();
- LOG(INFO) << "Took " << (end-start).InMilliseconds() << "ms";
- LOG(INFO) << "Number of frames processed: " << g_num_frames;
- LOG(INFO) << "Decode time: " << g_decode_time->InMilliseconds() << "ms";
- LOG(INFO) << "Average decode time: "
- << (g_num_frames == 0 ?
- 0 :
- g_decode_time->InMillisecondsF() / g_num_frames);
- LOG(INFO) << "Render time: " << g_render_time->InMilliseconds() << "ms";
- LOG(INFO) << "Average render time: "
- << (g_num_frames == 0 ?
- 0 :
- g_render_time->InMillisecondsF() / g_num_frames);
+ VLOG(1) << "Decoding finished at " << end.ToTimeT()
+ << "\nTook " << (end-start).InMilliseconds() << "ms"
+ << "\nNumber of frames processed: " << g_num_frames
+ << "\nDecode time: " << g_decode_time->InMilliseconds() << "ms"
+ << "\nAverage decode time: " << ((g_num_frames == 0) ?
+ 0 : (g_decode_time->InMillisecondsF() / g_num_frames))
+ << "\nRender time: " << g_render_time->InMilliseconds() << "ms"
+ << "\nAverage render time: " << ((g_num_frames == 0) ?
+ 0 : (g_render_time->InMillisecondsF() / g_num_frames));
printf("Normal termination\n");
delete g_decode_time;
delete g_render_time;
diff --git a/media/tools/mfdecoder/mfdecoder.cc b/media/tools/mfdecoder/mfdecoder.cc
index e629a4a..a2b3ba97 100644
--- a/media/tools/mfdecoder/mfdecoder.cc
+++ b/media/tools/mfdecoder/mfdecoder.cc
@@ -1,6 +1,6 @@
-// Copyright (c) 2010 The Chromium Authors. All rights reserved. Use of this
-// source code is governed by a BSD-style license that can be found in the
-// LICENSE file.
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifdef WINVER
#undef WINVER
@@ -111,7 +111,7 @@ IMFSample* MFDecoder::ReadVideoSample() {
return NULL;
}
if (output_flags & MF_SOURCE_READERF_ENDOFSTREAM) {
- LOG(INFO) << "Video sample reading has reached the end of stream";
+ VLOG(1) << "Video sample reading has reached the end of stream";
end_of_stream_ = true;
return NULL;
}
@@ -167,7 +167,7 @@ bool MFDecoder::InitSourceReader(const wchar_t* source_url,
LOG(ERROR) << "Failed to create source reader";
return false;
}
- LOG(INFO) << "Source reader created";
+ VLOG(1) << "Source reader created";
return true;
}
@@ -226,7 +226,7 @@ bool MFDecoder::SelectVideoStreamOnly() {
return false;
}
video_stream_index_ = stream_index;
- LOG(INFO) << "Video stream is at " << video_stream_index_;
+ VLOG(1) << "Video stream is at " << video_stream_index_;
}
} else if (hr == MF_E_INVALIDSTREAMNUMBER) {
break; // No more streams, quit.
@@ -253,22 +253,17 @@ bool MFDecoder::InitVideoInfo(IDirect3DDeviceManager9* dev_manager) {
if (FAILED(hr)) {
LOG(ERROR) << "Failed to determine video subtype";
return false;
- } else {
- if (video_subtype == MFVideoFormat_H264) {
- LOG(INFO) << "Video subtype is H.264";
- } else {
- LOG(INFO) << "Video subtype is NOT H.264";
- }
}
+ VLOG(1) << "Video subtype is "
+ << ((video_subtype == MFVideoFormat_H264) ? "" : "NOT ") << "H.264";
hr = MFGetAttributeSize(video_type, MF_MT_FRAME_SIZE,
reinterpret_cast<UINT32*>(&width_),
reinterpret_cast<UINT32*>(&height_));
if (FAILED(hr)) {
LOG(ERROR) << "Failed to determine frame size";
return false;
- } else {
- LOG(INFO) << "Video width: " << width_ << ", height: " << height_;
}
+ VLOG(1) << "Video width: " << width_ << ", height: " << height_;
// Try to change to YV12 output format.
const GUID kOutputVideoSubtype = MFVideoFormat_YV12;
@@ -300,9 +295,8 @@ bool MFDecoder::InitVideoInfo(IDirect3DDeviceManager9* dev_manager) {
if (FAILED(hr)) {
LOG(ERROR) << "Failed to change output video format and determine stride";
return false;
- } else {
- LOG(INFO) << "IMFMediaBuffer stride: " << mfbuffer_stride_;
}
+ VLOG(1) << "IMFMediaBuffer stride: " << mfbuffer_stride_;
// Send a message to the decoder to tell it to use DXVA2.
if (use_dxva2_) {
diff --git a/media/tools/scaler_bench/scaler_bench.cc b/media/tools/scaler_bench/scaler_bench.cc
index 8c3570b..3457d7c 100644
--- a/media/tools/scaler_bench/scaler_bench.cc
+++ b/media/tools/scaler_bench/scaler_bench.cc
@@ -20,7 +20,6 @@
using base::TimeDelta;
using base::TimeTicks;
using media::VideoFrame;
-using std::vector;
namespace {
@@ -32,7 +31,7 @@ int num_frames = 500;
int num_buffers = 50;
double BenchmarkSkia() {
- vector< scoped_refptr<VideoFrame> > source_frames;
+ std::vector<scoped_refptr<VideoFrame> > source_frames;
ScopedVector<SkBitmap> dest_frames;
for (int i = 0; i < num_buffers; i++) {
scoped_refptr<VideoFrame> source_frame;
@@ -88,8 +87,8 @@ double BenchmarkSkia() {
}
double BenchmarkFilter(media::ScaleFilter filter) {
- vector< scoped_refptr<VideoFrame> > source_frames;
- vector< scoped_refptr<VideoFrame> > dest_frames;
+ std::vector<scoped_refptr<VideoFrame> > source_frames;
+ std::vector<scoped_refptr<VideoFrame> > dest_frames;
for (int i = 0; i < num_buffers; i++) {
scoped_refptr<VideoFrame> source_frame;
diff --git a/media/video/ffmpeg_video_decode_engine.cc b/media/video/ffmpeg_video_decode_engine.cc
index 7a8181b..a03b416 100644
--- a/media/video/ffmpeg_video_decode_engine.cc
+++ b/media/video/ffmpeg_video_decode_engine.cc
@@ -65,7 +65,7 @@ void FFmpegVideoDecodeEngine::Initialize(
direct_rendering_ = codec->capabilities & CODEC_CAP_DR1 ? true : false;
#endif
if (direct_rendering_) {
- DLOG(INFO) << "direct rendering is used";
+ DVLOG(1) << "direct rendering is used";
allocator_->Initialize(codec_context_, GetSurfaceFormat());
}
}
@@ -211,12 +211,10 @@ void FFmpegVideoDecodeEngine::DecodeFrame(scoped_refptr<Buffer> buffer) {
// Log the problem if we can't decode a video frame and exit early.
if (result < 0) {
- LOG(INFO) << "Error decoding a video frame with timestamp: "
- << buffer->GetTimestamp().InMicroseconds() << " us"
- << " , duration: "
- << buffer->GetDuration().InMicroseconds() << " us"
- << " , packet size: "
- << buffer->GetDataSize() << " bytes";
+ VLOG(1) << "Error decoding a video frame with timestamp: "
+ << buffer->GetTimestamp().InMicroseconds() << " us, duration: "
+ << buffer->GetDuration().InMicroseconds() << " us, packet size: "
+ << buffer->GetDataSize() << " bytes";
// TODO(jiesun): call event_handler_->OnError() instead.
event_handler_->ConsumeVideoFrame(video_frame);
return;
diff --git a/media/video/mft_h264_decode_engine.cc b/media/video/mft_h264_decode_engine.cc
index 4558b7c..9a5bdb0 100644
--- a/media/video/mft_h264_decode_engine.cc
+++ b/media/video/mft_h264_decode_engine.cc
@@ -127,7 +127,7 @@ static IMFSample* CreateInputSample(const uint8* stream, int size,
LOG(ERROR) << "Failed to set current length to " << size;
return NULL;
}
- LOG(INFO) << __FUNCTION__ << " wrote " << size << " bytes into input sample";
+ VLOG(1) << __FUNCTION__ << " wrote " << size << " bytes into input sample";
return sample.Detach();
}
@@ -504,33 +504,32 @@ bool MftH264DecodeEngine::GetStreamsInfoAndBufferReqs() {
LOG(ERROR) << "Failed to get input stream info";
return false;
}
- LOG(INFO) << "Input stream info: ";
- LOG(INFO) << "Max latency: " << input_stream_info_.hnsMaxLatency;
-
+ VLOG(1) << "Input stream info:"
+ << "\nMax latency: " << input_stream_info_.hnsMaxLatency
+ << "\nFlags: " << std::hex << std::showbase
+ << input_stream_info_.dwFlags
+ << "\nMin buffer size: " << input_stream_info_.cbSize
+ << "\nMax lookahead: " << input_stream_info_.cbMaxLookahead
+ << "\nAlignment: " << input_stream_info_.cbAlignment;
// There should be three flags, one for requiring a whole frame be in a
// single sample, one for requiring there be one buffer only in a single
// sample, and one that specifies a fixed sample size. (as in cbSize)
- LOG(INFO) << "Flags: "
- << std::hex << std::showbase << input_stream_info_.dwFlags;
CHECK_EQ(input_stream_info_.dwFlags, 0x7u);
- LOG(INFO) << "Min buffer size: " << input_stream_info_.cbSize;
- LOG(INFO) << "Max lookahead: " << input_stream_info_.cbMaxLookahead;
- LOG(INFO) << "Alignment: " << input_stream_info_.cbAlignment;
hr = decode_engine_->GetOutputStreamInfo(0, &output_stream_info_);
if (FAILED(hr)) {
LOG(ERROR) << "Failed to get output stream info";
return false;
}
- LOG(INFO) << "Output stream info: ";
+ VLOG(1) << "Output stream info:"
+ << "\nFlags: " << std::hex << std::showbase
+ << output_stream_info_.dwFlags
+ << "\nMin buffer size: " << output_stream_info_.cbSize
+ << "\nAlignment: " << output_stream_info_.cbAlignment;
// The flags here should be the same and mean the same thing, except when
// DXVA is enabled, there is an extra 0x100 flag meaning decoder will
// allocate its own sample.
- LOG(INFO) << "Flags: "
- << std::hex << std::showbase << output_stream_info_.dwFlags;
CHECK_EQ(output_stream_info_.dwFlags, use_dxva_ ? 0x107u : 0x7u);
- LOG(INFO) << "Min buffer size: " << output_stream_info_.cbSize;
- LOG(INFO) << "Alignment: " << output_stream_info_.cbAlignment;
return true;
}
@@ -565,7 +564,7 @@ bool MftH264DecodeEngine::DoDecode() {
IMFCollection* events = output_data_buffer.pEvents;
if (events != NULL) {
- LOG(INFO) << "Got events from ProcessOuput, but discarding";
+ VLOG(1) << "Got events from ProcessOuput, but discarding";
events->Release();
}
@@ -581,11 +580,11 @@ bool MftH264DecodeEngine::DoDecode() {
// event_handler_->OnFormatChange(info_.stream_info);
event_handler_->ProduceVideoSample(NULL);
return true;
- } else {
- event_handler_->OnError();
- return false;
}
- } else if (hr == MF_E_TRANSFORM_NEED_MORE_INPUT) {
+ event_handler_->OnError();
+ return false;
+ }
+ if (hr == MF_E_TRANSFORM_NEED_MORE_INPUT) {
if (state_ == kEosDrain) {
// No more output from the decoder. Notify EOS and stop playback.
scoped_refptr<VideoFrame> frame;
@@ -596,12 +595,11 @@ bool MftH264DecodeEngine::DoDecode() {
}
event_handler_->ProduceVideoSample(NULL);
return true;
- } else {
- LOG(ERROR) << "Unhandled error in DoDecode()";
- state_ = MftH264DecodeEngine::kStopped;
- event_handler_->OnError();
- return false;
}
+ LOG(ERROR) << "Unhandled error in DoDecode()";
+ state_ = MftH264DecodeEngine::kStopped;
+ event_handler_->OnError();
+ return false;
}
// We succeeded in getting an output sample.
@@ -657,32 +655,31 @@ bool MftH264DecodeEngine::DoDecode() {
NewRunnableMethod(this, &MftH264DecodeEngine::OnUploadVideoFrameDone,
surface, output_frames_[0]));
return true;
- } else {
- // TODO(hclam): Remove this branch.
- // Not DXVA.
- VideoFrame::CreateFrame(info_.stream_info.surface_format,
- info_.stream_info.surface_width,
- info_.stream_info.surface_height,
- TimeDelta::FromMicroseconds(timestamp),
- TimeDelta::FromMicroseconds(duration),
- &frame);
- if (!frame.get()) {
- LOG(ERROR) << "Failed to allocate video frame for yuv plane";
- event_handler_->OnError();
- return true;
- }
- uint8* src_y;
- DWORD max_length, current_length;
- HRESULT hr = output_buffer->Lock(&src_y, &max_length, &current_length);
- if (FAILED(hr))
- return true;
- uint8* dst_y = static_cast<uint8*>(frame->data(VideoFrame::kYPlane));
-
- memcpy(dst_y, src_y, current_length);
- CHECK(SUCCEEDED(output_buffer->Unlock()));
- event_handler_->ConsumeVideoFrame(frame);
+ }
+ // TODO(hclam): Remove this branch.
+ // Not DXVA.
+ VideoFrame::CreateFrame(info_.stream_info.surface_format,
+ info_.stream_info.surface_width,
+ info_.stream_info.surface_height,
+ TimeDelta::FromMicroseconds(timestamp),
+ TimeDelta::FromMicroseconds(duration),
+ &frame);
+ if (!frame.get()) {
+ LOG(ERROR) << "Failed to allocate video frame for yuv plane";
+ event_handler_->OnError();
return true;
}
+ uint8* src_y;
+ DWORD max_length, current_length;
+ hr = output_buffer->Lock(&src_y, &max_length, &current_length);
+ if (FAILED(hr))
+ return true;
+ uint8* dst_y = static_cast<uint8*>(frame->data(VideoFrame::kYPlane));
+
+ memcpy(dst_y, src_y, current_length);
+ CHECK(SUCCEEDED(output_buffer->Unlock()));
+ event_handler_->ConsumeVideoFrame(frame);
+ return true;
}
void MftH264DecodeEngine::OnUploadVideoFrameDone(