diff options
-rw-r--r-- | media/base/buffers.h | 23 | ||||
-rw-r--r-- | media/base/data_buffer_unittest.cc | 8 | ||||
-rw-r--r-- | media/base/mock_media_filters.h | 7 | ||||
-rw-r--r-- | media/base/pipeline.h | 1 | ||||
-rw-r--r-- | media/base/video_frame_impl.cc | 11 | ||||
-rw-r--r-- | media/base/video_frame_impl.h | 5 | ||||
-rw-r--r-- | media/base/video_frame_impl_unittest.cc | 7 | ||||
-rw-r--r-- | media/filters/audio_renderer_base.cc | 24 | ||||
-rw-r--r-- | media/filters/ffmpeg_audio_decoder.cc | 21 | ||||
-rw-r--r-- | media/filters/ffmpeg_demuxer.cc | 16 | ||||
-rw-r--r-- | media/filters/ffmpeg_demuxer.h | 3 | ||||
-rw-r--r-- | media/filters/ffmpeg_demuxer_unittest.cc | 7 | ||||
-rw-r--r-- | media/filters/ffmpeg_video_decoder.cc | 25 | ||||
-rw-r--r-- | media/filters/video_renderer_base.cc | 5 | ||||
-rw-r--r-- | media/filters/video_thread.cc | 25 |
15 files changed, 130 insertions, 58 deletions
diff --git a/media/base/buffers.h b/media/base/buffers.h index 527f9e6..6fa0cfe 100644 --- a/media/base/buffers.h +++ b/media/base/buffers.h @@ -45,10 +45,10 @@ class StreamSample : public base::RefCountedThreadSafe<StreamSample> { return duration_; } - // Indicates that the sample is the last one in the stream. - bool IsEndOfStream() const { - return end_of_stream_; - } + // Indicates that the sample is the last one in the stream. This method is + // pure virtual so implementors can decide when to declare end of stream + // depending on specific data. + virtual bool IsEndOfStream() const = 0; // Indicates that this sample is discontinuous from the previous one, for // example, following a seek. @@ -66,11 +66,6 @@ class StreamSample : public base::RefCountedThreadSafe<StreamSample> { duration_ = duration; } - // Sets the value returned by IsEndOfStream(). - void SetEndOfStream(bool end_of_stream) { - end_of_stream_ = end_of_stream; - } - // Sets the value returned by IsDiscontinuous(). void SetDiscontinuous(bool discontinuous) { discontinuous_ = discontinuous; @@ -79,14 +74,12 @@ class StreamSample : public base::RefCountedThreadSafe<StreamSample> { protected: friend class base::RefCountedThreadSafe<StreamSample>; StreamSample() - : end_of_stream_(false), - discontinuous_(false) { + : discontinuous_(false) { } virtual ~StreamSample() {} base::TimeDelta timestamp_; base::TimeDelta duration_; - bool end_of_stream_; bool discontinuous_; private: @@ -101,6 +94,9 @@ class Buffer : public StreamSample { // Returns the size of valid data in bytes. virtual size_t GetDataSize() const = 0; + + // If there's no data in this buffer, it represents end of stream. + virtual bool IsEndOfStream() const { return GetData() == NULL; } }; @@ -143,6 +139,7 @@ struct VideoSurface { RGBA, // 32bpp RGBA packed 8:8:8:8 YV12, // 12bpp YVU planar 1x1 Y, 2x2 VU samples YV16, // 16bpp YVU planar 1x1 Y, 2x1 VU samples + EMPTY, // An empty frame. }; // Surface format. 
@@ -176,6 +173,8 @@ class VideoFrame : public StreamSample { // Unlocks the underlying surface, the VideoSurface acquired from Lock is no // longer guaranteed to be valid. virtual void Unlock() = 0; + + virtual bool IsEndOfStream() const = 0; }; } // namespace media diff --git a/media/base/data_buffer_unittest.cc b/media/base/data_buffer_unittest.cc index 21334f8..161e018 100644 --- a/media/base/data_buffer_unittest.cc +++ b/media/base/data_buffer_unittest.cc @@ -28,17 +28,14 @@ TEST(DataBufferTest, Basic) { buffer->SetDuration(kDurationA); EXPECT_TRUE(kTimestampA == buffer->GetTimestamp()); EXPECT_TRUE(kDurationA == buffer->GetDuration()); - EXPECT_FALSE(buffer->IsEndOfStream()); + EXPECT_TRUE(buffer->IsEndOfStream()); + EXPECT_FALSE(buffer->GetData()); EXPECT_FALSE(buffer->IsDiscontinuous()); buffer->SetTimestamp(kTimestampB); buffer->SetDuration(kDurationB); EXPECT_TRUE(kTimestampB == buffer->GetTimestamp()); EXPECT_TRUE(kDurationB == buffer->GetDuration()); - buffer->SetEndOfStream(true); - EXPECT_TRUE(buffer->IsEndOfStream()); - buffer->SetEndOfStream(false); - EXPECT_FALSE(buffer->IsEndOfStream()); buffer->SetDiscontinuous(true); EXPECT_TRUE(buffer->IsDiscontinuous()); buffer->SetDiscontinuous(false); @@ -53,6 +50,7 @@ TEST(DataBufferTest, Basic) { const uint8* read_only_data = buffer->GetData(); ASSERT_EQ(data, read_only_data); ASSERT_EQ(0, memcmp(read_only_data, kData, kDataSize)); + EXPECT_FALSE(buffer->IsEndOfStream()); data = buffer->GetWritableData(kNewDataSize + 10); ASSERT_TRUE(data); diff --git a/media/base/mock_media_filters.h b/media/base/mock_media_filters.h index b90eeb4..1d215c4 100644 --- a/media/base/mock_media_filters.h +++ b/media/base/mock_media_filters.h @@ -459,10 +459,11 @@ class MockVideoDecoder : public VideoDecoder { } else { mock_frame_time_ += config_->frame_duration; if (mock_frame_time_ >= config_->media_duration) { - frame->SetEndOfStream(true); + VideoFrameImpl::CreateEmptyFrame(&frame); + } else { + InitializeYV12Frame(frame, 
(mock_frame_time_.InSecondsF() / + config_->media_duration.InSecondsF())); } - InitializeYV12Frame(frame, (mock_frame_time_.InSecondsF() / - config_->media_duration.InSecondsF())); callback->Run(frame); } } diff --git a/media/base/pipeline.h b/media/base/pipeline.h index 0120863..66dab005 100644 --- a/media/base/pipeline.h +++ b/media/base/pipeline.h @@ -36,6 +36,7 @@ enum PipelineError { PIPELINE_ERROR_COULD_NOT_RENDER, PIPELINE_ERROR_READ, PIPELINE_ERROR_AUDIO_HARDWARE, + PIPELINE_ERROR_NO_DATA, // Demuxer related errors. DEMUXER_ERROR_COULD_NOT_OPEN, DEMUXER_ERROR_COULD_NOT_PARSE, diff --git a/media/base/video_frame_impl.cc b/media/base/video_frame_impl.cc index 8d9ae59..d132d0c 100644 --- a/media/base/video_frame_impl.cc +++ b/media/base/video_frame_impl.cc @@ -47,6 +47,11 @@ void VideoFrameImpl::CreateFrame(VideoSurface::Format format, *frame_out = alloc_worked ? frame : NULL; } +// static +void VideoFrameImpl::CreateEmptyFrame(scoped_refptr<VideoFrame>* frame_out) { + *frame_out = new VideoFrameImpl(VideoSurface::EMPTY, 0, 0); +} + static inline size_t RoundUp(size_t value, size_t alignment) { // Check that |alignment| is a power of 2. 
DCHECK((alignment + (alignment - 1)) == (alignment | (alignment - 1))); @@ -122,6 +127,7 @@ VideoFrameImpl::~VideoFrameImpl() { bool VideoFrameImpl::Lock(VideoSurface* surface) { DCHECK(!locked_); + DCHECK_NE(surface_.format, VideoSurface::EMPTY); if (locked_) { memset(surface, 0, sizeof(*surface)); return false; @@ -134,7 +140,12 @@ bool VideoFrameImpl::Lock(VideoSurface* surface) { void VideoFrameImpl::Unlock() { DCHECK(locked_); + DCHECK_NE(surface_.format, VideoSurface::EMPTY); locked_ = false; } +bool VideoFrameImpl::IsEndOfStream() const { + return surface_.format == VideoSurface::EMPTY; +} + } // namespace media diff --git a/media/base/video_frame_impl.h b/media/base/video_frame_impl.h index 36a2f2c..3a48380 100644 --- a/media/base/video_frame_impl.h +++ b/media/base/video_frame_impl.h @@ -22,9 +22,14 @@ class VideoFrameImpl : public VideoFrame { base::TimeDelta duration, scoped_refptr<VideoFrame>* frame_out); + // Creates a frame with format equals to VideoSurface::EMPTY, width, height + // timestamp and duration are all 0. + static void CreateEmptyFrame(scoped_refptr<VideoFrame>* frame_out); + // Implementation of VideoFrame. virtual bool Lock(VideoSurface* surface); virtual void Unlock(); + virtual bool IsEndOfStream() const; private: // Clients must use the static CreateFrame() method to create a new frame. 
diff --git a/media/base/video_frame_impl_unittest.cc b/media/base/video_frame_impl_unittest.cc index c674460..fe0d37c 100644 --- a/media/base/video_frame_impl_unittest.cc +++ b/media/base/video_frame_impl_unittest.cc @@ -91,9 +91,6 @@ TEST(VideoFrameImpl, Basic) { frame->SetDuration(kDurationB); EXPECT_TRUE(kTimestampB == frame->GetTimestamp()); EXPECT_TRUE(kDurationB == frame->GetDuration()); - frame->SetEndOfStream(true); - EXPECT_TRUE(frame->IsEndOfStream()); - frame->SetEndOfStream(false); EXPECT_FALSE(frame->IsEndOfStream()); frame->SetDiscontinuous(true); EXPECT_TRUE(frame->IsDiscontinuous()); @@ -105,4 +102,8 @@ TEST(VideoFrameImpl, Basic) { ExpectFrameColor(frame, 0xFF000000); media::MockVideoDecoder::InitializeYV12Frame(frame, 1.0f); ExpectFrameColor(frame, 0xFFFFFFFF); + + // Test an empty frame. + media::VideoFrameImpl::CreateEmptyFrame(&frame); + EXPECT_TRUE(frame->IsEndOfStream()); } diff --git a/media/filters/audio_renderer_base.cc b/media/filters/audio_renderer_base.cc index 5f51646..7114e60 100644 --- a/media/filters/audio_renderer_base.cc +++ b/media/filters/audio_renderer_base.cc @@ -71,22 +71,30 @@ void AudioRendererBase::OnReadComplete(Buffer* buffer_in) { bool initialization_complete = false; { AutoLock auto_lock(lock_); - if (!stopped_) { + // If we have stopped don't enqueue, same for end of stream buffer since + // it has no data. + if (!stopped_ && !buffer_in->IsEndOfStream()) { queue_.push_back(buffer_in); DCHECK(queue_.size() <= max_queue_size_); } - // See if we're finally initialized. - // TODO(scherkus): handle end of stream cases where we'll never reach max - // queue size. - if (!initialized_ && queue_.size() == max_queue_size_) { - initialization_complete = true; + if (!initialized_) { + // We have completed the initialization when we preroll enough and hit + // the target queue size or the stream has ended. 
+ if (queue_.size() == max_queue_size_ || buffer_in->IsEndOfStream()) + initialization_complete = true; } } if (initialization_complete) { - initialized_ = true; - host_->InitializationComplete(); + if (queue_.empty()) { + // If we say we have initialized but buffer queue is empty, raise an + // error. + host_->Error(PIPELINE_ERROR_NO_DATA); + } else { + initialized_ = true; + host_->InitializationComplete(); + } } } diff --git a/media/filters/ffmpeg_audio_decoder.cc b/media/filters/ffmpeg_audio_decoder.cc index b2bad80..22cab1e 100644 --- a/media/filters/ffmpeg_audio_decoder.cc +++ b/media/filters/ffmpeg_audio_decoder.cc @@ -99,10 +99,11 @@ void FFmpegAudioDecoder::OnDecode(Buffer* input) { output_buffer_size < 0 || static_cast<size_t>(output_buffer_size) > kOutputBufferSize) { host_->Error(PIPELINE_ERROR_DECODE); - } else if (result == 0) { - // TODO(scherkus): does this mark EOS? Do we want to fulfill a read request - // with zero size? - } else { + return; + } + + // If we have decoded something, enqueue the result. + if (output_buffer_size) { DataBuffer* result_buffer = new DataBuffer(); memcpy(result_buffer->GetWritableData(output_buffer_size), output_buffer, output_buffer_size); @@ -119,6 +120,18 @@ void FFmpegAudioDecoder::OnDecode(Buffer* input) { result_buffer->SetTimestamp(input->GetTimestamp()); EnqueueResult(result_buffer); + return; + } + + // Three conditions to meet to declare end of stream for this decoder: + // 1. FFmpeg didn't read anything. + // 2. FFmpeg didn't output anything. + // 3. An end of stream buffer is received. 
+ if (result == 0 && output_buffer_size == 0 && input->IsEndOfStream()) { + DataBuffer* result_buffer = new DataBuffer(); + result_buffer->SetTimestamp(input->GetTimestamp()); + result_buffer->SetDuration(input->GetDuration()); + EnqueueResult(result_buffer); } } diff --git a/media/filters/ffmpeg_demuxer.cc b/media/filters/ffmpeg_demuxer.cc index 085bfcc..dd37a3a 100644 --- a/media/filters/ffmpeg_demuxer.cc +++ b/media/filters/ffmpeg_demuxer.cc @@ -20,7 +20,6 @@ class AVPacketBuffer : public Buffer { AVPacketBuffer(AVPacket* packet, const base::TimeDelta& timestamp, const base::TimeDelta& duration) : packet_(packet) { - DCHECK(packet); SetTimestamp(timestamp); SetDuration(duration); } @@ -318,9 +317,9 @@ void FFmpegDemuxer::DemuxTask() { scoped_ptr<AVPacket> packet(new AVPacket()); int result = av_read_frame(format_context_.get(), packet.get()); if (result < 0) { - // TODO(scherkus): handle end of stream by marking Buffer with the end - // of stream flag. - NOTIMPLEMENTED(); + // If we have reached the end of stream, tell the downstream filters about + // the event. + StreamHasEnded(); return; } @@ -373,6 +372,15 @@ bool FFmpegDemuxer::StreamsHavePendingReads() { return false; } +void FFmpegDemuxer::StreamHasEnded() { + StreamVector::iterator iter; + for (iter = streams_.begin(); iter != streams_.end(); ++iter) { + AVPacket* packet = new AVPacket(); + memset(packet, 0, sizeof(*packet)); + (*iter)->EnqueuePacket(packet); + } +} + AVPacket* FFmpegDemuxer::ClonePacket(AVPacket* packet) { scoped_ptr<AVPacket> clone(new AVPacket()); if (!clone.get() || av_new_packet(clone.get(), packet->size) < 0) { diff --git a/media/filters/ffmpeg_demuxer.h b/media/filters/ffmpeg_demuxer.h index bb9082e..dd9a4be 100644 --- a/media/filters/ffmpeg_demuxer.h +++ b/media/filters/ffmpeg_demuxer.h @@ -142,6 +142,9 @@ class FFmpegDemuxer : public Demuxer { // Safe to call on any thread. bool StreamsHavePendingReads(); + // Signal all FFmpegDemuxerStream that the stream has ended. 
+ void StreamHasEnded(); + // Helper function to deep copy an AVPacket's data, size and timestamps. // Returns NULL if a packet could not be cloned (i.e., out of memory). AVPacket* ClonePacket(AVPacket* packet); diff --git a/media/filters/ffmpeg_demuxer_unittest.cc b/media/filters/ffmpeg_demuxer_unittest.cc index 157f2ae..1d79bea 100644 --- a/media/filters/ffmpeg_demuxer_unittest.cc +++ b/media/filters/ffmpeg_demuxer_unittest.cc @@ -620,9 +620,10 @@ TEST_F(FFmpegDemuxerTest, ReadAndSeek) { reader = new TestReader(); reader->Read(audio_stream); pipeline_->RunAllTasks(); - EXPECT_FALSE(reader->WaitForRead()); - EXPECT_FALSE(reader->called()); - EXPECT_FALSE(reader->buffer()); + EXPECT_TRUE(reader->WaitForRead()); + EXPECT_TRUE(reader->called()); + ASSERT_TRUE(reader->buffer()); + EXPECT_TRUE(reader->buffer()->IsEndOfStream()); // Manually release buffer, which should release any remaining AVPackets. reader = NULL; diff --git a/media/filters/ffmpeg_video_decoder.cc b/media/filters/ffmpeg_video_decoder.cc index 3ec8c6d..d682caa 100644 --- a/media/filters/ffmpeg_video_decoder.cc +++ b/media/filters/ffmpeg_video_decoder.cc @@ -87,12 +87,6 @@ void FFmpegVideoDecoder::OnDecode(Buffer* buffer) { avcodec_flush_buffers(codec_context_); } - // Queue the incoming timestamp. - TimeTuple times; - times.timestamp = buffer->GetTimestamp(); - times.duration = buffer->GetDuration(); - time_queue_.push(times); - // Create a packet for input data. // Due to FFmpeg API changes we no longer have const read-only pointers. AVPacket packet; @@ -121,8 +115,25 @@ void FFmpegVideoDecoder::OnDecode(Buffer* buffer) { // Check for a decoded frame instead of checking the return value of // avcodec_decode_video(). We don't need to stop the pipeline on // decode errors. - if (!decoded) + if (decoded == 0) { + // Three conditions to meet to declare end of stream for this decoder: + // 1. FFmpeg didn't read anything. + // 2. FFmpeg didn't output anything. + // 3. An end of stream buffer is received. 
+ if (result == 0 && buffer->IsEndOfStream()) { + // Create an empty video frame and queue it. + scoped_refptr<VideoFrame> video_frame; + VideoFrameImpl::CreateEmptyFrame(&video_frame); + EnqueueResult(video_frame); + } return; + } + + // Queue the incoming timestamp only if we can decode the frame successfully. + TimeTuple times; + times.timestamp = buffer->GetTimestamp(); + times.duration = buffer->GetDuration(); + time_queue_.push(times); // J (Motion JPEG) versions of YUV are full range 0..255. // Regular (MPEG) YUV is 16..240. diff --git a/media/filters/video_renderer_base.cc b/media/filters/video_renderer_base.cc index 5996dd0..ccc31fb 100644 --- a/media/filters/video_renderer_base.cc +++ b/media/filters/video_renderer_base.cc @@ -181,7 +181,10 @@ void VideoRendererBase::ReadComplete(VideoFrame* video_frame) { if (video_frame->IsDiscontinuous()) { DiscardAllFrames(); } - if (UpdateQueue(host_->GetPipelineStatus()->GetInterpolatedTime(), + // If this is not an end of stream frame, update the queue with it. + // An end of stream frame has no data. + if (!video_frame->IsEndOfStream() && + UpdateQueue(host_->GetPipelineStatus()->GetInterpolatedTime(), video_frame)) { request_repaint = preroll_complete_; } diff --git a/media/filters/video_thread.cc b/media/filters/video_thread.cc index fd88757..994713d 100644 --- a/media/filters/video_thread.cc +++ b/media/filters/video_thread.cc @@ -228,17 +228,26 @@ void VideoThread::GetCurrentFrame(scoped_refptr<media::VideoFrame>* frame_out) { void VideoThread::OnReadComplete(VideoFrame* frame) { AutoLock auto_lock(lock_); - frames_.push_back(frame); - DCHECK_LE(frames_.size(), kMaxFrames); + // If this is an end of stream frame, don't enqueue it since it has no data. + if (!frame->IsEndOfStream()) { + frames_.push_back(frame); + DCHECK_LE(frames_.size(), kMaxFrames); + frame_available_.Signal(); + } // Check for our initialization condition. 
- if (state_ == INITIALIZING && frames_.size() == kMaxFrames) { - state_ = INITIALIZED; - current_frame_ = frames_.front(); - host_->InitializationComplete(); + if (state_ == INITIALIZING && + (frames_.size() == kMaxFrames || frame->IsEndOfStream())) { + if (frames_.empty()) { + // We should have initialized but there's no decoded frames in the queue. + // Raise an error. + host_->Error(PIPELINE_ERROR_NO_DATA); + } else { + state_ = INITIALIZED; + current_frame_ = frames_.front(); + host_->InitializationComplete(); + } } - - frame_available_.Signal(); } void VideoThread::ScheduleRead() { |