author     servolk <servolk@chromium.org>        2016-01-12 19:47:57 -0800
committer  Commit bot <commit-bot@chromium.org>  2016-01-13 03:49:16 +0000
commit     7becb217736960b5cb08d0de21a674effbcd626c (patch)
tree       adead4c91860dc4f57c02350b814fc2b1b91734f
parent     04a9cf00d60912dde3e6fe4caa66b205d14acb68 (diff)
Rename media::SourceState -> MediaSourceState and move to separate file
BUG=525836
Review URL: https://codereview.chromium.org/1577983005
Cr-Commit-Position: refs/heads/master@{#369099}
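For quick orientation before the hunks below: after this change, ChunkDemuxer constructs a MediaSourceState (declared in the new media/filters/media_source_state.h) instead of the chunk_demuxer.cc-local SourceState, and AppendData() now takes a MediaSourceState::InitSegmentReceivedCB. A condensed sketch of the new AddId() call shape, lifted from the hunks below; it is not compilable outside the Chromium tree:

#include "media/filters/media_source_state.h"  // new header introduced by this patch

// Inside ChunkDemuxer::AddId(): the per-source-id state object is now the
// externally defined MediaSourceState rather than the file-local SourceState.
scoped_ptr<MediaSourceState> source_state(new MediaSourceState(
    std::move(stream_parser), std::move(frame_processor),
    base::Bind(&ChunkDemuxer::CreateDemuxerStream, base::Unretained(this)),
    media_log_));

// Text-track plumbing keeps the same callback type under its new owner.
MediaSourceState::NewTextTrackCB new_text_track_cb;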
-rw-r--r--  media/BUILD.gn                           2
-rw-r--r--  media/filters/chunk_demuxer.cc           898
-rw-r--r--  media/filters/chunk_demuxer.h            25
-rw-r--r--  media/filters/chunk_demuxer_unittest.cc  2
-rw-r--r--  media/filters/media_source_state.cc      697
-rw-r--r--  media/filters/media_source_state.h       214
-rw-r--r--  media/media.gyp                          2
7 files changed, 948 insertions, 892 deletions
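The buffered-range helper that used to be the static ComputeIntersection() in chunk_demuxer.cc is re-exposed by this patch as MediaSourceState::ComputeRangesIntersection(), still implementing the HTMLMediaElement.buffered steps from the MSE spec. The following standalone sketch walks the same steps over plain interval pairs; the Range/Ranges aliases and the Intersect() helper are illustrative stand-ins for media::Ranges<TimeDelta> and IntersectionWith(), not Chromium APIs.

#include <algorithm>
#include <utility>
#include <vector>

using Range = std::pair<double, double>;  // one buffered range [start, end], in seconds
using Ranges = std::vector<Range>;        // sorted, non-overlapping ranges of one SourceBuffer

// Stand-in for Ranges<TimeDelta>::IntersectionWith(): intersect two sorted range lists.
static Ranges Intersect(const Ranges& a, const Ranges& b) {
  Ranges out;
  size_t i = 0, j = 0;
  while (i < a.size() && j < b.size()) {
    double start = std::max(a[i].first, b[j].first);
    double end = std::min(a[i].second, b[j].second);
    if (start < end)
      out.push_back({start, end});
    // Advance whichever range ends first.
    if (a[i].second < b[j].second)
      ++i;
    else
      ++j;
  }
  return out;
}

// Simplified ComputeRangesIntersection: steps 1-5 of the MSE
// HTMLMediaElement.buffered algorithm over all active SourceBuffers.
Ranges ComputeRangesIntersection(const std::vector<Ranges>& active_ranges,
                                 bool ended) {
  if (active_ranges.empty())  // Step 1: no active SourceBuffers -> empty result.
    return Ranges();

  double highest_end_time = 0.0;  // Steps 2-3: largest end time across buffers.
  for (const Ranges& ranges : active_ranges) {
    if (!ranges.empty())
      highest_end_time = std::max(highest_end_time, ranges.back().second);
  }

  // Step 4: start from a single range [0, highest end time].
  Ranges intersection = {{0.0, highest_end_time}};

  for (Ranges source : active_ranges) {  // Step 5: fold in each SourceBuffer.
    if (ended && !source.empty())        // Step 5.2: extend the last range when ended.
      source.back().second = highest_end_time;
    intersection = Intersect(intersection, source);  // Steps 5.3-5.4.
  }
  return intersection;
}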
diff --git a/media/BUILD.gn b/media/BUILD.gn
index e16f1f2..96ccfba 100644
--- a/media/BUILD.gn
+++ b/media/BUILD.gn
@@ -226,6 +226,8 @@ component("media") {
"filters/ivf_parser.h",
"filters/jpeg_parser.cc",
"filters/jpeg_parser.h",
+ "filters/media_source_state.cc",
+ "filters/media_source_state.h",
"filters/opus_constants.cc",
"filters/opus_constants.h",
"filters/source_buffer_range.cc",
diff --git a/media/filters/chunk_demuxer.cc b/media/filters/chunk_demuxer.cc
index 1fd3a83..f8bffcd 100644
--- a/media/filters/chunk_demuxer.cc
+++ b/media/filters/chunk_demuxer.cc
@@ -26,864 +26,6 @@ using base::TimeDelta;
namespace media {
-static TimeDelta EndTimestamp(const StreamParser::BufferQueue& queue) {
- return queue.back()->timestamp() + queue.back()->duration();
-}
-
-// List of time ranges for each SourceBuffer.
-typedef std::list<Ranges<TimeDelta> > RangesList;
-static Ranges<TimeDelta> ComputeIntersection(const RangesList& activeRanges,
- bool ended) {
- // Implementation of HTMLMediaElement.buffered algorithm in MSE spec.
- // https://dvcs.w3.org/hg/html-media/raw-file/default/media-source/media-source.html#dom-htmlmediaelement.buffered
-
- // Step 1: If activeSourceBuffers.length equals 0 then return an empty
- // TimeRanges object and abort these steps.
- if (activeRanges.empty())
- return Ranges<TimeDelta>();
-
- // Step 2: Let active ranges be the ranges returned by buffered for each
- // SourceBuffer object in activeSourceBuffers.
- // Step 3: Let highest end time be the largest range end time in the active
- // ranges.
- TimeDelta highest_end_time;
- for (RangesList::const_iterator itr = activeRanges.begin();
- itr != activeRanges.end(); ++itr) {
- if (!itr->size())
- continue;
-
- highest_end_time = std::max(highest_end_time, itr->end(itr->size() - 1));
- }
-
- // Step 4: Let intersection ranges equal a TimeRange object containing a
- // single range from 0 to highest end time.
- Ranges<TimeDelta> intersection_ranges;
- intersection_ranges.Add(TimeDelta(), highest_end_time);
-
- // Step 5: For each SourceBuffer object in activeSourceBuffers run the
- // following steps:
- for (RangesList::const_iterator itr = activeRanges.begin();
- itr != activeRanges.end(); ++itr) {
- // Step 5.1: Let source ranges equal the ranges returned by the buffered
- // attribute on the current SourceBuffer.
- Ranges<TimeDelta> source_ranges = *itr;
-
- // Step 5.2: If readyState is "ended", then set the end time on the last
- // range in source ranges to highest end time.
- if (ended && source_ranges.size() > 0u) {
- source_ranges.Add(source_ranges.start(source_ranges.size() - 1),
- highest_end_time);
- }
-
- // Step 5.3: Let new intersection ranges equal the intersection between
- // the intersection ranges and the source ranges.
- // Step 5.4: Replace the ranges in intersection ranges with the new
- // intersection ranges.
- intersection_ranges = intersection_ranges.IntersectionWith(source_ranges);
- }
-
- return intersection_ranges;
-}
-
-// Contains state belonging to a source id.
-// TODO: SourceState needs to be moved to a separate file and covered with unit
-// tests (see crbug.com/525836)
-class SourceState {
- public:
- // Callback signature used to create ChunkDemuxerStreams.
- typedef base::Callback<ChunkDemuxerStream*(
- DemuxerStream::Type)> CreateDemuxerStreamCB;
-
- typedef ChunkDemuxer::InitSegmentReceivedCB InitSegmentReceivedCB;
-
- typedef base::Callback<void(
- ChunkDemuxerStream*, const TextTrackConfig&)> NewTextTrackCB;
-
- SourceState(scoped_ptr<StreamParser> stream_parser,
- scoped_ptr<FrameProcessor> frame_processor,
- const CreateDemuxerStreamCB& create_demuxer_stream_cb,
- const scoped_refptr<MediaLog>& media_log);
-
- ~SourceState();
-
- void Init(const StreamParser::InitCB& init_cb,
- bool allow_audio,
- bool allow_video,
- const StreamParser::EncryptedMediaInitDataCB&
- encrypted_media_init_data_cb,
- const NewTextTrackCB& new_text_track_cb);
-
- // Appends new data to the StreamParser.
- // Returns true if the data was successfully appended. Returns false if an
- // error occurred. |*timestamp_offset| is used and possibly updated by the
- // append. |append_window_start| and |append_window_end| correspond to the MSE
- // spec's similarly named source buffer attributes that are used in coded
- // frame processing. |init_segment_received_cb| is run for each new fully
- // parsed initialization segment.
- bool Append(const uint8_t* data,
- size_t length,
- TimeDelta append_window_start,
- TimeDelta append_window_end,
- TimeDelta* timestamp_offset,
- const InitSegmentReceivedCB& init_segment_received_cb);
-
- // Aborts the current append sequence and resets the parser.
- void ResetParserState(TimeDelta append_window_start,
- TimeDelta append_window_end,
- TimeDelta* timestamp_offset);
-
- // Calls Remove(|start|, |end|, |duration|) on all
- // ChunkDemuxerStreams managed by this object.
- void Remove(TimeDelta start, TimeDelta end, TimeDelta duration);
-
- // If the buffer is full, attempts to try to free up space, as specified in
- // the "Coded Frame Eviction Algorithm" in the Media Source Extensions Spec.
- // Returns false iff buffer is still full after running eviction.
- // https://w3c.github.io/media-source/#sourcebuffer-coded-frame-eviction
- bool EvictCodedFrames(DecodeTimestamp media_time, size_t newDataSize);
-
- // Returns true if currently parsing a media segment, or false otherwise.
- bool parsing_media_segment() const { return parsing_media_segment_; }
-
- // Sets |frame_processor_|'s sequence mode to |sequence_mode|.
- void SetSequenceMode(bool sequence_mode);
-
- // Signals the coded frame processor to update its group start timestamp to be
- // |timestamp_offset| if it is in sequence append mode.
- void SetGroupStartTimestampIfInSequenceMode(base::TimeDelta timestamp_offset);
-
- // Returns the range of buffered data in this source, capped at |duration|.
- // |ended| - Set to true if end of stream has been signaled and the special
- // end of stream range logic needs to be executed.
- Ranges<TimeDelta> GetBufferedRanges(TimeDelta duration, bool ended) const;
-
- // Returns the highest buffered duration across all streams managed
- // by this object.
- // Returns TimeDelta() if none of the streams contain buffered data.
- TimeDelta GetMaxBufferedDuration() const;
-
- // Helper methods that call methods with similar names on all the
- // ChunkDemuxerStreams managed by this object.
- void StartReturningData();
- void AbortReads();
- void Seek(TimeDelta seek_time);
- void CompletePendingReadIfPossible();
- void OnSetDuration(TimeDelta duration);
- void MarkEndOfStream();
- void UnmarkEndOfStream();
- void Shutdown();
- // Sets the memory limit on each stream of a specific type.
- // |memory_limit| is the maximum number of bytes each stream of type |type|
- // is allowed to hold in its buffer.
- void SetMemoryLimits(DemuxerStream::Type type, size_t memory_limit);
- bool IsSeekWaitingForData() const;
-
- private:
- // Called by the |stream_parser_| when a new initialization segment is
- // encountered.
- // Returns true on a successful call. Returns false if an error occurred while
- // processing decoder configurations.
- bool OnNewConfigs(bool allow_audio, bool allow_video,
- const AudioDecoderConfig& audio_config,
- const VideoDecoderConfig& video_config,
- const StreamParser::TextTrackConfigMap& text_configs);
-
- // Called by the |stream_parser_| at the beginning of a new media segment.
- void OnNewMediaSegment();
-
- // Called by the |stream_parser_| at the end of a media segment.
- void OnEndOfMediaSegment();
-
- // Called by the |stream_parser_| when new buffers have been parsed.
- // It processes the new buffers using |frame_processor_|, which includes
- // appending the processed frames to associated demuxer streams for each
- // frame's track.
- // Returns true on a successful call. Returns false if an error occurred while
- // processing the buffers.
- bool OnNewBuffers(const StreamParser::BufferQueue& audio_buffers,
- const StreamParser::BufferQueue& video_buffers,
- const StreamParser::TextBufferQueueMap& text_map);
-
- void OnSourceInitDone(const StreamParser::InitParameters& params);
-
- // EstimateVideoDataSize uses some heuristics to estimate the size of the
- // video size in the chunk of muxed audio/video data without parsing it.
- // This is used by EvictCodedFrames algorithm, which happens before Append
- // (and therefore before parsing is performed) to prepare space for new data.
- size_t EstimateVideoDataSize(size_t muxed_data_chunk_size) const;
-
- CreateDemuxerStreamCB create_demuxer_stream_cb_;
- NewTextTrackCB new_text_track_cb_;
-
- // During Append(), if OnNewBuffers() coded frame processing updates the
- // timestamp offset then |*timestamp_offset_during_append_| is also updated
- // so Append()'s caller can know the new offset. This pointer is only non-NULL
- // during the lifetime of an Append() call.
- TimeDelta* timestamp_offset_during_append_;
-
- // During Append(), coded frame processing triggered by OnNewBuffers()
- // requires these two attributes. These are only valid during the lifetime of
- // an Append() call.
- TimeDelta append_window_start_during_append_;
- TimeDelta append_window_end_during_append_;
-
- // Set to true if the next buffers appended within the append window
- // represent the start of a new media segment. This flag being set
- // triggers a call to |new_segment_cb_| when the new buffers are
- // appended. The flag is set on actual media segment boundaries and
- // when the "append window" filtering causes discontinuities in the
- // appended data.
- // TODO(wolenetz/acolwell): Investigate if we need this, or if coded frame
- // processing's discontinuity logic is enough. See http://crbug.com/351489.
- bool new_media_segment_;
-
- // Keeps track of whether a media segment is being parsed.
- bool parsing_media_segment_;
-
- // The object used to parse appended data.
- scoped_ptr<StreamParser> stream_parser_;
-
- ChunkDemuxerStream* audio_; // Not owned by |this|.
- ChunkDemuxerStream* video_; // Not owned by |this|.
-
- typedef std::map<StreamParser::TrackId, ChunkDemuxerStream*> TextStreamMap;
- TextStreamMap text_stream_map_; // |this| owns the map's stream pointers.
-
- scoped_ptr<FrameProcessor> frame_processor_;
- scoped_refptr<MediaLog> media_log_;
- StreamParser::InitCB init_cb_;
-
- // During Append(), OnNewConfigs() will trigger the initialization segment
- // received algorithm. This callback is only non-NULL during the lifetime of
- // an Append() call. Note, the MSE spec explicitly disallows this algorithm
- // during an Abort(), since Abort() is allowed only to emit coded frames, and
- // only if the parser is PARSING_MEDIA_SEGMENT (not an INIT segment).
- InitSegmentReceivedCB init_segment_received_cb_;
-
- // Indicates that timestampOffset should be updated automatically during
- // OnNewBuffers() based on the earliest end timestamp of the buffers provided.
- // TODO(wolenetz): Refactor this function while integrating April 29, 2014
- // changes to MSE spec. See http://crbug.com/371499.
- bool auto_update_timestamp_offset_;
-
- DISALLOW_COPY_AND_ASSIGN(SourceState);
-};
-
-SourceState::SourceState(scoped_ptr<StreamParser> stream_parser,
- scoped_ptr<FrameProcessor> frame_processor,
- const CreateDemuxerStreamCB& create_demuxer_stream_cb,
- const scoped_refptr<MediaLog>& media_log)
- : create_demuxer_stream_cb_(create_demuxer_stream_cb),
- timestamp_offset_during_append_(NULL),
- new_media_segment_(false),
- parsing_media_segment_(false),
- stream_parser_(stream_parser.release()),
- audio_(NULL),
- video_(NULL),
- frame_processor_(frame_processor.release()),
- media_log_(media_log),
- auto_update_timestamp_offset_(false) {
- DCHECK(!create_demuxer_stream_cb_.is_null());
- DCHECK(frame_processor_);
-}
-
-SourceState::~SourceState() {
- Shutdown();
-
- STLDeleteValues(&text_stream_map_);
-}
-
-void SourceState::Init(
- const StreamParser::InitCB& init_cb,
- bool allow_audio,
- bool allow_video,
- const StreamParser::EncryptedMediaInitDataCB& encrypted_media_init_data_cb,
- const NewTextTrackCB& new_text_track_cb) {
- new_text_track_cb_ = new_text_track_cb;
- init_cb_ = init_cb;
-
- stream_parser_->Init(
- base::Bind(&SourceState::OnSourceInitDone, base::Unretained(this)),
- base::Bind(&SourceState::OnNewConfigs, base::Unretained(this),
- allow_audio, allow_video),
- base::Bind(&SourceState::OnNewBuffers, base::Unretained(this)),
- new_text_track_cb_.is_null(), encrypted_media_init_data_cb,
- base::Bind(&SourceState::OnNewMediaSegment, base::Unretained(this)),
- base::Bind(&SourceState::OnEndOfMediaSegment, base::Unretained(this)),
- media_log_);
-}
-
-void SourceState::SetSequenceMode(bool sequence_mode) {
- DCHECK(!parsing_media_segment_);
-
- frame_processor_->SetSequenceMode(sequence_mode);
-}
-
-void SourceState::SetGroupStartTimestampIfInSequenceMode(
- base::TimeDelta timestamp_offset) {
- DCHECK(!parsing_media_segment_);
-
- frame_processor_->SetGroupStartTimestampIfInSequenceMode(timestamp_offset);
-}
-
-bool SourceState::Append(
- const uint8_t* data,
- size_t length,
- TimeDelta append_window_start,
- TimeDelta append_window_end,
- TimeDelta* timestamp_offset,
- const InitSegmentReceivedCB& init_segment_received_cb) {
- DCHECK(timestamp_offset);
- DCHECK(!timestamp_offset_during_append_);
- DCHECK(!init_segment_received_cb.is_null());
- DCHECK(init_segment_received_cb_.is_null());
- append_window_start_during_append_ = append_window_start;
- append_window_end_during_append_ = append_window_end;
- timestamp_offset_during_append_ = timestamp_offset;
- init_segment_received_cb_= init_segment_received_cb;
-
- // TODO(wolenetz/acolwell): Curry and pass a NewBuffersCB here bound with
- // append window and timestamp offset pointer. See http://crbug.com/351454.
- bool result = stream_parser_->Parse(data, length);
- if (!result) {
- MEDIA_LOG(ERROR, media_log_)
- << __FUNCTION__ << ": stream parsing failed."
- << " Data size=" << length
- << " append_window_start=" << append_window_start.InSecondsF()
- << " append_window_end=" << append_window_end.InSecondsF();
- }
- timestamp_offset_during_append_ = NULL;
- init_segment_received_cb_.Reset();
- return result;
-}
-
-void SourceState::ResetParserState(TimeDelta append_window_start,
- TimeDelta append_window_end,
- base::TimeDelta* timestamp_offset) {
- DCHECK(timestamp_offset);
- DCHECK(!timestamp_offset_during_append_);
- timestamp_offset_during_append_ = timestamp_offset;
- append_window_start_during_append_ = append_window_start;
- append_window_end_during_append_ = append_window_end;
-
- stream_parser_->Flush();
- timestamp_offset_during_append_ = NULL;
-
- frame_processor_->Reset();
- parsing_media_segment_ = false;
-}
-
-void SourceState::Remove(TimeDelta start, TimeDelta end, TimeDelta duration) {
- if (audio_)
- audio_->Remove(start, end, duration);
-
- if (video_)
- video_->Remove(start, end, duration);
-
- for (TextStreamMap::iterator itr = text_stream_map_.begin();
- itr != text_stream_map_.end(); ++itr) {
- itr->second->Remove(start, end, duration);
- }
-}
-
-size_t SourceState::EstimateVideoDataSize(size_t muxed_data_chunk_size) const {
- DCHECK(audio_);
- DCHECK(video_);
-
- size_t videoBufferedSize = video_->GetBufferedSize();
- size_t audioBufferedSize = audio_->GetBufferedSize();
- if (videoBufferedSize == 0 || audioBufferedSize == 0) {
- // At this point either audio or video buffer is empty, which means buffer
- // levels are probably low anyway and we should have enough space in the
- // buffers for appending new data, so just take a very rough guess.
- return muxed_data_chunk_size * 7 / 8;
- }
-
- // We need to estimate how much audio and video data is going to be in the
- // newly appended data chunk to make space for the new data. And we need to do
- // that without parsing the data (which will happen later, in the Append
- // phase). So for now we can only rely on some heuristic here. Let's assume
- // that the proportion of the audio/video in the new data chunk is the same as
- // the current ratio of buffered audio/video.
- // Longer term this should go away once we further change the MSE GC algorithm
- // to work across all streams of a SourceBuffer (see crbug.com/520704).
- double videoBufferedSizeF = static_cast<double>(videoBufferedSize);
- double audioBufferedSizeF = static_cast<double>(audioBufferedSize);
-
- double totalBufferedSizeF = videoBufferedSizeF + audioBufferedSizeF;
- CHECK_GT(totalBufferedSizeF, 0.0);
-
- double videoRatio = videoBufferedSizeF / totalBufferedSizeF;
- CHECK_GE(videoRatio, 0.0);
- CHECK_LE(videoRatio, 1.0);
- double estimatedVideoSize = muxed_data_chunk_size * videoRatio;
- return static_cast<size_t>(estimatedVideoSize);
-}
-
-bool SourceState::EvictCodedFrames(DecodeTimestamp media_time,
- size_t newDataSize) {
- bool success = true;
-
- DVLOG(3) << __FUNCTION__ << " media_time=" << media_time.InSecondsF()
- << " newDataSize=" << newDataSize
- << " videoBufferedSize=" << (video_ ? video_->GetBufferedSize() : 0)
- << " audioBufferedSize=" << (audio_ ? audio_->GetBufferedSize() : 0);
-
- size_t newAudioSize = 0;
- size_t newVideoSize = 0;
- if (audio_ && video_) {
- newVideoSize = EstimateVideoDataSize(newDataSize);
- newAudioSize = newDataSize - newVideoSize;
- } else if (video_) {
- newVideoSize = newDataSize;
- } else if (audio_) {
- newAudioSize = newDataSize;
- }
-
- DVLOG(3) << __FUNCTION__ << " estimated audio/video sizes: "
- << " newVideoSize=" << newVideoSize
- << " newAudioSize=" << newAudioSize;
-
- if (audio_)
- success = audio_->EvictCodedFrames(media_time, newAudioSize) && success;
-
- if (video_)
- success = video_->EvictCodedFrames(media_time, newVideoSize) && success;
-
- for (TextStreamMap::iterator itr = text_stream_map_.begin();
- itr != text_stream_map_.end(); ++itr) {
- success = itr->second->EvictCodedFrames(media_time, 0) && success;
- }
-
- DVLOG(3) << __FUNCTION__ << " result=" << success
- << " videoBufferedSize=" << (video_ ? video_->GetBufferedSize() : 0)
- << " audioBufferedSize=" << (audio_ ? audio_->GetBufferedSize() : 0);
-
- return success;
-}
-
-Ranges<TimeDelta> SourceState::GetBufferedRanges(TimeDelta duration,
- bool ended) const {
- // TODO(acolwell): When we start allowing disabled tracks we'll need to update
- // this code to only add ranges from active tracks.
- RangesList ranges_list;
- if (audio_)
- ranges_list.push_back(audio_->GetBufferedRanges(duration));
-
- if (video_)
- ranges_list.push_back(video_->GetBufferedRanges(duration));
-
- for (TextStreamMap::const_iterator itr = text_stream_map_.begin();
- itr != text_stream_map_.end(); ++itr) {
- ranges_list.push_back(itr->second->GetBufferedRanges(duration));
- }
-
- return ComputeIntersection(ranges_list, ended);
-}
-
-TimeDelta SourceState::GetMaxBufferedDuration() const {
- TimeDelta max_duration;
-
- if (audio_)
- max_duration = std::max(max_duration, audio_->GetBufferedDuration());
-
- if (video_)
- max_duration = std::max(max_duration, video_->GetBufferedDuration());
-
- for (TextStreamMap::const_iterator itr = text_stream_map_.begin();
- itr != text_stream_map_.end(); ++itr) {
- max_duration = std::max(max_duration, itr->second->GetBufferedDuration());
- }
-
- return max_duration;
-}
-
-void SourceState::StartReturningData() {
- if (audio_)
- audio_->StartReturningData();
-
- if (video_)
- video_->StartReturningData();
-
- for (TextStreamMap::iterator itr = text_stream_map_.begin();
- itr != text_stream_map_.end(); ++itr) {
- itr->second->StartReturningData();
- }
-}
-
-void SourceState::AbortReads() {
- if (audio_)
- audio_->AbortReads();
-
- if (video_)
- video_->AbortReads();
-
- for (TextStreamMap::iterator itr = text_stream_map_.begin();
- itr != text_stream_map_.end(); ++itr) {
- itr->second->AbortReads();
- }
-}
-
-void SourceState::Seek(TimeDelta seek_time) {
- if (audio_)
- audio_->Seek(seek_time);
-
- if (video_)
- video_->Seek(seek_time);
-
- for (TextStreamMap::iterator itr = text_stream_map_.begin();
- itr != text_stream_map_.end(); ++itr) {
- itr->second->Seek(seek_time);
- }
-}
-
-void SourceState::CompletePendingReadIfPossible() {
- if (audio_)
- audio_->CompletePendingReadIfPossible();
-
- if (video_)
- video_->CompletePendingReadIfPossible();
-
- for (TextStreamMap::iterator itr = text_stream_map_.begin();
- itr != text_stream_map_.end(); ++itr) {
- itr->second->CompletePendingReadIfPossible();
- }
-}
-
-void SourceState::OnSetDuration(TimeDelta duration) {
- if (audio_)
- audio_->OnSetDuration(duration);
-
- if (video_)
- video_->OnSetDuration(duration);
-
- for (TextStreamMap::iterator itr = text_stream_map_.begin();
- itr != text_stream_map_.end(); ++itr) {
- itr->second->OnSetDuration(duration);
- }
-}
-
-void SourceState::MarkEndOfStream() {
- if (audio_)
- audio_->MarkEndOfStream();
-
- if (video_)
- video_->MarkEndOfStream();
-
- for (TextStreamMap::iterator itr = text_stream_map_.begin();
- itr != text_stream_map_.end(); ++itr) {
- itr->second->MarkEndOfStream();
- }
-}
-
-void SourceState::UnmarkEndOfStream() {
- if (audio_)
- audio_->UnmarkEndOfStream();
-
- if (video_)
- video_->UnmarkEndOfStream();
-
- for (TextStreamMap::iterator itr = text_stream_map_.begin();
- itr != text_stream_map_.end(); ++itr) {
- itr->second->UnmarkEndOfStream();
- }
-}
-
-void SourceState::Shutdown() {
- if (audio_)
- audio_->Shutdown();
-
- if (video_)
- video_->Shutdown();
-
- for (TextStreamMap::iterator itr = text_stream_map_.begin();
- itr != text_stream_map_.end(); ++itr) {
- itr->second->Shutdown();
- }
-}
-
-void SourceState::SetMemoryLimits(DemuxerStream::Type type,
- size_t memory_limit) {
- switch (type) {
- case DemuxerStream::AUDIO:
- if (audio_)
- audio_->SetStreamMemoryLimit(memory_limit);
- break;
- case DemuxerStream::VIDEO:
- if (video_)
- video_->SetStreamMemoryLimit(memory_limit);
- break;
- case DemuxerStream::TEXT:
- for (TextStreamMap::iterator itr = text_stream_map_.begin();
- itr != text_stream_map_.end(); ++itr) {
- itr->second->SetStreamMemoryLimit(memory_limit);
- }
- break;
- case DemuxerStream::UNKNOWN:
- case DemuxerStream::NUM_TYPES:
- NOTREACHED();
- break;
- }
-}
-
-bool SourceState::IsSeekWaitingForData() const {
- if (audio_ && audio_->IsSeekWaitingForData())
- return true;
-
- if (video_ && video_->IsSeekWaitingForData())
- return true;
-
- // NOTE: We are intentionally not checking the text tracks
- // because text tracks are discontinuous and may not have data
- // for the seek position. This is ok and playback should not be
- // stalled because we don't have cues. If cues, with timestamps after
- // the seek time, eventually arrive they will be delivered properly
- // in response to ChunkDemuxerStream::Read() calls.
-
- return false;
-}
-
-bool SourceState::OnNewConfigs(
- bool allow_audio, bool allow_video,
- const AudioDecoderConfig& audio_config,
- const VideoDecoderConfig& video_config,
- const StreamParser::TextTrackConfigMap& text_configs) {
- DVLOG(1) << "OnNewConfigs(" << allow_audio << ", " << allow_video
- << ", " << audio_config.IsValidConfig()
- << ", " << video_config.IsValidConfig() << ")";
- DCHECK(!init_segment_received_cb_.is_null());
-
- if (!audio_config.IsValidConfig() && !video_config.IsValidConfig()) {
- DVLOG(1) << "OnNewConfigs() : Audio & video config are not valid!";
- return false;
- }
-
- // Signal an error if we get configuration info for stream types that weren't
- // specified in AddId() or more configs after a stream is initialized.
- if (allow_audio != audio_config.IsValidConfig()) {
- MEDIA_LOG(ERROR, media_log_)
- << "Initialization segment"
- << (audio_config.IsValidConfig() ? " has" : " does not have")
- << " an audio track, but the mimetype"
- << (allow_audio ? " specifies" : " does not specify")
- << " an audio codec.";
- return false;
- }
-
- if (allow_video != video_config.IsValidConfig()) {
- MEDIA_LOG(ERROR, media_log_)
- << "Initialization segment"
- << (video_config.IsValidConfig() ? " has" : " does not have")
- << " a video track, but the mimetype"
- << (allow_video ? " specifies" : " does not specify")
- << " a video codec.";
- return false;
- }
-
- bool success = true;
- if (audio_config.IsValidConfig()) {
- if (!audio_) {
- media_log_->SetBooleanProperty("found_audio_stream", true);
- }
- if (!audio_ ||
- audio_->audio_decoder_config().codec() != audio_config.codec()) {
- media_log_->SetStringProperty("audio_codec_name",
- GetCodecName(audio_config.codec()));
- }
-
- if (!audio_) {
- audio_ = create_demuxer_stream_cb_.Run(DemuxerStream::AUDIO);
-
- if (!audio_) {
- DVLOG(1) << "Failed to create an audio stream.";
- return false;
- }
-
- if (!frame_processor_->AddTrack(FrameProcessor::kAudioTrackId, audio_)) {
- DVLOG(1) << "Failed to add audio track to frame processor.";
- return false;
- }
- }
-
- frame_processor_->OnPossibleAudioConfigUpdate(audio_config);
- success &= audio_->UpdateAudioConfig(audio_config, media_log_);
- }
-
- if (video_config.IsValidConfig()) {
- if (!video_) {
- media_log_->SetBooleanProperty("found_video_stream", true);
- }
- if (!video_ ||
- video_->video_decoder_config().codec() != video_config.codec()) {
- media_log_->SetStringProperty("video_codec_name",
- GetCodecName(video_config.codec()));
- }
-
- if (!video_) {
- video_ = create_demuxer_stream_cb_.Run(DemuxerStream::VIDEO);
-
- if (!video_) {
- DVLOG(1) << "Failed to create a video stream.";
- return false;
- }
-
- if (!frame_processor_->AddTrack(FrameProcessor::kVideoTrackId, video_)) {
- DVLOG(1) << "Failed to add video track to frame processor.";
- return false;
- }
- }
-
- success &= video_->UpdateVideoConfig(video_config, media_log_);
- }
-
- typedef StreamParser::TextTrackConfigMap::const_iterator TextConfigItr;
- if (text_stream_map_.empty()) {
- for (TextConfigItr itr = text_configs.begin();
- itr != text_configs.end(); ++itr) {
- ChunkDemuxerStream* const text_stream =
- create_demuxer_stream_cb_.Run(DemuxerStream::TEXT);
- if (!frame_processor_->AddTrack(itr->first, text_stream)) {
- success &= false;
- MEDIA_LOG(ERROR, media_log_) << "Failed to add text track ID "
- << itr->first << " to frame processor.";
- break;
- }
- text_stream->UpdateTextConfig(itr->second, media_log_);
- text_stream_map_[itr->first] = text_stream;
- new_text_track_cb_.Run(text_stream, itr->second);
- }
- } else {
- const size_t text_count = text_stream_map_.size();
- if (text_configs.size() != text_count) {
- success &= false;
- MEDIA_LOG(ERROR, media_log_)
- << "The number of text track configs changed.";
- } else if (text_count == 1) {
- TextConfigItr config_itr = text_configs.begin();
- TextStreamMap::iterator stream_itr = text_stream_map_.begin();
- ChunkDemuxerStream* text_stream = stream_itr->second;
- TextTrackConfig old_config = text_stream->text_track_config();
- TextTrackConfig new_config(config_itr->second.kind(),
- config_itr->second.label(),
- config_itr->second.language(),
- old_config.id());
- if (!new_config.Matches(old_config)) {
- success &= false;
- MEDIA_LOG(ERROR, media_log_)
- << "New text track config does not match old one.";
- } else {
- StreamParser::TrackId old_id = stream_itr->first;
- StreamParser::TrackId new_id = config_itr->first;
- if (new_id != old_id) {
- if (frame_processor_->UpdateTrack(old_id, new_id)) {
- text_stream_map_.clear();
- text_stream_map_[config_itr->first] = text_stream;
- } else {
- success &= false;
- MEDIA_LOG(ERROR, media_log_)
- << "Error remapping single text track number";
- }
- }
- }
- } else {
- for (TextConfigItr config_itr = text_configs.begin();
- config_itr != text_configs.end(); ++config_itr) {
- TextStreamMap::iterator stream_itr =
- text_stream_map_.find(config_itr->first);
- if (stream_itr == text_stream_map_.end()) {
- success &= false;
- MEDIA_LOG(ERROR, media_log_)
- << "Unexpected text track configuration for track ID "
- << config_itr->first;
- break;
- }
-
- const TextTrackConfig& new_config = config_itr->second;
- ChunkDemuxerStream* stream = stream_itr->second;
- TextTrackConfig old_config = stream->text_track_config();
- if (!new_config.Matches(old_config)) {
- success &= false;
- MEDIA_LOG(ERROR, media_log_) << "New text track config for track ID "
- << config_itr->first
- << " does not match old one.";
- break;
- }
- }
- }
- }
-
- frame_processor_->SetAllTrackBuffersNeedRandomAccessPoint();
-
- DVLOG(1) << "OnNewConfigs() : " << (success ? "success" : "failed");
- if (success)
- init_segment_received_cb_.Run();
-
- return success;
-}
-
-void SourceState::OnNewMediaSegment() {
- DVLOG(2) << "OnNewMediaSegment()";
- parsing_media_segment_ = true;
- new_media_segment_ = true;
-}
-
-void SourceState::OnEndOfMediaSegment() {
- DVLOG(2) << "OnEndOfMediaSegment()";
- parsing_media_segment_ = false;
- new_media_segment_ = false;
-}
-
-bool SourceState::OnNewBuffers(
- const StreamParser::BufferQueue& audio_buffers,
- const StreamParser::BufferQueue& video_buffers,
- const StreamParser::TextBufferQueueMap& text_map) {
- DVLOG(2) << "OnNewBuffers()";
- DCHECK(timestamp_offset_during_append_);
- DCHECK(parsing_media_segment_);
-
- const TimeDelta timestamp_offset_before_processing =
- *timestamp_offset_during_append_;
-
- // Calculate the new timestamp offset for audio/video tracks if the stream
- // parser has requested automatic updates.
- TimeDelta new_timestamp_offset = timestamp_offset_before_processing;
- if (auto_update_timestamp_offset_) {
- const bool have_audio_buffers = !audio_buffers.empty();
- const bool have_video_buffers = !video_buffers.empty();
- if (have_audio_buffers && have_video_buffers) {
- new_timestamp_offset +=
- std::min(EndTimestamp(audio_buffers), EndTimestamp(video_buffers));
- } else if (have_audio_buffers) {
- new_timestamp_offset += EndTimestamp(audio_buffers);
- } else if (have_video_buffers) {
- new_timestamp_offset += EndTimestamp(video_buffers);
- }
- }
-
- if (!frame_processor_->ProcessFrames(audio_buffers,
- video_buffers,
- text_map,
- append_window_start_during_append_,
- append_window_end_during_append_,
- &new_media_segment_,
- timestamp_offset_during_append_)) {
- return false;
- }
-
- // Only update the timestamp offset if the frame processor hasn't already.
- if (auto_update_timestamp_offset_ &&
- timestamp_offset_before_processing == *timestamp_offset_during_append_) {
- *timestamp_offset_during_append_ = new_timestamp_offset;
- }
-
- return true;
-}
-
-void SourceState::OnSourceInitDone(const StreamParser::InitParameters& params) {
- auto_update_timestamp_offset_ = params.auto_update_timestamp_offset;
- base::ResetAndReturn(&init_cb_).Run(params);
-}
-
ChunkDemuxerStream::ChunkDemuxerStream(Type type,
bool splice_frames_enabled)
: type_(type),
@@ -933,7 +75,7 @@ bool ChunkDemuxerStream::IsSeekWaitingForData() const {
base::AutoLock auto_lock(lock_);
// This method should not be called for text tracks. See the note in
- // SourceState::IsSeekWaitingForData().
+ // MediaSourceState::IsSeekWaitingForData().
DCHECK_NE(type_, DemuxerStream::TEXT);
return stream_->IsSeekPending();
@@ -1386,12 +528,12 @@ ChunkDemuxer::Status ChunkDemuxer::AddId(const std::string& id,
base::Unretained(this)),
media_log_));
- scoped_ptr<SourceState> source_state(new SourceState(
+ scoped_ptr<MediaSourceState> source_state(new MediaSourceState(
std::move(stream_parser), std::move(frame_processor),
base::Bind(&ChunkDemuxer::CreateDemuxerStream, base::Unretained(this)),
media_log_));
- SourceState::NewTextTrackCB new_text_track_cb;
+ MediaSourceState::NewTextTrackCB new_text_track_cb;
if (enable_text_) {
new_text_track_cb = base::Bind(&ChunkDemuxer::OnNewTextTrack,
@@ -1424,7 +566,7 @@ Ranges<TimeDelta> ChunkDemuxer::GetBufferedRanges(const std::string& id) const {
base::AutoLock auto_lock(lock_);
DCHECK(!id.empty());
- SourceStateMap::const_iterator itr = source_state_map_.find(id);
+ MediaSourceStateMap::const_iterator itr = source_state_map_.find(id);
DCHECK(itr != source_state_map_.end());
return itr->second->GetBufferedRanges(duration_, state_ == ENDED);
@@ -1445,7 +587,7 @@ bool ChunkDemuxer::EvictCodedFrames(const std::string& id,
DecodeTimestamp::FromPresentationTime(currentMediaTime);
DCHECK(!id.empty());
- SourceStateMap::const_iterator itr = source_state_map_.find(id);
+ MediaSourceStateMap::const_iterator itr = source_state_map_.find(id);
if (itr == source_state_map_.end()) {
LOG(WARNING) << __FUNCTION__ << " stream " << id << " not found";
return false;
@@ -1460,7 +602,7 @@ void ChunkDemuxer::AppendData(
TimeDelta append_window_start,
TimeDelta append_window_end,
TimeDelta* timestamp_offset,
- const InitSegmentReceivedCB& init_segment_received_cb) {
+ const MediaSourceState::InitSegmentReceivedCB& init_segment_received_cb) {
DVLOG(1) << "AppendData(" << id << ", " << length << ")";
DCHECK(!id.empty());
@@ -1619,7 +761,7 @@ void ChunkDemuxer::SetDuration(double duration) {
duration_ = duration_td;
host_->SetDuration(duration_);
- for (SourceStateMap::iterator itr = source_state_map_.begin();
+ for (MediaSourceStateMap::iterator itr = source_state_map_.begin();
itr != source_state_map_.end(); ++itr) {
itr->second->OnSetDuration(duration_);
}
@@ -1672,7 +814,7 @@ void ChunkDemuxer::MarkEndOfStream(PipelineStatus status) {
}
bool old_waiting_for_data = IsSeekWaitingForData_Locked();
- for (SourceStateMap::iterator itr = source_state_map_.begin();
+ for (MediaSourceStateMap::iterator itr = source_state_map_.begin();
itr != source_state_map_.end(); ++itr) {
itr->second->MarkEndOfStream();
}
@@ -1701,7 +843,7 @@ void ChunkDemuxer::UnmarkEndOfStream() {
ChangeState_Locked(INITIALIZED);
- for (SourceStateMap::iterator itr = source_state_map_.begin();
+ for (MediaSourceStateMap::iterator itr = source_state_map_.begin();
itr != source_state_map_.end(); ++itr) {
itr->second->UnmarkEndOfStream();
}
@@ -1724,7 +866,7 @@ void ChunkDemuxer::Shutdown() {
void ChunkDemuxer::SetMemoryLimits(DemuxerStream::Type type,
size_t memory_limit) {
- for (SourceStateMap::iterator itr = source_state_map_.begin();
+ for (MediaSourceStateMap::iterator itr = source_state_map_.begin();
itr != source_state_map_.end(); ++itr) {
itr->second->SetMemoryLimits(type, memory_limit);
}
@@ -1772,7 +914,7 @@ void ChunkDemuxer::ReportError_Locked(PipelineStatus error) {
bool ChunkDemuxer::IsSeekWaitingForData_Locked() const {
lock_.AssertAcquired();
- for (SourceStateMap::const_iterator itr = source_state_map_.begin();
+ for (MediaSourceStateMap::const_iterator itr = source_state_map_.begin();
itr != source_state_map_.end(); ++itr) {
if (itr->second->IsSeekWaitingForData())
return true;
@@ -1905,7 +1047,7 @@ void ChunkDemuxer::DecreaseDurationIfNecessary() {
TimeDelta max_duration;
- for (SourceStateMap::const_iterator itr = source_state_map_.begin();
+ for (MediaSourceStateMap::const_iterator itr = source_state_map_.begin();
itr != source_state_map_.end(); ++itr) {
max_duration = std::max(max_duration,
itr->second->GetMaxBufferedDuration());
@@ -1929,45 +1071,45 @@ Ranges<TimeDelta> ChunkDemuxer::GetBufferedRanges_Locked() const {
bool ended = state_ == ENDED;
// TODO(acolwell): When we start allowing SourceBuffers that are not active,
// we'll need to update this loop to only add ranges from active sources.
- RangesList ranges_list;
- for (SourceStateMap::const_iterator itr = source_state_map_.begin();
+ MediaSourceState::RangesList ranges_list;
+ for (MediaSourceStateMap::const_iterator itr = source_state_map_.begin();
itr != source_state_map_.end(); ++itr) {
ranges_list.push_back(itr->second->GetBufferedRanges(duration_, ended));
}
- return ComputeIntersection(ranges_list, ended);
+ return MediaSourceState::ComputeRangesIntersection(ranges_list, ended);
}
void ChunkDemuxer::StartReturningData() {
- for (SourceStateMap::iterator itr = source_state_map_.begin();
+ for (MediaSourceStateMap::iterator itr = source_state_map_.begin();
itr != source_state_map_.end(); ++itr) {
itr->second->StartReturningData();
}
}
void ChunkDemuxer::AbortPendingReads() {
- for (SourceStateMap::iterator itr = source_state_map_.begin();
+ for (MediaSourceStateMap::iterator itr = source_state_map_.begin();
itr != source_state_map_.end(); ++itr) {
itr->second->AbortReads();
}
}
void ChunkDemuxer::SeekAllSources(TimeDelta seek_time) {
- for (SourceStateMap::iterator itr = source_state_map_.begin();
+ for (MediaSourceStateMap::iterator itr = source_state_map_.begin();
itr != source_state_map_.end(); ++itr) {
itr->second->Seek(seek_time);
}
}
void ChunkDemuxer::CompletePendingReadsIfPossible() {
- for (SourceStateMap::iterator itr = source_state_map_.begin();
+ for (MediaSourceStateMap::iterator itr = source_state_map_.begin();
itr != source_state_map_.end(); ++itr) {
itr->second->CompletePendingReadIfPossible();
}
}
void ChunkDemuxer::ShutdownAllStreams() {
- for (SourceStateMap::iterator itr = source_state_map_.begin();
+ for (MediaSourceStateMap::iterator itr = source_state_map_.begin();
itr != source_state_map_.end(); ++itr) {
itr->second->Shutdown();
}
diff --git a/media/filters/chunk_demuxer.h b/media/filters/chunk_demuxer.h
index d07caab..6954793 100644
--- a/media/filters/chunk_demuxer.h
+++ b/media/filters/chunk_demuxer.h
@@ -21,12 +21,12 @@
#include "media/base/demuxer_stream.h"
#include "media/base/ranges.h"
#include "media/base/stream_parser.h"
+#include "media/filters/media_source_state.h"
#include "media/filters/source_buffer_stream.h"
namespace media {
class FFmpegURLProtocol;
-class SourceState;
class MEDIA_EXPORT ChunkDemuxerStream : public DemuxerStream {
public:
@@ -157,8 +157,6 @@ class MEDIA_EXPORT ChunkDemuxer : public Demuxer {
kReachedIdLimit, // Reached ID limit. We can't handle any more IDs.
};
- typedef base::Closure InitSegmentReceivedCB;
-
// |open_cb| Run when Initialize() is called to signal that the demuxer
// is ready to receive media data via AppendData().
// |encrypted_media_init_data_cb| Run when the demuxer determines that an
@@ -237,13 +235,14 @@ class MEDIA_EXPORT ChunkDemuxer : public Demuxer {
// processing.
// |init_segment_received_cb| is run for each newly successfully parsed
// initialization segment.
- void AppendData(const std::string& id,
- const uint8_t* data,
- size_t length,
- base::TimeDelta append_window_start,
- base::TimeDelta append_window_end,
- base::TimeDelta* timestamp_offset,
- const InitSegmentReceivedCB& init_segment_received_cb);
+ void AppendData(
+ const std::string& id,
+ const uint8_t* data,
+ size_t length,
+ base::TimeDelta append_window_start,
+ base::TimeDelta append_window_end,
+ base::TimeDelta* timestamp_offset,
+ const MediaSourceState::InitSegmentReceivedCB& init_segment_received_cb);
// Aborts parsing the current segment and reset the parser to a state where
// it can accept a new segment.
@@ -333,7 +332,7 @@ class MEDIA_EXPORT ChunkDemuxer : public Demuxer {
// false if any can not.
bool CanEndOfStream_Locked() const;
- // SourceState callbacks.
+ // MediaSourceState callbacks.
void OnSourceInitDone(const StreamParser::InitParameters& params);
// Creates a DemuxerStream for the specified |type|.
@@ -411,8 +410,8 @@ class MEDIA_EXPORT ChunkDemuxer : public Demuxer {
base::Time timeline_offset_;
DemuxerStream::Liveness liveness_;
- typedef std::map<std::string, SourceState*> SourceStateMap;
- SourceStateMap source_state_map_;
+ typedef std::map<std::string, MediaSourceState*> MediaSourceStateMap;
+ MediaSourceStateMap source_state_map_;
// Used to ensure that (1) config data matches the type and codec provided in
// AddId(), (2) only 1 audio and 1 video sources are added, and (3) ids may be
diff --git a/media/filters/chunk_demuxer_unittest.cc b/media/filters/chunk_demuxer_unittest.cc
index 94698d4..5e1f777 100644
--- a/media/filters/chunk_demuxer_unittest.cc
+++ b/media/filters/chunk_demuxer_unittest.cc
@@ -1199,7 +1199,7 @@ class ChunkDemuxerTest : public ::testing::Test {
MockDemuxerHost host_;
scoped_ptr<ChunkDemuxer> demuxer_;
- ChunkDemuxer::InitSegmentReceivedCB init_segment_received_cb_;
+ MediaSourceState::InitSegmentReceivedCB init_segment_received_cb_;
base::TimeDelta append_window_start_for_next_append_;
base::TimeDelta append_window_end_for_next_append_;
diff --git a/media/filters/media_source_state.cc b/media/filters/media_source_state.cc
new file mode 100644
index 0000000..f7c021b
--- /dev/null
+++ b/media/filters/media_source_state.cc
@@ -0,0 +1,697 @@
+// Copyright (c) 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/filters/media_source_state.h"
+
+#include "base/callback_helpers.h"
+#include "base/stl_util.h"
+#include "media/filters/chunk_demuxer.h"
+#include "media/filters/frame_processor.h"
+#include "media/filters/source_buffer_stream.h"
+
+namespace media {
+
+static TimeDelta EndTimestamp(const StreamParser::BufferQueue& queue) {
+ return queue.back()->timestamp() + queue.back()->duration();
+}
+
+// List of time ranges for each SourceBuffer.
+// static
+Ranges<TimeDelta> MediaSourceState::ComputeRangesIntersection(
+ const RangesList& activeRanges,
+ bool ended) {
+ // TODO(servolk): Perhaps this can be removed in favor of blink implementation
+ // (MediaSource::buffered)? Currently this is only used on Android and for
+ // updating DemuxerHost's buffered ranges during AppendData() as well as
+ // SourceBuffer.buffered property implementation.
+ // Implementation of HTMLMediaElement.buffered algorithm in MSE spec.
+ // https://dvcs.w3.org/hg/html-media/raw-file/default/media-source/media-source.html#dom-htmlmediaelement.buffered
+
+ // Step 1: If activeSourceBuffers.length equals 0 then return an empty
+ // TimeRanges object and abort these steps.
+ if (activeRanges.empty())
+ return Ranges<TimeDelta>();
+
+ // Step 2: Let active ranges be the ranges returned by buffered for each
+ // SourceBuffer object in activeSourceBuffers.
+ // Step 3: Let highest end time be the largest range end time in the active
+ // ranges.
+ TimeDelta highest_end_time;
+ for (RangesList::const_iterator itr = activeRanges.begin();
+ itr != activeRanges.end(); ++itr) {
+ if (!itr->size())
+ continue;
+
+ highest_end_time = std::max(highest_end_time, itr->end(itr->size() - 1));
+ }
+
+ // Step 4: Let intersection ranges equal a TimeRange object containing a
+ // single range from 0 to highest end time.
+ Ranges<TimeDelta> intersection_ranges;
+ intersection_ranges.Add(TimeDelta(), highest_end_time);
+
+ // Step 5: For each SourceBuffer object in activeSourceBuffers run the
+ // following steps:
+ for (RangesList::const_iterator itr = activeRanges.begin();
+ itr != activeRanges.end(); ++itr) {
+ // Step 5.1: Let source ranges equal the ranges returned by the buffered
+ // attribute on the current SourceBuffer.
+ Ranges<TimeDelta> source_ranges = *itr;
+
+ // Step 5.2: If readyState is "ended", then set the end time on the last
+ // range in source ranges to highest end time.
+ if (ended && source_ranges.size() > 0u) {
+ source_ranges.Add(source_ranges.start(source_ranges.size() - 1),
+ highest_end_time);
+ }
+
+ // Step 5.3: Let new intersection ranges equal the intersection between
+ // the intersection ranges and the source ranges.
+ // Step 5.4: Replace the ranges in intersection ranges with the new
+ // intersection ranges.
+ intersection_ranges = intersection_ranges.IntersectionWith(source_ranges);
+ }
+
+ return intersection_ranges;
+}
+
+MediaSourceState::MediaSourceState(
+ scoped_ptr<StreamParser> stream_parser,
+ scoped_ptr<FrameProcessor> frame_processor,
+ const CreateDemuxerStreamCB& create_demuxer_stream_cb,
+ const scoped_refptr<MediaLog>& media_log)
+ : create_demuxer_stream_cb_(create_demuxer_stream_cb),
+ timestamp_offset_during_append_(NULL),
+ new_media_segment_(false),
+ parsing_media_segment_(false),
+ stream_parser_(stream_parser.release()),
+ audio_(NULL),
+ video_(NULL),
+ frame_processor_(frame_processor.release()),
+ media_log_(media_log),
+ auto_update_timestamp_offset_(false) {
+ DCHECK(!create_demuxer_stream_cb_.is_null());
+ DCHECK(frame_processor_);
+}
+
+MediaSourceState::~MediaSourceState() {
+ Shutdown();
+
+ STLDeleteValues(&text_stream_map_);
+}
+
+void MediaSourceState::Init(
+ const StreamParser::InitCB& init_cb,
+ bool allow_audio,
+ bool allow_video,
+ const StreamParser::EncryptedMediaInitDataCB& encrypted_media_init_data_cb,
+ const NewTextTrackCB& new_text_track_cb) {
+ new_text_track_cb_ = new_text_track_cb;
+ init_cb_ = init_cb;
+
+ stream_parser_->Init(
+ base::Bind(&MediaSourceState::OnSourceInitDone, base::Unretained(this)),
+ base::Bind(&MediaSourceState::OnNewConfigs, base::Unretained(this),
+ allow_audio, allow_video),
+ base::Bind(&MediaSourceState::OnNewBuffers, base::Unretained(this)),
+ new_text_track_cb_.is_null(), encrypted_media_init_data_cb,
+ base::Bind(&MediaSourceState::OnNewMediaSegment, base::Unretained(this)),
+ base::Bind(&MediaSourceState::OnEndOfMediaSegment,
+ base::Unretained(this)),
+ media_log_);
+}
+
+void MediaSourceState::SetSequenceMode(bool sequence_mode) {
+ DCHECK(!parsing_media_segment_);
+
+ frame_processor_->SetSequenceMode(sequence_mode);
+}
+
+void MediaSourceState::SetGroupStartTimestampIfInSequenceMode(
+ base::TimeDelta timestamp_offset) {
+ DCHECK(!parsing_media_segment_);
+
+ frame_processor_->SetGroupStartTimestampIfInSequenceMode(timestamp_offset);
+}
+
+bool MediaSourceState::Append(
+ const uint8_t* data,
+ size_t length,
+ TimeDelta append_window_start,
+ TimeDelta append_window_end,
+ TimeDelta* timestamp_offset,
+ const InitSegmentReceivedCB& init_segment_received_cb) {
+ DCHECK(timestamp_offset);
+ DCHECK(!timestamp_offset_during_append_);
+ DCHECK(!init_segment_received_cb.is_null());
+ DCHECK(init_segment_received_cb_.is_null());
+ append_window_start_during_append_ = append_window_start;
+ append_window_end_during_append_ = append_window_end;
+ timestamp_offset_during_append_ = timestamp_offset;
+ init_segment_received_cb_ = init_segment_received_cb;
+
+ // TODO(wolenetz/acolwell): Curry and pass a NewBuffersCB here bound with
+ // append window and timestamp offset pointer. See http://crbug.com/351454.
+ bool result = stream_parser_->Parse(data, length);
+ if (!result) {
+ MEDIA_LOG(ERROR, media_log_)
+ << __FUNCTION__ << ": stream parsing failed."
+ << " Data size=" << length
+ << " append_window_start=" << append_window_start.InSecondsF()
+ << " append_window_end=" << append_window_end.InSecondsF();
+ }
+ timestamp_offset_during_append_ = NULL;
+ init_segment_received_cb_.Reset();
+ return result;
+}
+
+void MediaSourceState::ResetParserState(TimeDelta append_window_start,
+ TimeDelta append_window_end,
+ base::TimeDelta* timestamp_offset) {
+ DCHECK(timestamp_offset);
+ DCHECK(!timestamp_offset_during_append_);
+ timestamp_offset_during_append_ = timestamp_offset;
+ append_window_start_during_append_ = append_window_start;
+ append_window_end_during_append_ = append_window_end;
+
+ stream_parser_->Flush();
+ timestamp_offset_during_append_ = NULL;
+
+ frame_processor_->Reset();
+ parsing_media_segment_ = false;
+}
+
+void MediaSourceState::Remove(TimeDelta start,
+ TimeDelta end,
+ TimeDelta duration) {
+ if (audio_)
+ audio_->Remove(start, end, duration);
+
+ if (video_)
+ video_->Remove(start, end, duration);
+
+ for (TextStreamMap::iterator itr = text_stream_map_.begin();
+ itr != text_stream_map_.end(); ++itr) {
+ itr->second->Remove(start, end, duration);
+ }
+}
+
+size_t MediaSourceState::EstimateVideoDataSize(
+ size_t muxed_data_chunk_size) const {
+ DCHECK(audio_);
+ DCHECK(video_);
+
+ size_t videoBufferedSize = video_->GetBufferedSize();
+ size_t audioBufferedSize = audio_->GetBufferedSize();
+ if (videoBufferedSize == 0 || audioBufferedSize == 0) {
+ // At this point either audio or video buffer is empty, which means buffer
+ // levels are probably low anyway and we should have enough space in the
+ // buffers for appending new data, so just take a very rough guess.
+ return muxed_data_chunk_size * 7 / 8;
+ }
+
+ // We need to estimate how much audio and video data is going to be in the
+ // newly appended data chunk to make space for the new data. And we need to do
+ // that without parsing the data (which will happen later, in the Append
+ // phase). So for now we can only rely on some heuristic here. Let's assume
+ // that the proportion of the audio/video in the new data chunk is the same as
+ // the current ratio of buffered audio/video.
+ // Longer term this should go away once we further change the MSE GC algorithm
+ // to work across all streams of a SourceBuffer (see crbug.com/520704).
+ double videoBufferedSizeF = static_cast<double>(videoBufferedSize);
+ double audioBufferedSizeF = static_cast<double>(audioBufferedSize);
+
+ double totalBufferedSizeF = videoBufferedSizeF + audioBufferedSizeF;
+ CHECK_GT(totalBufferedSizeF, 0.0);
+
+ double videoRatio = videoBufferedSizeF / totalBufferedSizeF;
+ CHECK_GE(videoRatio, 0.0);
+ CHECK_LE(videoRatio, 1.0);
+ double estimatedVideoSize = muxed_data_chunk_size * videoRatio;
+ return static_cast<size_t>(estimatedVideoSize);
+}
+
+bool MediaSourceState::EvictCodedFrames(DecodeTimestamp media_time,
+ size_t newDataSize) {
+ bool success = true;
+
+ DVLOG(3) << __FUNCTION__ << " media_time=" << media_time.InSecondsF()
+ << " newDataSize=" << newDataSize
+ << " videoBufferedSize=" << (video_ ? video_->GetBufferedSize() : 0)
+ << " audioBufferedSize=" << (audio_ ? audio_->GetBufferedSize() : 0);
+
+ size_t newAudioSize = 0;
+ size_t newVideoSize = 0;
+ if (audio_ && video_) {
+ newVideoSize = EstimateVideoDataSize(newDataSize);
+ newAudioSize = newDataSize - newVideoSize;
+ } else if (video_) {
+ newVideoSize = newDataSize;
+ } else if (audio_) {
+ newAudioSize = newDataSize;
+ }
+
+ DVLOG(3) << __FUNCTION__ << " estimated audio/video sizes: "
+ << " newVideoSize=" << newVideoSize
+ << " newAudioSize=" << newAudioSize;
+
+ if (audio_)
+ success = audio_->EvictCodedFrames(media_time, newAudioSize) && success;
+
+ if (video_)
+ success = video_->EvictCodedFrames(media_time, newVideoSize) && success;
+
+ for (TextStreamMap::iterator itr = text_stream_map_.begin();
+ itr != text_stream_map_.end(); ++itr) {
+ success = itr->second->EvictCodedFrames(media_time, 0) && success;
+ }
+
+ DVLOG(3) << __FUNCTION__ << " result=" << success
+ << " videoBufferedSize=" << (video_ ? video_->GetBufferedSize() : 0)
+ << " audioBufferedSize=" << (audio_ ? audio_->GetBufferedSize() : 0);
+
+ return success;
+}
+
+Ranges<TimeDelta> MediaSourceState::GetBufferedRanges(TimeDelta duration,
+ bool ended) const {
+ // TODO(acolwell): When we start allowing disabled tracks we'll need to update
+ // this code to only add ranges from active tracks.
+ RangesList ranges_list;
+ if (audio_)
+ ranges_list.push_back(audio_->GetBufferedRanges(duration));
+
+ if (video_)
+ ranges_list.push_back(video_->GetBufferedRanges(duration));
+
+ for (TextStreamMap::const_iterator itr = text_stream_map_.begin();
+ itr != text_stream_map_.end(); ++itr) {
+ ranges_list.push_back(itr->second->GetBufferedRanges(duration));
+ }
+
+ return ComputeRangesIntersection(ranges_list, ended);
+}
+
+TimeDelta MediaSourceState::GetMaxBufferedDuration() const {
+ TimeDelta max_duration;
+
+ if (audio_)
+ max_duration = std::max(max_duration, audio_->GetBufferedDuration());
+
+ if (video_)
+ max_duration = std::max(max_duration, video_->GetBufferedDuration());
+
+ for (TextStreamMap::const_iterator itr = text_stream_map_.begin();
+ itr != text_stream_map_.end(); ++itr) {
+ max_duration = std::max(max_duration, itr->second->GetBufferedDuration());
+ }
+
+ return max_duration;
+}
+
+void MediaSourceState::StartReturningData() {
+ if (audio_)
+ audio_->StartReturningData();
+
+ if (video_)
+ video_->StartReturningData();
+
+ for (TextStreamMap::iterator itr = text_stream_map_.begin();
+ itr != text_stream_map_.end(); ++itr) {
+ itr->second->StartReturningData();
+ }
+}
+
+void MediaSourceState::AbortReads() {
+ if (audio_)
+ audio_->AbortReads();
+
+ if (video_)
+ video_->AbortReads();
+
+ for (TextStreamMap::iterator itr = text_stream_map_.begin();
+ itr != text_stream_map_.end(); ++itr) {
+ itr->second->AbortReads();
+ }
+}
+
+void MediaSourceState::Seek(TimeDelta seek_time) {
+ if (audio_)
+ audio_->Seek(seek_time);
+
+ if (video_)
+ video_->Seek(seek_time);
+
+ for (TextStreamMap::iterator itr = text_stream_map_.begin();
+ itr != text_stream_map_.end(); ++itr) {
+ itr->second->Seek(seek_time);
+ }
+}
+
+void MediaSourceState::CompletePendingReadIfPossible() {
+ if (audio_)
+ audio_->CompletePendingReadIfPossible();
+
+ if (video_)
+ video_->CompletePendingReadIfPossible();
+
+ for (TextStreamMap::iterator itr = text_stream_map_.begin();
+ itr != text_stream_map_.end(); ++itr) {
+ itr->second->CompletePendingReadIfPossible();
+ }
+}
+
+void MediaSourceState::OnSetDuration(TimeDelta duration) {
+ if (audio_)
+ audio_->OnSetDuration(duration);
+
+ if (video_)
+ video_->OnSetDuration(duration);
+
+ for (TextStreamMap::iterator itr = text_stream_map_.begin();
+ itr != text_stream_map_.end(); ++itr) {
+ itr->second->OnSetDuration(duration);
+ }
+}
+
+void MediaSourceState::MarkEndOfStream() {
+ if (audio_)
+ audio_->MarkEndOfStream();
+
+ if (video_)
+ video_->MarkEndOfStream();
+
+ for (TextStreamMap::iterator itr = text_stream_map_.begin();
+ itr != text_stream_map_.end(); ++itr) {
+ itr->second->MarkEndOfStream();
+ }
+}
+
+void MediaSourceState::UnmarkEndOfStream() {
+ if (audio_)
+ audio_->UnmarkEndOfStream();
+
+ if (video_)
+ video_->UnmarkEndOfStream();
+
+ for (TextStreamMap::iterator itr = text_stream_map_.begin();
+ itr != text_stream_map_.end(); ++itr) {
+ itr->second->UnmarkEndOfStream();
+ }
+}
+
+void MediaSourceState::Shutdown() {
+ if (audio_)
+ audio_->Shutdown();
+
+ if (video_)
+ video_->Shutdown();
+
+ for (TextStreamMap::iterator itr = text_stream_map_.begin();
+ itr != text_stream_map_.end(); ++itr) {
+ itr->second->Shutdown();
+ }
+}
+
+void MediaSourceState::SetMemoryLimits(DemuxerStream::Type type,
+ size_t memory_limit) {
+ switch (type) {
+ case DemuxerStream::AUDIO:
+ if (audio_)
+ audio_->SetStreamMemoryLimit(memory_limit);
+ break;
+ case DemuxerStream::VIDEO:
+ if (video_)
+ video_->SetStreamMemoryLimit(memory_limit);
+ break;
+ case DemuxerStream::TEXT:
+ for (TextStreamMap::iterator itr = text_stream_map_.begin();
+ itr != text_stream_map_.end(); ++itr) {
+ itr->second->SetStreamMemoryLimit(memory_limit);
+ }
+ break;
+ case DemuxerStream::UNKNOWN:
+ case DemuxerStream::NUM_TYPES:
+ NOTREACHED();
+ break;
+ }
+}
+
+bool MediaSourceState::IsSeekWaitingForData() const {
+ if (audio_ && audio_->IsSeekWaitingForData())
+ return true;
+
+ if (video_ && video_->IsSeekWaitingForData())
+ return true;
+
+ // NOTE: We are intentionally not checking the text tracks
+ // because text tracks are discontinuous and may not have data
+ // for the seek position. This is ok and playback should not be
+ // stalled because we don't have cues. If cues, with timestamps after
+ // the seek time, eventually arrive they will be delivered properly
+ // in response to ChunkDemuxerStream::Read() calls.
+
+ return false;
+}
+
+bool MediaSourceState::OnNewConfigs(
+ bool allow_audio,
+ bool allow_video,
+ const AudioDecoderConfig& audio_config,
+ const VideoDecoderConfig& video_config,
+ const StreamParser::TextTrackConfigMap& text_configs) {
+ DVLOG(1) << "OnNewConfigs(" << allow_audio << ", " << allow_video << ", "
+ << audio_config.IsValidConfig() << ", "
+ << video_config.IsValidConfig() << ")";
+ DCHECK(!init_segment_received_cb_.is_null());
+
+ if (!audio_config.IsValidConfig() && !video_config.IsValidConfig()) {
+ DVLOG(1) << "OnNewConfigs() : Audio & video config are not valid!";
+ return false;
+ }
+
+ // Signal an error if we get configuration info for stream types that weren't
+ // specified in AddId() or more configs after a stream is initialized.
+ if (allow_audio != audio_config.IsValidConfig()) {
+ MEDIA_LOG(ERROR, media_log_)
+ << "Initialization segment"
+ << (audio_config.IsValidConfig() ? " has" : " does not have")
+ << " an audio track, but the mimetype"
+ << (allow_audio ? " specifies" : " does not specify")
+ << " an audio codec.";
+ return false;
+ }
+
+ if (allow_video != video_config.IsValidConfig()) {
+ MEDIA_LOG(ERROR, media_log_)
+ << "Initialization segment"
+ << (video_config.IsValidConfig() ? " has" : " does not have")
+ << " a video track, but the mimetype"
+ << (allow_video ? " specifies" : " does not specify")
+ << " a video codec.";
+ return false;
+ }
+
+ bool success = true;
+ if (audio_config.IsValidConfig()) {
+ if (!audio_) {
+ media_log_->SetBooleanProperty("found_audio_stream", true);
+ }
+ if (!audio_ ||
+ audio_->audio_decoder_config().codec() != audio_config.codec()) {
+ media_log_->SetStringProperty("audio_codec_name",
+ GetCodecName(audio_config.codec()));
+ }
+
+ if (!audio_) {
+ audio_ = create_demuxer_stream_cb_.Run(DemuxerStream::AUDIO);
+
+ if (!audio_) {
+ DVLOG(1) << "Failed to create an audio stream.";
+ return false;
+ }
+
+ if (!frame_processor_->AddTrack(FrameProcessor::kAudioTrackId, audio_)) {
+ DVLOG(1) << "Failed to add audio track to frame processor.";
+ return false;
+ }
+ }
+
+ frame_processor_->OnPossibleAudioConfigUpdate(audio_config);
+ success &= audio_->UpdateAudioConfig(audio_config, media_log_);
+ }
+
+ if (video_config.IsValidConfig()) {
+ if (!video_) {
+ media_log_->SetBooleanProperty("found_video_stream", true);
+ }
+ if (!video_ ||
+ video_->video_decoder_config().codec() != video_config.codec()) {
+ media_log_->SetStringProperty("video_codec_name",
+ GetCodecName(video_config.codec()));
+ }
+
+ if (!video_) {
+ video_ = create_demuxer_stream_cb_.Run(DemuxerStream::VIDEO);
+
+ if (!video_) {
+ DVLOG(1) << "Failed to create a video stream.";
+ return false;
+ }
+
+ if (!frame_processor_->AddTrack(FrameProcessor::kVideoTrackId, video_)) {
+ DVLOG(1) << "Failed to add video track to frame processor.";
+ return false;
+ }
+ }
+
+ success &= video_->UpdateVideoConfig(video_config, media_log_);
+ }
+
+ typedef StreamParser::TextTrackConfigMap::const_iterator TextConfigItr;
+ if (text_stream_map_.empty()) {
+ for (TextConfigItr itr = text_configs.begin(); itr != text_configs.end();
+ ++itr) {
+ ChunkDemuxerStream* const text_stream =
+ create_demuxer_stream_cb_.Run(DemuxerStream::TEXT);
+ if (!frame_processor_->AddTrack(itr->first, text_stream)) {
+ success &= false;
+ MEDIA_LOG(ERROR, media_log_) << "Failed to add text track ID "
+ << itr->first << " to frame processor.";
+ break;
+ }
+ text_stream->UpdateTextConfig(itr->second, media_log_);
+ text_stream_map_[itr->first] = text_stream;
+ new_text_track_cb_.Run(text_stream, itr->second);
+ }
+ } else {
+ const size_t text_count = text_stream_map_.size();
+ if (text_configs.size() != text_count) {
+ success &= false;
+ MEDIA_LOG(ERROR, media_log_)
+ << "The number of text track configs changed.";
+ } else if (text_count == 1) {
+ TextConfigItr config_itr = text_configs.begin();
+ TextStreamMap::iterator stream_itr = text_stream_map_.begin();
+ ChunkDemuxerStream* text_stream = stream_itr->second;
+ TextTrackConfig old_config = text_stream->text_track_config();
+ TextTrackConfig new_config(
+ config_itr->second.kind(), config_itr->second.label(),
+ config_itr->second.language(), old_config.id());
+ if (!new_config.Matches(old_config)) {
+ success &= false;
+ MEDIA_LOG(ERROR, media_log_)
+ << "New text track config does not match old one.";
+ } else {
+ StreamParser::TrackId old_id = stream_itr->first;
+ StreamParser::TrackId new_id = config_itr->first;
+ if (new_id != old_id) {
+ if (frame_processor_->UpdateTrack(old_id, new_id)) {
+ text_stream_map_.clear();
+ text_stream_map_[config_itr->first] = text_stream;
+ } else {
+ success &= false;
+ MEDIA_LOG(ERROR, media_log_)
+ << "Error remapping single text track number";
+ }
+ }
+ }
+ } else {
+ for (TextConfigItr config_itr = text_configs.begin();
+ config_itr != text_configs.end(); ++config_itr) {
+ TextStreamMap::iterator stream_itr =
+ text_stream_map_.find(config_itr->first);
+ if (stream_itr == text_stream_map_.end()) {
+ success &= false;
+ MEDIA_LOG(ERROR, media_log_)
+ << "Unexpected text track configuration for track ID "
+ << config_itr->first;
+ break;
+ }
+
+ const TextTrackConfig& new_config = config_itr->second;
+ ChunkDemuxerStream* stream = stream_itr->second;
+ TextTrackConfig old_config = stream->text_track_config();
+ if (!new_config.Matches(old_config)) {
+ success &= false;
+ MEDIA_LOG(ERROR, media_log_) << "New text track config for track ID "
+ << config_itr->first
+ << " does not match old one.";
+ break;
+ }
+ }
+ }
+ }
+
+ frame_processor_->SetAllTrackBuffersNeedRandomAccessPoint();
+
+ DVLOG(1) << "OnNewConfigs() : " << (success ? "success" : "failed");
+ if (success)
+ init_segment_received_cb_.Run();
+
+ return success;
+}
+
+void MediaSourceState::OnNewMediaSegment() {
+ DVLOG(2) << "OnNewMediaSegment()";
+ parsing_media_segment_ = true;
+ new_media_segment_ = true;
+}
+
+void MediaSourceState::OnEndOfMediaSegment() {
+ DVLOG(2) << "OnEndOfMediaSegment()";
+ parsing_media_segment_ = false;
+ new_media_segment_ = false;
+}
+
+bool MediaSourceState::OnNewBuffers(
+ const StreamParser::BufferQueue& audio_buffers,
+ const StreamParser::BufferQueue& video_buffers,
+ const StreamParser::TextBufferQueueMap& text_map) {
+ DVLOG(2) << "OnNewBuffers()";
+ DCHECK(timestamp_offset_during_append_);
+ DCHECK(parsing_media_segment_);
+
+ const TimeDelta timestamp_offset_before_processing =
+ *timestamp_offset_during_append_;
+
+ // Calculate the new timestamp offset for audio/video tracks if the stream
+ // parser has requested automatic updates.
+ TimeDelta new_timestamp_offset = timestamp_offset_before_processing;
+ if (auto_update_timestamp_offset_) {
+ const bool have_audio_buffers = !audio_buffers.empty();
+ const bool have_video_buffers = !video_buffers.empty();
+ if (have_audio_buffers && have_video_buffers) {
+ new_timestamp_offset +=
+ std::min(EndTimestamp(audio_buffers), EndTimestamp(video_buffers));
+ } else if (have_audio_buffers) {
+ new_timestamp_offset += EndTimestamp(audio_buffers);
+ } else if (have_video_buffers) {
+ new_timestamp_offset += EndTimestamp(video_buffers);
+ }
+ }
+
+ if (!frame_processor_->ProcessFrames(
+ audio_buffers, video_buffers, text_map,
+ append_window_start_during_append_, append_window_end_during_append_,
+ &new_media_segment_, timestamp_offset_during_append_)) {
+ return false;
+ }
+
+ // Only update the timestamp offset if the frame processor hasn't already.
+ if (auto_update_timestamp_offset_ &&
+ timestamp_offset_before_processing == *timestamp_offset_during_append_) {
+ *timestamp_offset_during_append_ = new_timestamp_offset;
+ }
+
+ return true;
+}
+
+void MediaSourceState::OnSourceInitDone(
+ const StreamParser::InitParameters& params) {
+ auto_update_timestamp_offset_ = params.auto_update_timestamp_offset;
+ base::ResetAndReturn(&init_cb_).Run(params);
+}
+
+} // namespace media
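
For reference, the auto_update_timestamp_offset_ path in OnNewBuffers() above advances the pending timestamp offset by the earliest end timestamp of the newly parsed audio/video buffer queues, and only commits that value if the frame processor did not update the offset itself. The following is a minimal standalone sketch of that calculation, not the Chromium code: plain integer milliseconds and the hypothetical Fake* types stand in for base::TimeDelta and StreamParser::BufferQueue.

// Illustrative only: simplified stand-ins for the Chromium types used above.
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

struct FakeBuffer {
  int64_t timestamp_ms;
  int64_t duration_ms;
};
using FakeQueue = std::vector<FakeBuffer>;

// Mirrors EndTimestamp(): presentation end of the last buffer in a queue.
static int64_t EndTimestampMs(const FakeQueue& queue) {
  return queue.back().timestamp_ms + queue.back().duration_ms;
}

// Mirrors the auto-update branch of OnNewBuffers(): when both queues have
// buffers, the offset advances by the smaller of the two end timestamps.
static int64_t NewTimestampOffsetMs(int64_t current_offset_ms,
                                    const FakeQueue& audio,
                                    const FakeQueue& video) {
  if (!audio.empty() && !video.empty()) {
    return current_offset_ms +
           std::min(EndTimestampMs(audio), EndTimestampMs(video));
  }
  if (!audio.empty())
    return current_offset_ms + EndTimestampMs(audio);
  if (!video.empty())
    return current_offset_ms + EndTimestampMs(video);
  return current_offset_ms;
}

int main() {
  FakeQueue audio = {{0, 1000}, {1000, 1000}};  // ends at 2000 ms
  FakeQueue video = {{0, 750}, {750, 750}};     // ends at 1500 ms
  assert(NewTimestampOffsetMs(0, audio, video) == 1500);
  return 0;
}

With these sample queues, audio ends at 2000 ms and video at 1500 ms, so the offset advances by 1500 ms, matching the std::min() in OnNewBuffers().
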
diff --git a/media/filters/media_source_state.h b/media/filters/media_source_state.h
new file mode 100644
index 0000000..4ec8325
--- /dev/null
+++ b/media/filters/media_source_state.h
@@ -0,0 +1,214 @@
+// Copyright (c) 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_FILTERS_MEDIA_SOURCE_STATE_H_
+#define MEDIA_FILTERS_MEDIA_SOURCE_STATE_H_
+
+#include <list>
+
+#include "base/bind.h"
+#include "base/macros.h"
+#include "media/base/demuxer_stream.h"
+#include "media/base/media_export.h"
+#include "media/base/stream_parser.h"
+#include "media/base/stream_parser_buffer.h"
+
+namespace media {
+
+using base::TimeDelta;
+
+class ChunkDemuxerStream;
+class FrameProcessor;
+
+// Contains state belonging to a source id.
+class MEDIA_EXPORT MediaSourceState {
+ public:
+ // Callback signature used to create ChunkDemuxerStreams.
+ typedef base::Callback<ChunkDemuxerStream*(DemuxerStream::Type)>
+ CreateDemuxerStreamCB;
+
+ typedef base::Closure InitSegmentReceivedCB;
+
+ typedef base::Callback<void(ChunkDemuxerStream*, const TextTrackConfig&)>
+ NewTextTrackCB;
+
+ MediaSourceState(scoped_ptr<StreamParser> stream_parser,
+ scoped_ptr<FrameProcessor> frame_processor,
+ const CreateDemuxerStreamCB& create_demuxer_stream_cb,
+ const scoped_refptr<MediaLog>& media_log);
+
+ ~MediaSourceState();
+
+ void Init(const StreamParser::InitCB& init_cb,
+ bool allow_audio,
+ bool allow_video,
+ const StreamParser::EncryptedMediaInitDataCB&
+ encrypted_media_init_data_cb,
+ const NewTextTrackCB& new_text_track_cb);
+
+ // Appends new data to the StreamParser.
+ // Returns true if the data was successfully appended. Returns false if an
+ // error occurred. |*timestamp_offset| is used and possibly updated by the
+ // append. |append_window_start| and |append_window_end| correspond to the MSE
+ // spec's similarly named source buffer attributes that are used in coded
+ // frame processing. |init_segment_received_cb| is run for each new fully
+ // parsed initialization segment.
+ bool Append(const uint8_t* data,
+ size_t length,
+ TimeDelta append_window_start,
+ TimeDelta append_window_end,
+ TimeDelta* timestamp_offset,
+ const InitSegmentReceivedCB& init_segment_received_cb);
+
+ // Aborts the current append sequence and resets the parser.
+ void ResetParserState(TimeDelta append_window_start,
+ TimeDelta append_window_end,
+ TimeDelta* timestamp_offset);
+
+ // Calls Remove(|start|, |end|, |duration|) on all
+ // ChunkDemuxerStreams managed by this object.
+ void Remove(TimeDelta start, TimeDelta end, TimeDelta duration);
+
+  // If the buffer is full, attempts to free up space, as specified in
+ // the "Coded Frame Eviction Algorithm" in the Media Source Extensions Spec.
+ // Returns false iff buffer is still full after running eviction.
+ // https://w3c.github.io/media-source/#sourcebuffer-coded-frame-eviction
+ bool EvictCodedFrames(DecodeTimestamp media_time, size_t newDataSize);
+
+ // Returns true if currently parsing a media segment, or false otherwise.
+ bool parsing_media_segment() const { return parsing_media_segment_; }
+
+ // Sets |frame_processor_|'s sequence mode to |sequence_mode|.
+ void SetSequenceMode(bool sequence_mode);
+
+ // Signals the coded frame processor to update its group start timestamp to be
+ // |timestamp_offset| if it is in sequence append mode.
+ void SetGroupStartTimestampIfInSequenceMode(base::TimeDelta timestamp_offset);
+
+ // Returns the range of buffered data in this source, capped at |duration|.
+ // |ended| - Set to true if end of stream has been signaled and the special
+ // end of stream range logic needs to be executed.
+ Ranges<TimeDelta> GetBufferedRanges(TimeDelta duration, bool ended) const;
+
+ // Returns the highest buffered duration across all streams managed
+ // by this object.
+ // Returns TimeDelta() if none of the streams contain buffered data.
+ TimeDelta GetMaxBufferedDuration() const;
+
+ // Helper methods that call methods with similar names on all the
+ // ChunkDemuxerStreams managed by this object.
+ void StartReturningData();
+ void AbortReads();
+ void Seek(TimeDelta seek_time);
+ void CompletePendingReadIfPossible();
+ void OnSetDuration(TimeDelta duration);
+ void MarkEndOfStream();
+ void UnmarkEndOfStream();
+ void Shutdown();
+ // Sets the memory limit on each stream of a specific type.
+ // |memory_limit| is the maximum number of bytes each stream of type |type|
+ // is allowed to hold in its buffer.
+ void SetMemoryLimits(DemuxerStream::Type type, size_t memory_limit);
+ bool IsSeekWaitingForData() const;
+
+ typedef std::list<Ranges<TimeDelta>> RangesList;
+ static Ranges<TimeDelta> ComputeRangesIntersection(
+ const RangesList& activeRanges,
+ bool ended);
+
+ private:
+ // Called by the |stream_parser_| when a new initialization segment is
+ // encountered.
+ // Returns true on a successful call. Returns false if an error occurred while
+ // processing decoder configurations.
+ bool OnNewConfigs(bool allow_audio,
+ bool allow_video,
+ const AudioDecoderConfig& audio_config,
+ const VideoDecoderConfig& video_config,
+ const StreamParser::TextTrackConfigMap& text_configs);
+
+ // Called by the |stream_parser_| at the beginning of a new media segment.
+ void OnNewMediaSegment();
+
+ // Called by the |stream_parser_| at the end of a media segment.
+ void OnEndOfMediaSegment();
+
+ // Called by the |stream_parser_| when new buffers have been parsed.
+ // It processes the new buffers using |frame_processor_|, which includes
+ // appending the processed frames to associated demuxer streams for each
+ // frame's track.
+ // Returns true on a successful call. Returns false if an error occurred while
+ // processing the buffers.
+ bool OnNewBuffers(const StreamParser::BufferQueue& audio_buffers,
+ const StreamParser::BufferQueue& video_buffers,
+ const StreamParser::TextBufferQueueMap& text_map);
+
+ void OnSourceInitDone(const StreamParser::InitParameters& params);
+
+ // EstimateVideoDataSize uses some heuristics to estimate the size of the
+  // video data in the chunk of muxed audio/video data without parsing it.
+  // This is used by the EvictCodedFrames algorithm, which runs before Append
+ // (and therefore before parsing is performed) to prepare space for new data.
+ size_t EstimateVideoDataSize(size_t muxed_data_chunk_size) const;
+
+ CreateDemuxerStreamCB create_demuxer_stream_cb_;
+ NewTextTrackCB new_text_track_cb_;
+
+ // During Append(), if OnNewBuffers() coded frame processing updates the
+ // timestamp offset then |*timestamp_offset_during_append_| is also updated
+ // so Append()'s caller can know the new offset. This pointer is only non-NULL
+ // during the lifetime of an Append() call.
+ TimeDelta* timestamp_offset_during_append_;
+
+ // During Append(), coded frame processing triggered by OnNewBuffers()
+ // requires these two attributes. These are only valid during the lifetime of
+ // an Append() call.
+ TimeDelta append_window_start_during_append_;
+ TimeDelta append_window_end_during_append_;
+
+ // Set to true if the next buffers appended within the append window
+ // represent the start of a new media segment. This flag being set
+ // triggers a call to |new_segment_cb_| when the new buffers are
+ // appended. The flag is set on actual media segment boundaries and
+ // when the "append window" filtering causes discontinuities in the
+ // appended data.
+ // TODO(wolenetz/acolwell): Investigate if we need this, or if coded frame
+ // processing's discontinuity logic is enough. See http://crbug.com/351489.
+ bool new_media_segment_;
+
+ // Keeps track of whether a media segment is being parsed.
+ bool parsing_media_segment_;
+
+ // The object used to parse appended data.
+ scoped_ptr<StreamParser> stream_parser_;
+
+ ChunkDemuxerStream* audio_; // Not owned by |this|.
+ ChunkDemuxerStream* video_; // Not owned by |this|.
+
+ typedef std::map<StreamParser::TrackId, ChunkDemuxerStream*> TextStreamMap;
+ TextStreamMap text_stream_map_; // |this| owns the map's stream pointers.
+
+ scoped_ptr<FrameProcessor> frame_processor_;
+ scoped_refptr<MediaLog> media_log_;
+ StreamParser::InitCB init_cb_;
+
+ // During Append(), OnNewConfigs() will trigger the initialization segment
+ // received algorithm. This callback is only non-NULL during the lifetime of
+ // an Append() call. Note, the MSE spec explicitly disallows this algorithm
+ // during an Abort(), since Abort() is allowed only to emit coded frames, and
+ // only if the parser is PARSING_MEDIA_SEGMENT (not an INIT segment).
+ InitSegmentReceivedCB init_segment_received_cb_;
+
+ // Indicates that timestampOffset should be updated automatically during
+ // OnNewBuffers() based on the earliest end timestamp of the buffers provided.
+ // TODO(wolenetz): Refactor this function while integrating April 29, 2014
+ // changes to MSE spec. See http://crbug.com/371499.
+ bool auto_update_timestamp_offset_;
+
+ DISALLOW_COPY_AND_ASSIGN(MediaSourceState);
+};
+
+} // namespace media
+
+#endif // MEDIA_FILTERS_MEDIA_SOURCE_STATE_H_
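
OnNewConfigs(), declared above, also reconciles new text track configs with any existing text streams; in the single-track case it accepts a changed track ID as long as the kind, label, and language still match, and remaps the stream to the new ID. Below is a simplified standalone sketch of that reconciliation; FakeTextConfig and ReconcileSingleTextTrack are hypothetical stand-ins, whereas the real code compares TextTrackConfig objects and remaps via FrameProcessor::UpdateTrack().

// Illustrative only: simplified stand-in for the single-text-track
// reconciliation in MediaSourceState::OnNewConfigs().
#include <cassert>
#include <map>
#include <string>

struct FakeTextConfig {
  std::string kind;
  std::string label;
  std::string language;
  bool Matches(const FakeTextConfig& other) const {
    return kind == other.kind && label == other.label &&
           language == other.language;
  }
};

// Returns false if the new config is incompatible with the existing stream;
// otherwise remaps the existing stream's entry to the new track ID when the
// ID changed (the real code keeps the ChunkDemuxerStream* and calls
// FrameProcessor::UpdateTrack()).
static bool ReconcileSingleTextTrack(std::map<int, FakeTextConfig>* streams,
                                     int new_id,
                                     const FakeTextConfig& new_config) {
  assert(streams->size() == 1u);
  const int old_id = streams->begin()->first;
  const FakeTextConfig old_config = streams->begin()->second;
  if (!new_config.Matches(old_config))
    return false;  // "New text track config does not match old one."
  if (new_id != old_id) {
    streams->clear();
    (*streams)[new_id] = old_config;
  }
  return true;
}

int main() {
  std::map<int, FakeTextConfig> streams = {{1, {"subtitles", "eng", "en"}}};
  assert(ReconcileSingleTextTrack(&streams, 3, {"subtitles", "eng", "en"}));
  assert(streams.count(3) == 1u);
  return 0;
}

The sketch drops the TextTrackConfig id() handling: the real comparison copies the old config's ID into the newly built config before calling Matches(), so only kind, label, and language can reject the update.
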
diff --git a/media/media.gyp b/media/media.gyp
index 08cd4c9..6c1b76e 100644
--- a/media/media.gyp
+++ b/media/media.gyp
@@ -588,6 +588,8 @@
'filters/in_memory_url_protocol.h',
'filters/jpeg_parser.cc',
'filters/jpeg_parser.h',
+ 'filters/media_source_state.cc',
+ 'filters/media_source_state.h',
'filters/opus_audio_decoder.cc',
'filters/opus_audio_decoder.h',
'filters/opus_constants.cc',