author     miu@chromium.org <miu@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2013-06-13 23:20:45 +0000
committer  miu@chromium.org <miu@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2013-06-13 23:20:45 +0000
commit     1bd38bd28cbb8daa623541dcdeee55ad33e4bd15 (patch)
tree       932b934243d5d9707c9bc4d5c6b87c30ba7beeee
parent     f805162421d58615a6b6e9b2ebe4872bc208bc38 (diff)
Replace erroneous use of base::Time with base::TimeTicks throughout media code.
This change corrects all of the "low-hanging fruit," leaving the
VideoCaptureDevice interface and MediaLog (?) for separate changes, since they
are referenced throughout other components.

Added a PRESUBMIT.py script for media/ code that scans future code changes for
uses of base::Time (or base::Clock/DefaultClock) and prompts the developer with
a warning explaining that clock skew is a serious and subtle source of bugs.

Testing: Ran all media_unittests and content_unittests.  Manually ran through
demo sites that exercise all of the following (on each of Win/Mac/Linux):

  1. HTML5 audio and video
  2. PPAPI Flash video
  3. WebAudio API
  4. WebRTC
  5. Tab Capture API

BUG=247881
TEST=media_unittests, content_unittests, manual confirmation of major media use cases

Review URL: https://chromiumcodereview.appspot.com/16823003

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@206206 0039d316-1c4b-4281-b951-d872f2087c98
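For readers less familiar with the distinction: base::Time tracks the wall
clock, which can jump backward or forward whenever the system clock is
adjusted (NTP corrections, manual changes), while base::TimeTicks is monotonic
and therefore safe for measuring elapsed real-world time. Below is a minimal
standalone sketch of the same idea using Python's time module as a stand-in
for the Chromium types (time.time() roughly corresponds to base::Time,
time.monotonic() to base::TimeTicks); none of this code is part of the patch.

# Sketch only: the distinction the patch enforces, in Python terms.
# time.time()      ~ base::Time      (wall clock, can be adjusted under you)
# time.monotonic() ~ base::TimeTicks (never goes backward)
import time

wall_start = time.time()        # susceptible to NTP steps / manual clock changes
ticks_start = time.monotonic()  # monotonic, safe for measuring durations

time.sleep(0.05)

print("wall clock elapsed: %.3f s" % (time.time() - wall_start))
print("monotonic elapsed:  %.3f s" % (time.monotonic() - ticks_start))

Every conversion in the diff below is the C++ equivalent of this swap: keep
durations and scheduling on the monotonic clock, and reserve the wall clock
for calendar-time needs.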
-rw-r--r--  media/PRESUBMIT.py  62
-rw-r--r--  media/audio/audio_low_latency_input_output_unittest.cc  8
-rw-r--r--  media/audio/fake_audio_consumer.cc  4
-rw-r--r--  media/audio/fake_audio_consumer.h  2
-rw-r--r--  media/audio/fake_audio_consumer_unittest.cc  8
-rw-r--r--  media/audio/fake_audio_input_stream.cc  6
-rw-r--r--  media/audio/fake_audio_input_stream.h  2
-rw-r--r--  media/audio/linux/alsa_input.cc  6
-rw-r--r--  media/audio/linux/alsa_input.h  2
-rw-r--r--  media/audio/mac/audio_input_mac.cc  4
-rw-r--r--  media/audio/mac/audio_input_mac.h  2
-rw-r--r--  media/audio/win/audio_low_latency_output_win_unittest.cc  9
-rw-r--r--  media/audio/win/audio_unified_win_unittest.cc  9
-rw-r--r--  media/base/android/media_source_player.cc  22
-rw-r--r--  media/base/android/media_source_player.h  12
-rw-r--r--  media/base/audio_renderer_mixer.cc  6
-rw-r--r--  media/base/audio_renderer_mixer.h  2
-rw-r--r--  media/base/audio_renderer_mixer_unittest.cc  8
-rw-r--r--  media/base/clock.cc  13
-rw-r--r--  media/base/clock.h  27
-rw-r--r--  media/base/clock_unittest.cc  8
-rw-r--r--  media/base/pipeline.cc  10
-rw-r--r--  media/base/pipeline.h  8
-rw-r--r--  media/base/pipeline_unittest.cc  10
-rw-r--r--  media/filters/audio_renderer_impl.cc  7
-rw-r--r--  media/filters/audio_renderer_impl.h  10
-rw-r--r--  media/filters/audio_renderer_impl_unittest.cc  5
27 files changed, 171 insertions(+), 101 deletions(-)
diff --git a/media/PRESUBMIT.py b/media/PRESUBMIT.py
new file mode 100644
index 0000000..c844898
--- /dev/null
+++ b/media/PRESUBMIT.py
@@ -0,0 +1,62 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Top-level presubmit script for Chromium media component.
+
+See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
+for more details about the presubmit API built into gcl.
+"""
+
+
+def _CheckForUseOfWrongClock(input_api, output_api):
+ """Make sure new lines of media code don't use a clock susceptible to skew."""
+
+ def FilterFile(affected_file):
+ """Return true if the file could contain code referencing base::Time."""
+ return affected_file.LocalPath().endswith(
+ ('.h', '.cc', '.cpp', '.cxx', '.mm'))
+
+ # Regular expression that should detect any explicit references to the
+ # base::Time type (or base::Clock/DefaultClock), whether in using decls,
+ # typedefs, or to call static methods.
+ base_time_type_pattern = r'base::(Time|Clock|DefaultClock)(\W|$)'
+
+ # Regular expression that should detect references to the base::Time class
+ # members, such as a call to base::Time::Now. Exceptions: References to the
+ # kXXX constants are ignored.
+ base_time_member_pattern = r'(^|\W)(Time|Clock|DefaultClock)::[^k]'
+
+ problem_re = input_api.re.compile(
+ r'(' + base_time_type_pattern + r')|(' + base_time_member_pattern + r')')
+ problems = []
+ for f in input_api.AffectedSourceFiles(FilterFile):
+ for line_number, line in f.ChangedContents():
+ if problem_re.search(line):
+ problems.append(
+ ' %s:%d\n %s' % (f.LocalPath(), line_number, line.strip()))
+
+ if problems:
+ return [output_api.PresubmitPromptOrNotify(
+ 'You added one or more references to the base::Time class and/or one\n'
+ 'of its member functions (or base::Clock/DefaultClock). In media\n'
+ 'code, it is rarely correct to use a clock susceptible to time skew!\n'
+ 'Instead, could you use base::TimeTicks to track the passage of\n'
+ 'real-world time?\n\n' +
+ '\n'.join(problems))]
+ else:
+ return []
+
+
+def _CheckChange(input_api, output_api):
+ results = []
+ results.extend(_CheckForUseOfWrongClock(input_api, output_api))
+ return results
+
+
+def CheckChangeOnUpload(input_api, output_api):
+ return _CheckChange(input_api, output_api)
+
+
+def CheckChangeOnCommit(input_api, output_api):
+ return _CheckChange(input_api, output_api)
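As a rough illustration of what the presubmit check above flags (and
deliberately does not flag), the same two patterns can be run against a
handful of made-up C++ lines; none of these lines come from this patch.

# Quick demo of the two presubmit patterns against hypothetical C++ lines.
import re

base_time_type_pattern = r'base::(Time|Clock|DefaultClock)(\W|$)'
base_time_member_pattern = r'(^|\W)(Time|Clock|DefaultClock)::[^k]'
problem_re = re.compile(
    r'(' + base_time_type_pattern + r')|(' + base_time_member_pattern + r')')

examples = [
    'base::Time t = base::Time::Now();',               # flagged: type and member patterns
    'base::DefaultClock clock;',                       # flagged: type pattern
    'Time now = Time::Now();',                         # flagged: member pattern
    'int64 us = Time::kMicrosecondsPerSecond;',        # not flagged: [^k] skips kXXX constants
    'base::TimeDelta delta;',                          # not flagged: "Time" followed by a letter
    'base::TimeTicks start = base::TimeTicks::Now();'  # not flagged: the recommended clock
]
for line in examples:
    print('FLAG' if problem_re.search(line) else 'ok  ', line)

Note that base::TimeTicks and base::TimeDelta escape both patterns because
"Time" is followed by a word character, while the "[^k]" suffix lets
unqualified kXXX constants through.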
diff --git a/media/audio/audio_low_latency_input_output_unittest.cc b/media/audio/audio_low_latency_input_output_unittest.cc
index f4aeab4..0b0733b 100644
--- a/media/audio/audio_low_latency_input_output_unittest.cc
+++ b/media/audio/audio_low_latency_input_output_unittest.cc
@@ -136,7 +136,7 @@ class FullDuplexAudioSinkSource
channels_(channels),
input_elements_to_write_(0),
output_elements_to_write_(0),
- previous_write_time_(base::Time::Now()) {
+ previous_write_time_(base::TimeTicks::Now()) {
// Size in bytes of each audio frame (4 bytes for 16-bit stereo PCM).
frame_size_ = (16 / 8) * channels_;
@@ -186,8 +186,8 @@ class FullDuplexAudioSinkSource
// Update three components in the AudioDelayState for this recorded
// audio packet.
- base::Time now_time = base::Time::Now();
- int diff = (now_time - previous_write_time_).InMilliseconds();
+ const base::TimeTicks now_time = base::TimeTicks::Now();
+ const int diff = (now_time - previous_write_time_).InMilliseconds();
previous_write_time_ = now_time;
if (input_elements_to_write_ < kMaxDelayMeasurements) {
delay_states_[input_elements_to_write_].delta_time_ms = diff;
@@ -277,7 +277,7 @@ class FullDuplexAudioSinkSource
scoped_ptr<AudioDelayState[]> delay_states_;
size_t input_elements_to_write_;
size_t output_elements_to_write_;
- base::Time previous_write_time_;
+ base::TimeTicks previous_write_time_;
};
class AudioInputStreamTraits {
diff --git a/media/audio/fake_audio_consumer.cc b/media/audio/fake_audio_consumer.cc
index e70a74a..5232170 100644
--- a/media/audio/fake_audio_consumer.cc
+++ b/media/audio/fake_audio_consumer.cc
@@ -33,7 +33,7 @@ void FakeAudioConsumer::Start(const ReadCB& read_cb) {
DCHECK(read_cb_.is_null());
DCHECK(!read_cb.is_null());
read_cb_ = read_cb;
- next_read_time_ = base::Time::Now();
+ next_read_time_ = base::TimeTicks::Now();
read_task_cb_.Reset(base::Bind(
&FakeAudioConsumer::DoRead, base::Unretained(this)));
message_loop_->PostTask(FROM_HERE, read_task_cb_.callback());
@@ -53,7 +53,7 @@ void FakeAudioConsumer::DoRead() {
// Need to account for time spent here due to the cost of |read_cb_| as well
// as the imprecision of PostDelayedTask().
- base::Time now = base::Time::Now();
+ const base::TimeTicks now = base::TimeTicks::Now();
base::TimeDelta delay = next_read_time_ + buffer_duration_ - now;
// If we're behind, find the next nearest ontime interval.
diff --git a/media/audio/fake_audio_consumer.h b/media/audio/fake_audio_consumer.h
index 793e553..57da74f 100644
--- a/media/audio/fake_audio_consumer.h
+++ b/media/audio/fake_audio_consumer.h
@@ -48,7 +48,7 @@ class MEDIA_EXPORT FakeAudioConsumer {
ReadCB read_cb_;
scoped_ptr<AudioBus> audio_bus_;
base::TimeDelta buffer_duration_;
- base::Time next_read_time_;
+ base::TimeTicks next_read_time_;
// Used to post delayed tasks to the AudioThread that we can cancel.
base::CancelableClosure read_task_cb_;
diff --git a/media/audio/fake_audio_consumer_unittest.cc b/media/audio/fake_audio_consumer_unittest.cc
index 362686c..9ffc149 100644
--- a/media/audio/fake_audio_consumer_unittest.cc
+++ b/media/audio/fake_audio_consumer_unittest.cc
@@ -58,7 +58,7 @@ class FakeAudioConsumerTest : public testing::Test {
if (source_.callbacks() == 0) {
RunOnAudioThread();
- start_time_ = base::Time::Now();
+ start_time_ = base::TimeTicks::Now();
}
// Keep going until we've seen the requested number of callbacks.
@@ -67,7 +67,7 @@ class FakeAudioConsumerTest : public testing::Test {
&FakeAudioConsumerTest::TimeCallbacksOnAudioThread,
base::Unretained(this), callbacks), time_between_callbacks_ / 2);
} else {
- end_time_ = base::Time::Now();
+ end_time_ = base::TimeTicks::Now();
EndTest(callbacks);
}
}
@@ -84,8 +84,8 @@ class FakeAudioConsumerTest : public testing::Test {
AudioParameters params_;
FakeAudioConsumer fake_consumer_;
SineWaveAudioSource source_;
- base::Time start_time_;
- base::Time end_time_;
+ base::TimeTicks start_time_;
+ base::TimeTicks end_time_;
base::TimeDelta time_between_callbacks_;
private:
diff --git a/media/audio/fake_audio_input_stream.cc b/media/audio/fake_audio_input_stream.cc
index d2b1ce9..a00a9b6 100644
--- a/media/audio/fake_audio_input_stream.cc
+++ b/media/audio/fake_audio_input_stream.cc
@@ -8,7 +8,7 @@
#include "base/lazy_instance.h"
#include "media/audio/audio_manager_base.h"
-using base::Time;
+using base::TimeTicks;
using base::TimeDelta;
namespace media {
@@ -66,7 +66,7 @@ bool FakeAudioInputStream::Open() {
void FakeAudioInputStream::Start(AudioInputCallback* callback) {
DCHECK(!thread_.IsRunning());
callback_ = callback;
- last_callback_time_ = Time::Now();
+ last_callback_time_ = TimeTicks::Now();
thread_.Start();
thread_.message_loop()->PostDelayedTask(
FROM_HERE,
@@ -115,7 +115,7 @@ void FakeAudioInputStream::DoCallback() {
callback_->OnData(this, buffer_.get(), buffer_size_, buffer_size_, 1.0);
frames_elapsed_ += params_.frames_per_buffer();
- Time now = Time::Now();
+ const TimeTicks now = TimeTicks::Now();
base::TimeDelta next_callback_time =
last_callback_time_ + callback_interval_ * 2 - now;
diff --git a/media/audio/fake_audio_input_stream.h b/media/audio/fake_audio_input_stream.h
index 169405b..2de192f 100644
--- a/media/audio/fake_audio_input_stream.h
+++ b/media/audio/fake_audio_input_stream.h
@@ -61,7 +61,7 @@ class MEDIA_EXPORT FakeAudioInputStream
int buffer_size_;
AudioParameters params_;
base::Thread thread_;
- base::Time last_callback_time_;
+ base::TimeTicks last_callback_time_;
base::TimeDelta callback_interval_;
int beep_duration_in_buffers_;
int beep_generated_in_buffers_;
diff --git a/media/audio/linux/alsa_input.cc b/media/audio/linux/alsa_input.cc
index bbbee05..3f20158 100644
--- a/media/audio/linux/alsa_input.cc
+++ b/media/audio/linux/alsa_input.cc
@@ -119,7 +119,7 @@ void AlsaPcmInputStream::Start(AudioInputCallback* callback) {
// buffer might have got filled, to accommodate some delays in the audio
// driver. This could also give us a smooth read sequence going forward.
base::TimeDelta delay = buffer_duration_ + buffer_duration_ / 2;
- next_read_time_ = base::Time::Now() + delay;
+ next_read_time_ = base::TimeTicks::Now() + delay;
base::MessageLoop::current()->PostDelayedTask(
FROM_HERE,
base::Bind(&AlsaPcmInputStream::ReadAudio, weak_factory_.GetWeakPtr()),
@@ -182,7 +182,7 @@ void AlsaPcmInputStream::ReadAudio() {
// Even Though read callback was behind schedule, there is no data, so
// reset the next_read_time_.
if (read_callback_behind_schedule_) {
- next_read_time_ = base::Time::Now();
+ next_read_time_ = base::TimeTicks::Now();
read_callback_behind_schedule_ = false;
}
@@ -218,7 +218,7 @@ void AlsaPcmInputStream::ReadAudio() {
}
next_read_time_ += buffer_duration_;
- base::TimeDelta delay = next_read_time_ - base::Time::Now();
+ base::TimeDelta delay = next_read_time_ - base::TimeTicks::Now();
if (delay < base::TimeDelta()) {
DVLOG(1) << "Audio read callback behind schedule by "
<< (buffer_duration_ - delay).InMicroseconds()
diff --git a/media/audio/linux/alsa_input.h b/media/audio/linux/alsa_input.h
index 57092eb..974e36b 100644
--- a/media/audio/linux/alsa_input.h
+++ b/media/audio/linux/alsa_input.h
@@ -76,7 +76,7 @@ class AlsaPcmInputStream : public AgcAudioStream<AudioInputStream> {
AlsaWrapper* wrapper_;
base::TimeDelta buffer_duration_; // Length of each recorded buffer.
AudioInputCallback* callback_; // Valid during a recording session.
- base::Time next_read_time_; // Scheduled time for the next read callback.
+ base::TimeTicks next_read_time_; // Scheduled time for next read callback.
snd_pcm_t* device_handle_; // Handle to the ALSA PCM recording device.
snd_mixer_t* mixer_handle_; // Handle to the ALSA microphone mixer.
snd_mixer_elem_t* mixer_element_handle_; // Handle to the capture element.
diff --git a/media/audio/mac/audio_input_mac.cc b/media/audio/mac/audio_input_mac.cc
index c2a3696..06af6d1 100644
--- a/media/audio/mac/audio_input_mac.cc
+++ b/media/audio/mac/audio_input_mac.cc
@@ -197,7 +197,7 @@ void PCMQueueInAudioInputStream::HandleInputBuffer(
// TODO(dalecurtis): This is a HACK. Long term the AudioQueue path is going
// away in favor of the AudioUnit based AUAudioInputStream(). Tracked by
// http://crbug.com/161383.
- base::TimeDelta elapsed = base::Time::Now() - last_fill_;
+ base::TimeDelta elapsed = base::TimeTicks::Now() - last_fill_;
const base::TimeDelta kMinDelay = base::TimeDelta::FromMilliseconds(5);
if (elapsed < kMinDelay)
base::PlatformThread::Sleep(kMinDelay - elapsed);
@@ -208,7 +208,7 @@ void PCMQueueInAudioInputStream::HandleInputBuffer(
audio_buffer->mAudioDataByteSize,
0.0);
- last_fill_ = base::Time::Now();
+ last_fill_ = base::TimeTicks::Now();
}
// Recycle the buffer.
OSStatus err = QueueNextBuffer(audio_buffer);
diff --git a/media/audio/mac/audio_input_mac.h b/media/audio/mac/audio_input_mac.h
index 1f9856f1..d49fed8 100644
--- a/media/audio/mac/audio_input_mac.h
+++ b/media/audio/mac/audio_input_mac.h
@@ -78,7 +78,7 @@ class PCMQueueInAudioInputStream : public AudioInputStream {
// True iff Start() has been called successfully.
bool started_;
// Used to determine if we need to slow down |callback_| calls.
- base::Time last_fill_;
+ base::TimeTicks last_fill_;
DISALLOW_COPY_AND_ASSIGN(PCMQueueInAudioInputStream);
};
diff --git a/media/audio/win/audio_low_latency_output_win_unittest.cc b/media/audio/win/audio_low_latency_output_win_unittest.cc
index af60be6..4fb09de 100644
--- a/media/audio/win/audio_low_latency_output_win_unittest.cc
+++ b/media/audio/win/audio_low_latency_output_win_unittest.cc
@@ -77,7 +77,7 @@ class ReadFromFileAudioSource : public AudioOutputStream::AudioSourceCallback {
public:
explicit ReadFromFileAudioSource(const std::string& name)
: pos_(0),
- previous_call_time_(base::Time::Now()),
+ previous_call_time_(base::TimeTicks::Now()),
text_file_(NULL),
elements_to_write_(0) {
// Reads a test file from media/test/data directory.
@@ -116,8 +116,9 @@ class ReadFromFileAudioSource : public AudioOutputStream::AudioSourceCallback {
AudioBuffersState buffers_state) {
// Store time difference between two successive callbacks in an array.
// These values will be written to a file in the destructor.
- int diff = (base::Time::Now() - previous_call_time_).InMilliseconds();
- previous_call_time_ = base::Time::Now();
+ const base::TimeTicks now_time = base::TimeTicks::Now();
+ const int diff = (now_time - previous_call_time_).InMilliseconds();
+ previous_call_time_ = now_time;
if (elements_to_write_ < kMaxDeltaSamples) {
delta_times_[elements_to_write_] = diff;
++elements_to_write_;
@@ -154,7 +155,7 @@ class ReadFromFileAudioSource : public AudioOutputStream::AudioSourceCallback {
scoped_refptr<DecoderBuffer> file_;
scoped_ptr<int[]> delta_times_;
int pos_;
- base::Time previous_call_time_;
+ base::TimeTicks previous_call_time_;
FILE* text_file_;
size_t elements_to_write_;
};
diff --git a/media/audio/win/audio_unified_win_unittest.cc b/media/audio/win/audio_unified_win_unittest.cc
index 6fad0b7..807ca7f 100644
--- a/media/audio/win/audio_unified_win_unittest.cc
+++ b/media/audio/win/audio_unified_win_unittest.cc
@@ -63,7 +63,7 @@ class MockUnifiedSourceCallback
class UnifiedSourceCallback : public AudioOutputStream::AudioSourceCallback {
public:
explicit UnifiedSourceCallback()
- : previous_call_time_(base::Time::Now()),
+ : previous_call_time_(base::TimeTicks::Now()),
text_file_(NULL),
elements_to_write_(0) {
delta_times_.reset(new int[kMaxDeltaSamples]);
@@ -98,8 +98,9 @@ class UnifiedSourceCallback : public AudioOutputStream::AudioSourceCallback {
AudioBus* dest,
AudioBuffersState buffers_state) {
// Store time between this callback and the previous callback.
- int diff = (base::Time::Now() - previous_call_time_).InMilliseconds();
- previous_call_time_ = base::Time::Now();
+ const base::TimeTicks now_time = base::TimeTicks::Now();
+ const int diff = (now_time - previous_call_time_).InMilliseconds();
+ previous_call_time_ = now_time;
if (elements_to_write_ < kMaxDeltaSamples) {
delta_times_[elements_to_write_] = diff;
++elements_to_write_;
@@ -134,7 +135,7 @@ class UnifiedSourceCallback : public AudioOutputStream::AudioSourceCallback {
}
private:
- base::Time previous_call_time_;
+ base::TimeTicks previous_call_time_;
scoped_ptr<int[]> delta_times_;
FILE* text_file_;
size_t elements_to_write_;
diff --git a/media/base/android/media_source_player.cc b/media/base/android/media_source_player.cc
index 8cadb60..446805c 100644
--- a/media/base/android/media_source_player.cc
+++ b/media/base/android/media_source_player.cc
@@ -85,7 +85,7 @@ class VideoDecoderJob : public MediaDecoderJob {
void MediaDecoderJob::Decode(
const MediaPlayerHostMsg_ReadFromDemuxerAck_Params::AccessUnit& unit,
- const base::Time& start_wallclock_time,
+ const base::TimeTicks& start_wallclock_time,
const base::TimeDelta& start_presentation_timestamp,
const MediaDecoderJob::DecoderCallback& callback) {
DCHECK(!decoding_);
@@ -99,7 +99,7 @@ void MediaDecoderJob::Decode(
void MediaDecoderJob::DecodeInternal(
const MediaPlayerHostMsg_ReadFromDemuxerAck_Params::AccessUnit& unit,
- const base::Time& start_wallclock_time,
+ const base::TimeTicks& start_wallclock_time,
const base::TimeDelta& start_presentation_timestamp,
bool needs_flush,
const MediaDecoderJob::DecoderCallback& callback) {
@@ -151,7 +151,7 @@ void MediaDecoderJob::DecodeInternal(
break;
base::TimeDelta time_to_render;
if (!start_wallclock_time.is_null()) {
- time_to_render = presentation_timestamp - (base::Time::Now() -
+ time_to_render = presentation_timestamp - (base::TimeTicks::Now() -
start_wallclock_time + start_presentation_timestamp);
}
if (time_to_render >= base::TimeDelta()) {
@@ -188,8 +188,8 @@ void MediaDecoderJob::ReleaseOutputBuffer(
}
media_codec_bridge_->ReleaseOutputBuffer(outputBufferIndex, !is_audio_);
message_loop_->PostTask(FROM_HERE, base::Bind(
- callback, DECODE_SUCCEEDED, presentation_timestamp, base::Time::Now(),
- end_of_stream));
+ callback, DECODE_SUCCEEDED, presentation_timestamp,
+ base::TimeTicks::Now(), end_of_stream));
}
void MediaDecoderJob::OnDecodeCompleted() {
@@ -300,7 +300,7 @@ void MediaSourcePlayer::Start() {
void MediaSourcePlayer::Pause() {
playing_ = false;
- start_wallclock_time_ = base::Time();
+ start_wallclock_time_ = base::TimeTicks();
}
bool MediaSourcePlayer::IsPlaying() {
@@ -449,7 +449,7 @@ void MediaSourcePlayer::OnSeekRequestAck(unsigned seek_request_id) {
void MediaSourcePlayer::UpdateTimestamps(
const base::TimeDelta& presentation_timestamp,
- const base::Time& wallclock_time) {
+ const base::TimeTicks& wallclock_time) {
last_presentation_timestamp_ = presentation_timestamp;
OnTimeUpdated();
if (start_wallclock_time_.is_null() && playing_) {
@@ -470,7 +470,7 @@ void MediaSourcePlayer::ProcessPendingEvents() {
return;
}
- start_wallclock_time_ = base::Time();
+ start_wallclock_time_ = base::TimeTicks();
if (pending_event_ & CONFIG_CHANGE_EVENT_PENDING) {
DCHECK(reconfig_audio_decoder_ || reconfig_video_decoder_);
manager()->OnMediaConfigRequest(player_id());
@@ -490,7 +490,7 @@ void MediaSourcePlayer::ProcessPendingEvents() {
void MediaSourcePlayer::MediaDecoderCallback(
bool is_audio, MediaDecoderJob::DecodeStatus decode_status,
const base::TimeDelta& presentation_timestamp,
- const base::Time& wallclock_time, bool end_of_stream) {
+ const base::TimeTicks& wallclock_time, bool end_of_stream) {
if (active_decoding_tasks_ > 0)
active_decoding_tasks_--;
@@ -601,7 +601,7 @@ void MediaSourcePlayer::PlaybackCompleted(bool is_audio) {
if ((!HasAudio() || audio_finished_) && (!HasVideo() || video_finished_)) {
playing_ = false;
- start_wallclock_time_ = base::Time();
+ start_wallclock_time_ = base::TimeTicks();
OnPlaybackComplete();
}
}
@@ -611,7 +611,7 @@ void MediaSourcePlayer::ClearDecodingData() {
audio_decoder_job_->Flush();
if (video_decoder_job_)
video_decoder_job_->Flush();
- start_wallclock_time_ = base::Time();
+ start_wallclock_time_ = base::TimeTicks();
received_audio_ = MediaPlayerHostMsg_ReadFromDemuxerAck_Params();
received_video_ = MediaPlayerHostMsg_ReadFromDemuxerAck_Params();
audio_access_unit_index_ = 0;
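The scheduling arithmetic above (time_to_render in
MediaDecoderJob::DecodeInternal) is the part that actually depends on a
skew-free clock: the decoder compares a unit's presentation timestamp against
the media time implied by how much monotonic time has elapsed since playback
started. A small standalone sketch with hypothetical values, using Python's
monotonic clock in place of base::TimeTicks (not Chromium code):

# Sketch of the A/V scheduling math: how long to wait before rendering a unit.
import time

start_wallclock_time = time.monotonic() - 2.0   # playback started 2.0 s ago...
start_presentation_timestamp = 10.0             # ...at media time 10.0 s
presentation_timestamp = 12.5                   # this unit is stamped 12.5 s

media_time_now = (time.monotonic() - start_wallclock_time) + start_presentation_timestamp
time_to_render = presentation_timestamp - media_time_now
print('render in %.2f s' % time_to_render)      # ~0.50 s

If start_wallclock_time were a wall-clock value and the system clock stepped
between the two readings, time_to_render could be wildly wrong, which is
exactly the bug class this patch removes.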
diff --git a/media/base/android/media_source_player.h b/media/base/android/media_source_player.h
index 3b12dd5..836482c 100644
--- a/media/base/android/media_source_player.h
+++ b/media/base/android/media_source_player.h
@@ -46,12 +46,12 @@ class MediaDecoderJob {
// finished successfully, presentation time, timestamp when the data is
// rendered, whether decoder is reaching EOS.
typedef base::Callback<void(DecodeStatus, const base::TimeDelta&,
- const base::Time&, bool)> DecoderCallback;
+ const base::TimeTicks&, bool)> DecoderCallback;
// Called by MediaSourcePlayer to decode some data.
void Decode(
const MediaPlayerHostMsg_ReadFromDemuxerAck_Params::AccessUnit& unit,
- const base::Time& start_wallclock_time,
+ const base::TimeTicks& start_wallclock_time,
const base::TimeDelta& start_presentation_timestamp,
const MediaDecoderJob::DecoderCallback& callback);
@@ -85,7 +85,7 @@ class MediaDecoderJob {
// flushed at the beginning of this call.
void DecodeInternal(
const MediaPlayerHostMsg_ReadFromDemuxerAck_Params::AccessUnit& unit,
- const base::Time& start_wallclock_time,
+ const base::TimeTicks& start_wallclock_time,
const base::TimeDelta& start_presentation_timestamp,
bool needs_flush,
const MediaDecoderJob::DecoderCallback& callback);
@@ -161,7 +161,7 @@ class MEDIA_EXPORT MediaSourcePlayer : public MediaPlayerAndroid {
// Update the timestamps for A/V sync scheduling.
void UpdateTimestamps(
const base::TimeDelta& presentation_timestamp,
- const base::Time& wallclock_time);
+ const base::TimeTicks& wallclock_time);
// Helper function for starting media playback.
void StartInternal();
@@ -173,7 +173,7 @@ class MEDIA_EXPORT MediaSourcePlayer : public MediaPlayerAndroid {
void MediaDecoderCallback(
bool is_audio, MediaDecoderJob::DecodeStatus decode_status,
const base::TimeDelta& presentation_timestamp,
- const base::Time& wallclock_time, bool end_of_stream);
+ const base::TimeTicks& wallclock_time, bool end_of_stream);
// Handle pending events when all the decoder jobs finished.
void ProcessPendingEvents();
@@ -231,7 +231,7 @@ class MEDIA_EXPORT MediaSourcePlayer : public MediaPlayerAndroid {
// should be rendered.
// TODO(qinmin): Need to fix the problem if audio/video lagged too far behind
// due to network or decoding problem.
- base::Time start_wallclock_time_;
+ base::TimeTicks start_wallclock_time_;
base::TimeDelta start_presentation_timestamp_;
// The surface object currently owned by the player.
diff --git a/media/base/audio_renderer_mixer.cc b/media/base/audio_renderer_mixer.cc
index 7db9b3f..11b1211 100644
--- a/media/base/audio_renderer_mixer.cc
+++ b/media/base/audio_renderer_mixer.cc
@@ -18,7 +18,7 @@ AudioRendererMixer::AudioRendererMixer(
: audio_sink_(sink),
audio_converter_(input_params, output_params, true),
pause_delay_(base::TimeDelta::FromSeconds(kPauseDelaySeconds)),
- last_play_time_(base::Time::Now()),
+ last_play_time_(base::TimeTicks::Now()),
// Initialize |playing_| to true since Start() results in an auto-play.
playing_(true) {
audio_sink_->Initialize(output_params, this);
@@ -40,7 +40,7 @@ void AudioRendererMixer::AddMixerInput(AudioConverter::InputCallback* input,
if (!playing_) {
playing_ = true;
- last_play_time_ = base::Time::Now();
+ last_play_time_ = base::TimeTicks::Now();
audio_sink_->Play();
}
@@ -65,7 +65,7 @@ int AudioRendererMixer::Render(AudioBus* audio_bus,
// If there are no mixer inputs and we haven't seen one for a while, pause the
// sink to avoid wasting resources when media elements are present but remain
// in the pause state.
- base::Time now = base::Time::Now();
+ const base::TimeTicks now = base::TimeTicks::Now();
if (!mixer_inputs_.empty()) {
last_play_time_ = now;
} else if (now - last_play_time_ >= pause_delay_ && playing_) {
diff --git a/media/base/audio_renderer_mixer.h b/media/base/audio_renderer_mixer.h
index 6168064..943d779 100644
--- a/media/base/audio_renderer_mixer.h
+++ b/media/base/audio_renderer_mixer.h
@@ -56,7 +56,7 @@ class MEDIA_EXPORT AudioRendererMixer
// Handles physical stream pause when no inputs are playing. For latency
// reasons we don't want to immediately pause the physical stream.
base::TimeDelta pause_delay_;
- base::Time last_play_time_;
+ base::TimeTicks last_play_time_;
bool playing_;
DISALLOW_COPY_AND_ASSIGN(AudioRendererMixer);
diff --git a/media/base/audio_renderer_mixer_unittest.cc b/media/base/audio_renderer_mixer_unittest.cc
index a0a34f3..8853068 100644
--- a/media/base/audio_renderer_mixer_unittest.cc
+++ b/media/base/audio_renderer_mixer_unittest.cc
@@ -421,11 +421,11 @@ TEST_P(AudioRendererMixerBehavioralTest, MixerPausesStream) {
// Ensure never playing the input results in a sink pause.
const base::TimeDelta kSleepTime = base::TimeDelta::FromMilliseconds(100);
- base::Time start_time = base::Time::Now();
+ base::TimeTicks start_time = base::TimeTicks::Now();
while (!pause_event.IsSignaled()) {
mixer_callback_->Render(audio_bus_.get(), 0);
base::PlatformThread::Sleep(kSleepTime);
- ASSERT_TRUE(base::Time::Now() - start_time < kTestTimeout);
+ ASSERT_TRUE(base::TimeTicks::Now() - start_time < kTestTimeout);
}
pause_event.Reset();
@@ -436,11 +436,11 @@ TEST_P(AudioRendererMixerBehavioralTest, MixerPausesStream) {
mixer_inputs_[0]->Pause();
// Ensure once the input is paused the sink eventually pauses.
- start_time = base::Time::Now();
+ start_time = base::TimeTicks::Now();
while (!pause_event.IsSignaled()) {
mixer_callback_->Render(audio_bus_.get(), 0);
base::PlatformThread::Sleep(kSleepTime);
- ASSERT_TRUE(base::Time::Now() - start_time < kTestTimeout);
+ ASSERT_TRUE(base::TimeTicks::Now() - start_time < kTestTimeout);
}
mixer_inputs_[0]->Stop();
diff --git a/media/base/clock.cc b/media/base/clock.cc
index 733c5fe..ea95483 100644
--- a/media/base/clock.cc
+++ b/media/base/clock.cc
@@ -7,12 +7,12 @@
#include <algorithm>
#include "base/logging.h"
-#include "base/time/clock.h"
+#include "base/time/tick_clock.h"
#include "media/base/buffers.h"
namespace media {
-Clock::Clock(base::Clock* clock) : clock_(clock) {
+Clock::Clock(base::TickClock* clock) : clock_(clock) {
DCHECK(clock_);
Reset();
}
@@ -89,7 +89,8 @@ void Clock::SetDuration(base::TimeDelta duration) {
max_time_ = ClampToValidTimeRange(max_time_);
}
-base::TimeDelta Clock::ElapsedViaProvidedTime(const base::Time& time) const {
+base::TimeDelta Clock::ElapsedViaProvidedTime(
+ const base::TimeTicks& time) const {
// TODO(scherkus): floating point badness scaling time by playback rate.
int64 now_us = (time - reference_).InMicroseconds();
now_us = static_cast<int64>(now_us * playback_rate_);
@@ -119,11 +120,11 @@ void Clock::UpdateReferencePoints() {
void Clock::UpdateReferencePoints(base::TimeDelta current_time) {
media_time_ = ClampToValidTimeRange(current_time);
- reference_ = clock_->Now();
+ reference_ = clock_->NowTicks();
}
base::TimeDelta Clock::EstimatedElapsedTime() {
- return ClampToValidTimeRange(ElapsedViaProvidedTime(clock_->Now()));
+ return ClampToValidTimeRange(ElapsedViaProvidedTime(clock_->NowTicks()));
}
void Clock::Reset() {
@@ -132,7 +133,7 @@ void Clock::Reset() {
max_time_ = kNoTimestamp();
duration_ = kNoTimestamp();
media_time_ = base::TimeDelta();
- reference_ = base::Time();
+ reference_ = base::TimeTicks();
underflow_ = false;
}
diff --git a/media/base/clock.h b/media/base/clock.h
index 267666f..6a86106 100644
--- a/media/base/clock.h
+++ b/media/base/clock.h
@@ -10,16 +10,16 @@
#include "media/base/media_export.h"
namespace base {
-class Clock;
+class TickClock;
} // namespace base
namespace media {
// A clock represents a single source of time to allow audio and video streams
// to synchronize with each other. Clock essentially tracks the media time with
-// respect to some other source of time, whether that may be the system clock or
-// updates via SetTime(). Clock uses linear interpolation to calculate the
-// current media time since the last time SetTime() was called.
+// respect to some other source of time, whether that may be the monotonic
+// system clock or updates via SetTime(). Clock uses linear interpolation to
+// calculate the current media time since the last time SetTime() was called.
//
// Clocks start off paused with a playback rate of 1.0f and a media time of 0.
//
@@ -28,9 +28,12 @@ namespace media {
// TODO(scherkus): Clock will some day be responsible for executing callbacks
// given a media time. This will be used primarily by video renderers. For now
// we'll keep using a poll-and-sleep solution.
+//
+// TODO(miu): Rename media::Clock to avoid confusion (and tripping up the media
+// PRESUBMIT script on future changes).
class MEDIA_EXPORT Clock {
public:
- explicit Clock(base::Clock* clock);
+ explicit Clock(base::TickClock* clock);
~Clock();
// Returns true if the clock is running.
@@ -88,13 +91,13 @@ class MEDIA_EXPORT Clock {
// the |max_time_| cap.
base::TimeDelta EstimatedElapsedTime();
- // Returns the current media time treating the given time as the latest
- // value as returned by |time_provider_|.
- base::TimeDelta ElapsedViaProvidedTime(const base::Time& time) const;
+ // Translates |time| into the current media time, based on the perspective of
+ // the monotonically-increasing system clock.
+ base::TimeDelta ElapsedViaProvidedTime(const base::TimeTicks& time) const;
base::TimeDelta ClampToValidTimeRange(base::TimeDelta time) const;
- base::Clock* const clock_;
+ base::TickClock* const clock_;
// Whether the clock is running.
bool playing_;
@@ -103,9 +106,9 @@ class MEDIA_EXPORT Clock {
// allowed.
bool underflow_;
- // The system clock time when this clock last starting playing or had its
- // time set via SetTime().
- base::Time reference_;
+ // The monotonic system clock time when this Clock last started playing or had
+ // its time set via SetTime().
+ base::TimeTicks reference_;
// Current accumulated amount of media time. The remaining portion must be
// calculated by comparing the system time to the reference time.
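The media::Clock comments above describe linear interpolation from a reference
point, which clock.cc implements in ElapsedViaProvidedTime(). A rough
standalone sketch of that bookkeeping (hypothetical class, Python's monotonic
clock standing in for base::TickClock; not Chromium code), ignoring the
play/pause state and duration clamping the real class also handles:

# Sketch only: interpolate media time from the last reference point, scaled by
# the playback rate, against a monotonic clock (never the wall clock).
import time

class InterpolatingClock(object):
    def __init__(self, playback_rate=1.0):
        self.playback_rate = playback_rate
        self.media_time = 0.0              # accumulated media time, seconds
        self.reference = time.monotonic()  # monotonic reference point

    def set_time(self, media_time):
        # Like Clock::UpdateReferencePoints(): pin media time to "now".
        self.media_time = media_time
        self.reference = time.monotonic()

    def elapsed(self):
        # Like Clock::ElapsedViaProvidedTime(): scale the monotonic delta by
        # the playback rate and add it to the accumulated media time.
        return self.media_time + (time.monotonic() - self.reference) * self.playback_rate

if __name__ == '__main__':
    clock = InterpolatingClock(playback_rate=2.0)
    clock.set_time(10.0)               # media time pinned at 10.0 s
    time.sleep(0.25)
    print('%.2f s' % clock.elapsed())  # ~10.50 s at 2x playback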
diff --git a/media/base/clock_unittest.cc b/media/base/clock_unittest.cc
index 919c7e5..3bf0599 100644
--- a/media/base/clock_unittest.cc
+++ b/media/base/clock_unittest.cc
@@ -4,7 +4,7 @@
#include "base/compiler_specific.h"
#include "base/logging.h"
-#include "base/test/simple_test_clock.h"
+#include "base/test/simple_test_tick_clock.h"
#include "base/time/clock.h"
#include "media/base/clock.h"
#include "testing/gmock/include/gmock/gmock.h"
@@ -30,7 +30,7 @@ static const int kDurationInSeconds = 120;
class ClockTest : public ::testing::Test {
public:
- ClockTest() : clock_(&test_clock_) {
+ ClockTest() : clock_(&test_tick_clock_) {
SetDuration();
}
@@ -43,10 +43,10 @@ class ClockTest : public ::testing::Test {
}
void AdvanceSystemTime(base::TimeDelta delta) {
- test_clock_.Advance(delta);
+ test_tick_clock_.Advance(delta);
}
- base::SimpleTestClock test_clock_;
+ base::SimpleTestTickClock test_tick_clock_;
Clock clock_;
base::TimeDelta time_elapsed_;
};
diff --git a/media/base/pipeline.cc b/media/base/pipeline.cc
index 68483fc..419ae31 100644
--- a/media/base/pipeline.cc
+++ b/media/base/pipeline.cc
@@ -39,7 +39,7 @@ Pipeline::Pipeline(const scoped_refptr<base::MessageLoopProxy>& message_loop,
natural_size_(0, 0),
volume_(1.0f),
playback_rate_(0.0f),
- clock_(new Clock(&default_clock_)),
+ clock_(new Clock(&default_tick_clock_)),
waiting_for_clock_update_(false),
status_(PIPELINE_OK),
has_audio_(false),
@@ -49,7 +49,7 @@ Pipeline::Pipeline(const scoped_refptr<base::MessageLoopProxy>& message_loop,
video_ended_(false),
audio_disabled_(false),
demuxer_(NULL),
- creation_time_(base::Time::Now()) {
+ creation_time_(default_tick_clock_.NowTicks()) {
media_log_->AddEvent(media_log_->CreatePipelineStateChangedEvent(kCreated));
media_log_->AddEvent(
media_log_->CreateEvent(MediaLogEvent::PIPELINE_CREATED));
@@ -213,9 +213,9 @@ void Pipeline::SetErrorForTesting(PipelineStatus status) {
void Pipeline::SetState(State next_state) {
if (state_ != kStarted && next_state == kStarted &&
!creation_time_.is_null()) {
- UMA_HISTOGRAM_TIMES(
- "Media.TimeToPipelineStarted", base::Time::Now() - creation_time_);
- creation_time_ = base::Time();
+ UMA_HISTOGRAM_TIMES("Media.TimeToPipelineStarted",
+ default_tick_clock_.NowTicks() - creation_time_);
+ creation_time_ = base::TimeTicks();
}
DVLOG(2) << GetStateString(state_) << " -> " << GetStateString(next_state);
diff --git a/media/base/pipeline.h b/media/base/pipeline.h
index 0ca7d62..09ff904 100644
--- a/media/base/pipeline.h
+++ b/media/base/pipeline.h
@@ -11,7 +11,7 @@
#include "base/synchronization/condition_variable.h"
#include "base/synchronization/lock.h"
#include "base/threading/thread_checker.h"
-#include "base/time/default_clock.h"
+#include "base/time/default_tick_clock.h"
#include "media/base/audio_renderer.h"
#include "media/base/demuxer.h"
#include "media/base/media_export.h"
@@ -373,8 +373,8 @@ class MEDIA_EXPORT Pipeline : public DemuxerHost {
// the filters.
float playback_rate_;
- // base::Clock used by |clock_|.
- base::DefaultClock default_clock_;
+ // base::TickClock used by |clock_|.
+ base::DefaultTickClock default_tick_clock_;
// Reference clock. Keeps track of current playback time. Uses system
// clock and linear interpolation, but can have its time manually set
@@ -439,7 +439,7 @@ class MEDIA_EXPORT Pipeline : public DemuxerHost {
// Time of pipeline creation; is non-zero only until the pipeline first
// reaches "kStarted", at which point it is used & zeroed out.
- base::Time creation_time_;
+ base::TimeTicks creation_time_;
scoped_ptr<SerialRunner> pending_callbacks_;
diff --git a/media/base/pipeline_unittest.cc b/media/base/pipeline_unittest.cc
index 778a036..a121ee4 100644
--- a/media/base/pipeline_unittest.cc
+++ b/media/base/pipeline_unittest.cc
@@ -7,7 +7,7 @@
#include "base/bind.h"
#include "base/message_loop.h"
#include "base/stl_util.h"
-#include "base/test/simple_test_clock.h"
+#include "base/test/simple_test_tick_clock.h"
#include "base/threading/simple_thread.h"
#include "base/time/clock.h"
#include "media/base/clock.h"
@@ -286,7 +286,7 @@ class PipelineTest : public ::testing::Test {
// Fixture members.
StrictMock<CallbackHelper> callbacks_;
- base::SimpleTestClock test_clock_;
+ base::SimpleTestTickClock test_tick_clock_;
base::MessageLoop message_loop_;
scoped_ptr<Pipeline> pipeline_;
@@ -606,7 +606,7 @@ TEST_F(PipelineTest, AudioStreamShorterThanVideo) {
// Replace the clock so we can simulate wallclock time advancing w/o using
// Sleep().
- pipeline_->SetClockForTesting(new Clock(&test_clock_));
+ pipeline_->SetClockForTesting(new Clock(&test_tick_clock_));
InitializeDemuxer(&streams, duration);
InitializeAudioRenderer(audio_stream(), false);
@@ -627,7 +627,7 @@ TEST_F(PipelineTest, AudioStreamShorterThanVideo) {
// Verify that the clock doesn't advance since it hasn't been started by
// a time update from the audio stream.
int64 start_time = pipeline_->GetMediaTime().ToInternalValue();
- test_clock_.Advance(base::TimeDelta::FromMilliseconds(100));
+ test_tick_clock_.Advance(base::TimeDelta::FromMilliseconds(100));
EXPECT_EQ(pipeline_->GetMediaTime().ToInternalValue(), start_time);
// Signal end of audio stream.
@@ -636,7 +636,7 @@ TEST_F(PipelineTest, AudioStreamShorterThanVideo) {
// Verify that the clock advances.
start_time = pipeline_->GetMediaTime().ToInternalValue();
- test_clock_.Advance(base::TimeDelta::FromMilliseconds(100));
+ test_tick_clock_.Advance(base::TimeDelta::FromMilliseconds(100));
EXPECT_GT(pipeline_->GetMediaTime().ToInternalValue(), start_time);
// Signal end of video stream and make sure OnEnded() callback occurs.
diff --git a/media/filters/audio_renderer_impl.cc b/media/filters/audio_renderer_impl.cc
index 9afd550..8a4cd23 100644
--- a/media/filters/audio_renderer_impl.cc
+++ b/media/filters/audio_renderer_impl.cc
@@ -49,7 +49,7 @@ AudioRendererImpl::AudioRendererImpl(
sink_(sink),
decoder_selector_(new AudioDecoderSelector(
message_loop, decoders.Pass(), set_decryptor_ready_cb)),
- now_cb_(base::Bind(&base::Time::Now)),
+ now_cb_(base::Bind(&base::TimeTicks::Now)),
state_(kUninitialized),
sink_playing_(false),
pending_read_(false),
@@ -524,7 +524,7 @@ uint32 AudioRendererImpl::FillBuffer(uint8* dest,
// Otherwise the buffer has data we can send to the device.
frames_written = algorithm_->FillBuffer(dest, requested_frames);
if (frames_written == 0) {
- base::Time now = now_cb_.Run();
+ const base::TimeTicks now = now_cb_.Run();
if (received_end_of_stream_ && !rendered_end_of_stream_ &&
now >= earliest_end_time_) {
@@ -598,7 +598,8 @@ uint32 AudioRendererImpl::FillBuffer(uint8* dest,
}
void AudioRendererImpl::UpdateEarliestEndTime_Locked(
- int frames_filled, base::TimeDelta playback_delay, base::Time time_now) {
+ int frames_filled, const base::TimeDelta& playback_delay,
+ const base::TimeTicks& time_now) {
if (frames_filled <= 0)
return;
diff --git a/media/filters/audio_renderer_impl.h b/media/filters/audio_renderer_impl.h
index d96bfdd..43c106d 100644
--- a/media/filters/audio_renderer_impl.h
+++ b/media/filters/audio_renderer_impl.h
@@ -88,7 +88,7 @@ class MEDIA_EXPORT AudioRendererImpl
void DisableUnderflowForTesting();
// Allows injection of a custom time callback for non-realtime testing.
- typedef base::Callback<base::Time()> NowCB;
+ typedef base::Callback<base::TimeTicks()> NowCB;
void set_now_cb_for_testing(const NowCB& now_cb) {
now_cb_ = now_cb;
}
@@ -131,8 +131,8 @@ class MEDIA_EXPORT AudioRendererImpl
// Estimate earliest time when current buffer can stop playing.
void UpdateEarliestEndTime_Locked(int frames_filled,
- base::TimeDelta playback_delay,
- base::Time time_now);
+ const base::TimeDelta& playback_delay,
+ const base::TimeTicks& time_now);
void DoPlay();
void DoPause();
@@ -203,7 +203,7 @@ class MEDIA_EXPORT AudioRendererImpl
// Callback provided to Preroll().
PipelineStatusCB preroll_cb_;
- // Typically calls base::Time::Now() but can be overridden by a test.
+ // Typically calls base::TimeTicks::Now() but can be overridden by a test.
NowCB now_cb_;
// After Initialize() has completed, all variables below must be accessed
@@ -256,7 +256,7 @@ class MEDIA_EXPORT AudioRendererImpl
// empty till that time. Workaround is not bulletproof, as we don't exactly
// know when that particular data would start playing, but it is much better
// than nothing.
- base::Time earliest_end_time_;
+ base::TimeTicks earliest_end_time_;
size_t total_frames_filled_;
bool underflow_disabled_;
diff --git a/media/filters/audio_renderer_impl_unittest.cc b/media/filters/audio_renderer_impl_unittest.cc
index 19a7d14..d9c8f3d 100644
--- a/media/filters/audio_renderer_impl_unittest.cc
+++ b/media/filters/audio_renderer_impl_unittest.cc
@@ -19,6 +19,7 @@
#include "testing/gtest/include/gtest/gtest.h"
using ::base::Time;
+using ::base::TimeTicks;
using ::base::TimeDelta;
using ::testing::_;
using ::testing::AnyNumber;
@@ -334,7 +335,7 @@ class AudioRendererImplTest : public ::testing::Test {
scoped_ptr<AudioRendererImpl> renderer_;
private:
- Time GetTime() {
+ TimeTicks GetTime() {
base::AutoLock auto_lock(lock_);
return time_;
}
@@ -367,7 +368,7 @@ class AudioRendererImplTest : public ::testing::Test {
// Used for stubbing out time in the audio callback thread.
base::Lock lock_;
- Time time_;
+ TimeTicks time_;
// Used for satisfying reads.
AudioDecoder::ReadCB read_cb_;