summaryrefslogtreecommitdiffstats
path: root/content/renderer
diff options
context:
space:
mode:
authorenal@chromium.org <enal@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2011-07-15 21:51:27 +0000
committerenal@chromium.org <enal@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2011-07-15 21:51:27 +0000
commit06e639cdc5cf9b7d3e97c7d7c57920b56b4b6e17 (patch)
tree048f6ff41defac63e23924bc88b16a6fb439ed4b /content/renderer
parentd466b8a0b44ef89c81fa4ede86de1b488b8ec07d (diff)
downloadchromium_src-06e639cdc5cf9b7d3e97c7d7c57920b56b4b6e17.zip
chromium_src-06e639cdc5cf9b7d3e97c7d7c57920b56b4b6e17.tar.gz
chromium_src-06e639cdc5cf9b7d3e97c7d7c57920b56b4b6e17.tar.bz2
Fix a problem where the 'ended' event was fired before the stream really ended.
That caused the impression that rewind did not work. With this change a small JS program var a = new Audio("file:///home/enal/temp/click2/click2.wav"); var num_played = 0; a.addEventListener('canplaythrough', function() { a.play(); }); a.addEventListener('ended', function() { num_played ++; if (num_played < 10) { a.currentTime = 0; a.play(); } }); works correctly: you hear 10 clicks one after another, and it takes ~1.5 seconds to play all 10 sounds (one click is 146ms). Current Chrome plays only the beginnings of the first 9 clicks and then the entire 10th click -- the 'ended' event fires too early, so rewind stops audio playback for all clicks but the last one. With this fix you can easily create a pool of audio objects -- on the 'ended' event just add the audio object back to the pool. The fix consists of 3 parts: 1) For the low-latency code path, pass the entire "audio state" object to the renderer process. That allows the renderer to take into account the number of pending bytes in the buffer. 2) When using the low-latency code path the renderer not only fills the buffer with data, but also writes the length of the data into the first word of the buffer. That allows the host process to pass correct byte counts to the renderer. 3) The renderer now keeps track of the earliest time playback can end, based on the number of rendered bytes, and will not invoke the 'ended' callback until that time. BUG=http://code.google.com/p/chromium/issues/detail?id=78992 http://codereview.chromium.org/7328030 Review URL: http://codereview.chromium.org/7328030 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@92749 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'content/renderer')
-rw-r--r--content/renderer/media/audio_device.cc28
-rw-r--r--content/renderer/media/audio_input_device.cc28
-rw-r--r--content/renderer/media/audio_renderer_impl.cc62
-rw-r--r--content/renderer/media/audio_renderer_impl.h34
-rw-r--r--content/renderer/media/audio_renderer_impl_unittest.cc12
5 files changed, 130 insertions, 34 deletions
diff --git a/content/renderer/media/audio_device.cc b/content/renderer/media/audio_device.cc
index e00b370..d812c83 100644
--- a/content/renderer/media/audio_device.cc
+++ b/content/renderer/media/audio_device.cc
@@ -9,6 +9,7 @@
#include "content/common/media/audio_messages.h"
#include "content/common/view_messages.h"
#include "content/renderer/render_thread.h"
+#include "media/audio/audio_buffers_state.h"
#include "media/audio/audio_util.h"
AudioDevice::AudioDevice(size_t buffer_size,
@@ -161,6 +162,8 @@ void AudioDevice::OnLowLatencyCreated(
shared_memory_.reset(new base::SharedMemory(handle, false));
shared_memory_->Map(length);
+ DCHECK_GE(length, buffer_size_ * sizeof(int16) + sizeof(uint32));
+
socket_.reset(new base::SyncSocket(socket_handle));
// Allow the client to pre-populate the buffer.
FireRenderCallback();
@@ -186,16 +189,17 @@ void AudioDevice::Send(IPC::Message* message) {
void AudioDevice::Run() {
audio_thread_->SetThreadPriority(base::kThreadPriority_RealtimeAudio);
- int pending_data;
+ AudioBuffersState buffer_state;
const int samples_per_ms = static_cast<int>(sample_rate_) / 1000;
const int bytes_per_ms = channels_ * (bits_per_sample_ / 8) * samples_per_ms;
- while (sizeof(pending_data) == socket_->Receive(&pending_data,
- sizeof(pending_data)) &&
- pending_data >= 0) {
- // Convert the number of pending bytes in the render buffer
- // into milliseconds.
- audio_delay_milliseconds_ = pending_data / bytes_per_ms;
+ while (buffer_state.Receive(socket_.get()) &&
+ (buffer_state.total_bytes() >= 0)) {
+ {
+ // Convert the number of pending bytes in the render buffer
+ // into milliseconds.
+ audio_delay_milliseconds_ = buffer_state.total_bytes() / bytes_per_ms;
+ }
FireRenderCallback();
}
@@ -207,7 +211,13 @@ void AudioDevice::FireRenderCallback() {
callback_->Render(audio_data_, buffer_size_, audio_delay_milliseconds_);
// Interleave, scale, and clip to int16.
- int16* output_buffer16 = static_cast<int16*>(shared_memory_data());
- media::InterleaveFloatToInt16(audio_data_, output_buffer16, buffer_size_);
+ media::InterleaveFloatToInt16(
+ audio_data_,
+ static_cast<int16*>(media::GetDataPointer(shared_memory())),
+ buffer_size_);
+
+ // Consumer should know how much data was written.
+ media::SetActualDataSizeInBytes(shared_memory(),
+ buffer_size_ * sizeof(int16));
}
}
diff --git a/content/renderer/media/audio_input_device.cc b/content/renderer/media/audio_input_device.cc
index 5bfecea..de1bfb0 100644
--- a/content/renderer/media/audio_input_device.cc
+++ b/content/renderer/media/audio_input_device.cc
@@ -9,6 +9,7 @@
#include "content/common/media/audio_messages.h"
#include "content/common/view_messages.h"
#include "content/renderer/render_thread.h"
+#include "media/audio/audio_buffers_state.h"
#include "media/audio/audio_util.h"
AudioInputDevice::AudioInputDevice(size_t buffer_size,
@@ -54,8 +55,8 @@ bool AudioInputDevice::Start() {
params.samples_per_packet = buffer_size_;
ChildProcess::current()->io_message_loop()->PostTask(
- FROM_HERE,
- NewRunnableMethod(this, &AudioInputDevice::InitializeOnIOThread, params));
+ FROM_HERE,
+ NewRunnableMethod(this, &AudioInputDevice::InitializeOnIOThread, params));
return true;
}
@@ -128,6 +129,8 @@ void AudioInputDevice::OnLowLatencyCreated(
shared_memory_.reset(new base::SharedMemory(handle, false));
shared_memory_->Map(length);
+ DCHECK_GE(length, buffer_size_ + sizeof(uint32));
+
socket_.reset(new base::SyncSocket(socket_handle));
audio_thread_.reset(
@@ -152,19 +155,18 @@ void AudioInputDevice::Send(IPC::Message* message) {
void AudioInputDevice::Run() {
audio_thread_->SetThreadPriority(base::kThreadPriority_RealtimeAudio);
- int pending_data;
+ AudioBuffersState buffer_state;
const int samples_per_ms = static_cast<int>(sample_rate_) / 1000;
const int bytes_per_ms = channels_ * (bits_per_sample_ / 8) * samples_per_ms;
- while (sizeof(pending_data) == socket_->Receive(&pending_data,
- sizeof(pending_data)) &&
- pending_data >= 0) {
- // TODO(henrika): investigate the provided |pending_data| value
- // and ensure that it is actually an accurate delay estimation.
+ while (buffer_state.Receive(socket_.get()) &&
+ (buffer_state.total_bytes() >= 0)) {
+ // TODO(henrika): investigate the provided |buffer_state.total_bytes()|
+ // value and ensure that it is actually an accurate delay estimation.
// Convert the number of pending bytes in the capture buffer
// into milliseconds.
- audio_delay_milliseconds_ = pending_data / bytes_per_ms;
+ audio_delay_milliseconds_ = buffer_state.total_bytes() / bytes_per_ms;
FireCaptureCallback();
}
@@ -174,10 +176,14 @@ void AudioInputDevice::FireCaptureCallback() {
if (!callback_)
return;
- const size_t number_of_frames = buffer_size_;
+ uint32 actual_buffer_size = media::GetActualDataSizeInBytes(shared_memory()) /
+ sizeof(int16);
+ DCHECK_LE(actual_buffer_size, buffer_size_);
+ const size_t number_of_frames = actual_buffer_size;
// Read 16-bit samples from shared memory (browser writes to it).
- int16* input_audio = static_cast<int16*>(shared_memory_data());
+ int16* input_audio = static_cast<int16*>(
+ media::GetDataPointer(shared_memory()));
const int bytes_per_sample = sizeof(input_audio[0]);
// Deinterleave each channel and convert to 32-bit floating-point
diff --git a/content/renderer/media/audio_renderer_impl.cc b/content/renderer/media/audio_renderer_impl.cc
index 480da56..b3ad114 100644
--- a/content/renderer/media/audio_renderer_impl.cc
+++ b/content/renderer/media/audio_renderer_impl.cc
@@ -6,13 +6,17 @@
#include <math.h>
-#include "content/common/child_process.h"
+#include <algorithm>
+
#include "base/command_line.h"
+#include "content/common/child_process.h"
#include "content/common/content_switches.h"
#include "content/common/media/audio_messages.h"
#include "content/renderer/render_thread.h"
#include "content/renderer/render_view.h"
+#include "media/audio/audio_buffers_state.h"
#include "media/audio/audio_output_controller.h"
+#include "media/audio/audio_util.h"
#include "media/base/filter_host.h"
// Static variable that says what code path we are using -- low or high
@@ -62,6 +66,23 @@ base::TimeDelta AudioRendererImpl::ConvertToDuration(int bytes) {
return base::TimeDelta();
}
+void AudioRendererImpl::UpdateEarliestEndTime(int bytes_filled,
+ base::TimeDelta request_delay,
+ base::Time time_now) {
+ if (bytes_filled != 0) {
+ base::TimeDelta predicted_play_time = ConvertToDuration(bytes_filled);
+ float playback_rate = GetPlaybackRate();
+ if (playback_rate != 1.0f) {
+ predicted_play_time = base::TimeDelta::FromMicroseconds(
+ static_cast<int64>(ceil(predicted_play_time.InMicroseconds() *
+ playback_rate)));
+ }
+ earliest_end_time_ =
+ std::max(earliest_end_time_,
+ time_now + request_delay + predicted_play_time);
+ }
+}
+
bool AudioRendererImpl::OnInitialize(const media::AudioDecoderConfig& config) {
AudioParameters params(config);
params.format = AudioParameters::AUDIO_PCM_LINEAR;
@@ -320,6 +341,7 @@ void AudioRendererImpl::CreateStreamTask(const AudioParameters& audio_params) {
void AudioRendererImpl::PlayTask() {
DCHECK(MessageLoop::current() == ChildProcess::current()->io_message_loop());
+ earliest_end_time_ = base::Time::Now();
Send(new AudioHostMsg_PlayStream(stream_id_));
}
@@ -332,6 +354,7 @@ void AudioRendererImpl::PauseTask() {
void AudioRendererImpl::SeekTask() {
DCHECK(MessageLoop::current() == ChildProcess::current()->io_message_loop());
+ earliest_end_time_ = base::Time::Now();
// We have to pause the audio stream before we can flush.
Send(new AudioHostMsg_PauseStream(stream_id_));
Send(new AudioHostMsg_FlushStream(stream_id_));
@@ -393,10 +416,18 @@ void AudioRendererImpl::NotifyPacketReadyTask() {
GetPlaybackRate())));
}
+ bool buffer_empty = (request_buffers_state_.pending_bytes == 0) &&
+ (current_time >= earliest_end_time_);
+
+ // For high latency mode we don't write length into shared memory,
+ // it is explicit part of AudioHostMsg_NotifyPacketReady() message,
+ // so no need to reserve first word of buffer for length.
uint32 filled = FillBuffer(static_cast<uint8*>(shared_memory_->memory()),
shared_memory_size_, request_delay,
- request_buffers_state_.pending_bytes == 0);
+ buffer_empty);
+ UpdateEarliestEndTime(filled, request_delay, current_time);
pending_request_ = false;
+
// Then tell browser process we are done filling into the buffer.
Send(new AudioHostMsg_NotifyPacketReady(stream_id_, filled));
}
@@ -419,12 +450,11 @@ void AudioRendererImpl::WillDestroyCurrentMessageLoop() {
void AudioRendererImpl::Run() {
audio_thread_->SetThreadPriority(base::kThreadPriority_RealtimeAudio);
- int bytes;
- while (sizeof(bytes) == socket_->Receive(&bytes, sizeof(bytes))) {
- LOG(ERROR) << "+++ bytes: " << bytes;
- if (bytes == media::AudioOutputController::kPauseMark)
+ AudioBuffersState buffer_state;
+ while (buffer_state.Receive(socket_.get())) {
+ if (buffer_state.total_bytes() == media::AudioOutputController::kPauseMark)
continue;
- else if (bytes < 0)
+ else if (buffer_state.total_bytes() < 0)
break;
base::AutoLock auto_lock(lock_);
if (stopped_)
@@ -433,17 +463,25 @@ void AudioRendererImpl::Run() {
if (playback_rate <= 0.0f)
continue;
DCHECK(shared_memory_.get());
- base::TimeDelta request_delay = ConvertToDuration(bytes);
+ base::TimeDelta request_delay =
+ ConvertToDuration(buffer_state.total_bytes());
+
// We need to adjust the delay according to playback rate.
if (playback_rate != 1.0f) {
request_delay = base::TimeDelta::FromMicroseconds(
static_cast<int64>(ceil(request_delay.InMicroseconds() *
playback_rate)));
}
- FillBuffer(static_cast<uint8*>(shared_memory_->memory()),
- shared_memory_size_,
- request_delay,
- true /* buffers empty */);
+ base::Time time_now = base::Time::Now();
+ bool buffer_empty = (buffer_state.pending_bytes == 0) &&
+ (time_now >= earliest_end_time_);
+ void *data_buffer = media::GetDataPointer(shared_memory_.get());
+ uint32 size = FillBuffer(static_cast<uint8*>(data_buffer),
+ media::GetMaxDataSizeInBytes(shared_memory_size_),
+ request_delay,
+ buffer_empty);
+ media::SetActualDataSizeInBytes(shared_memory_.get(), size);
+ UpdateEarliestEndTime(size, request_delay, time_now);
}
}
diff --git a/content/renderer/media/audio_renderer_impl.h b/content/renderer/media/audio_renderer_impl.h
index 5ba9bc5..d9c73d0 100644
--- a/content/renderer/media/audio_renderer_impl.h
+++ b/content/renderer/media/audio_renderer_impl.h
@@ -58,7 +58,7 @@ class AudioRendererImpl : public media::AudioRendererBase,
public MessageLoop::DestructionObserver {
public:
// Methods called on Render thread ------------------------------------------
- explicit AudioRendererImpl();
+ AudioRendererImpl();
virtual ~AudioRendererImpl();
// Methods called on IO thread ----------------------------------------------
@@ -105,6 +105,7 @@ class AudioRendererImpl : public media::AudioRendererBase,
FRIEND_TEST_ALL_PREFIXES(AudioRendererImplTest, Stop);
FRIEND_TEST_ALL_PREFIXES(AudioRendererImplTest,
DestroyedMessageLoop_ConsumeAudioSamples);
+ FRIEND_TEST_ALL_PREFIXES(AudioRendererImplTest, UpdateEarliestEndTime);
// Helper methods.
// Convert number of bytes to duration of time using information about the
// number of channels, sample rate and sample bits.
@@ -138,16 +139,29 @@ class AudioRendererImpl : public media::AudioRendererBase,
virtual void CreateAudioThread();
// Accessors used by tests.
- LatencyType latency_type() {
+ LatencyType latency_type() const {
return latency_type_;
}
+ base::Time earliest_end_time() const {
+ return earliest_end_time_;
+ }
+
+ uint32 bytes_per_second() const {
+ return bytes_per_second_;
+ }
+
// Should be called before any class instance is created.
static void set_latency_type(LatencyType latency_type);
// Helper method for IPC send calls.
void Send(IPC::Message* message);
+ // Estimate earliest time when current buffer can stop playing.
+ void UpdateEarliestEndTime(int bytes_filled,
+ base::TimeDelta request_delay,
+ base::Time time_now);
+
// Used to calculate audio delay given bytes.
uint32 bytes_per_second_;
@@ -188,6 +202,22 @@ class AudioRendererImpl : public media::AudioRendererBase,
// Remaining bytes for prerolling to complete.
uint32 preroll_bytes_;
+ // We're supposed to know amount of audio data OS or hardware buffered, but
+ // that is not always so -- on my Linux box
+ // AudioBuffersState::hardware_delay_bytes never reaches 0.
+ //
+ // As a result we cannot use it to find when the stream ends. If we just
+ // ignore buffered data we will notify the host that the stream ended before
+ // it actually did; I've seen it happen ~140ms too early when playing a
+ // ~150ms file.
+ //
+ // Instead of trying to invent OS-specific solution for each and every OS we
+ // are supporting, use simple workaround: every time we fill the buffer we
+ // remember when it should stop playing, and do not assume that buffer is
+ // empty till that time. Workaround is not bulletproof, as we don't exactly
+ // know when that particular data would start playing, but it is much better
+ // than nothing.
+ base::Time earliest_end_time_;
+
DISALLOW_COPY_AND_ASSIGN(AudioRendererImpl);
};
diff --git a/content/renderer/media/audio_renderer_impl_unittest.cc b/content/renderer/media/audio_renderer_impl_unittest.cc
index 478eb63..1a6b070 100644
--- a/content/renderer/media/audio_renderer_impl_unittest.cc
+++ b/content/renderer/media/audio_renderer_impl_unittest.cc
@@ -325,3 +325,15 @@ TEST_F(AudioRendererImplTest, DestroyedMessageLoop_ConsumeAudioSamples) {
renderer_->ConsumeAudioSamples(buffer);
renderer_->Stop(media::NewExpectedCallback());
}
+
+TEST_F(AudioRendererImplTest, UpdateEarliestEndTime) {
+ renderer_->SetPlaybackRate(1.0f);
+ base::Time time_now = base::Time(); // Null time by default.
+ renderer_->UpdateEarliestEndTime(renderer_->bytes_per_second(),
+ base::TimeDelta::FromMilliseconds(100),
+ time_now);
+ int time_delta = (renderer_->earliest_end_time() - time_now).InMilliseconds();
+ EXPECT_EQ(1100, time_delta);
+ renderer_->Stop(media::NewExpectedCallback());
+}
+