-rw-r--r--  content/browser/renderer_host/media/audio_input_sync_writer.cc |  2
-rw-r--r--  content/browser/renderer_host/media/audio_renderer_host.cc     | 14
-rw-r--r--  content/browser/renderer_host/media/audio_sync_reader.cc       | 22
-rw-r--r--  content/browser/renderer_host/media/audio_sync_reader.h        |  2
-rw-r--r--  content/renderer/media/audio_device.cc                          | 15
-rw-r--r--  content/renderer/media/audio_input_device.cc                    |  6
-rw-r--r--  content/renderer/media/audio_renderer_impl.cc                   | 52
-rw-r--r--  content/renderer/media/audio_renderer_impl.h                    | 38
-rw-r--r--  content/renderer/media/audio_renderer_impl_unittest.cc          | 22
-rw-r--r--  media/audio/audio_util.cc                                       | 40
-rw-r--r--  media/audio/audio_util.h                                        | 15
-rw-r--r--  webkit/glue/webmediaplayer_impl.cc                              |  6
12 files changed, 203 insertions, 31 deletions
diff --git a/content/browser/renderer_host/media/audio_input_sync_writer.cc b/content/browser/renderer_host/media/audio_input_sync_writer.cc
index b027479..8add5b5 100644
--- a/content/browser/renderer_host/media/audio_input_sync_writer.cc
+++ b/content/browser/renderer_host/media/audio_input_sync_writer.cc
@@ -4,6 +4,8 @@
#include "content/browser/renderer_host/media/audio_input_sync_writer.h"
+#include <algorithm>
+
#include "base/process_util.h"
#include "base/shared_memory.h"
diff --git a/content/browser/renderer_host/media/audio_renderer_host.cc b/content/browser/renderer_host/media/audio_renderer_host.cc
index 1e74d34..ca8fd30 100644
--- a/content/browser/renderer_host/media/audio_renderer_host.cc
+++ b/content/browser/renderer_host/media/audio_renderer_host.cc
@@ -12,6 +12,7 @@
#include "content/browser/renderer_host/media/media_observer.h"
#include "content/browser/resource_context.h"
#include "content/common/media/audio_messages.h"
+#include "media/audio/audio_util.h"
#include "ipc/ipc_logging.h"
@@ -144,8 +145,10 @@ void AudioRendererHost::DoCompleteCreation(
}
Send(new AudioMsg_NotifyLowLatencyStreamCreated(
- entry->stream_id, foreign_memory_handle,
- foreign_socket_handle, entry->shared_memory.created_size()));
+ entry->stream_id,
+ foreign_memory_handle,
+ foreign_socket_handle,
+ media::PacketSizeSizeInBytes(entry->shared_memory.created_size())));
return;
}
@@ -242,7 +245,12 @@ void AudioRendererHost::OnCreateStream(
scoped_ptr<AudioEntry> entry(new AudioEntry());
// Create the shared memory and share with the renderer process.
- if (!entry->shared_memory.CreateAndMapAnonymous(packet_size)) {
+ uint32 shared_memory_size = packet_size;
+ if (low_latency) {
+ shared_memory_size =
+ media::TotalSharedMemorySizeInBytes(shared_memory_size);
+ }
+ if (!entry->shared_memory.CreateAndMapAnonymous(shared_memory_size)) {
// If creation of shared memory failed then send an error message.
SendErrorMessage(stream_id);
return;
diff --git a/content/browser/renderer_host/media/audio_sync_reader.cc b/content/browser/renderer_host/media/audio_sync_reader.cc
index b6f779e..98c069f 100644
--- a/content/browser/renderer_host/media/audio_sync_reader.cc
+++ b/content/browser/renderer_host/media/audio_sync_reader.cc
@@ -4,11 +4,15 @@
#include "content/browser/renderer_host/media/audio_sync_reader.h"
+#include <algorithm>
+
#include "base/process_util.h"
#include "base/shared_memory.h"
+#include "media/audio/audio_buffers_state.h"
+#include "media/audio/audio_util.h"
AudioSyncReader::AudioSyncReader(base::SharedMemory* shared_memory)
- : shared_memory_(shared_memory) {
+ : shared_memory_(shared_memory) {
}
AudioSyncReader::~AudioSyncReader() {
@@ -20,9 +24,21 @@ void AudioSyncReader::UpdatePendingBytes(uint32 bytes) {
}
uint32 AudioSyncReader::Read(void* data, uint32 size) {
- uint32 read_size = std::min(size, shared_memory_->created_size());
+ uint32 max_size = media::PacketSizeSizeInBytes(
+ shared_memory_->created_size());
+ uint32 read_size = std::min(media::GetActualDataSizeInBytes(shared_memory_,
+ max_size),
+ size);
+
+ // Get the data from the buffer.
memcpy(data, shared_memory_->memory(), read_size);
- memset(shared_memory_->memory(), 0, shared_memory_->created_size());
+
+ // Zero out the entire buffer.
+ memset(shared_memory_->memory(), 0, max_size);
+
+ // Store the max data length as the size, in case the client does not set it.
+ media::SetActualDataSizeInBytes(shared_memory_, max_size, max_size);
+
return read_size;
}
diff --git a/content/browser/renderer_host/media/audio_sync_reader.h b/content/browser/renderer_host/media/audio_sync_reader.h
index ddb5d61..acb064d 100644
--- a/content/browser/renderer_host/media/audio_sync_reader.h
+++ b/content/browser/renderer_host/media/audio_sync_reader.h
@@ -12,9 +12,7 @@
#include "media/audio/audio_output_controller.h"
namespace base {
-
class SharedMemory;
-
}
// A AudioOutputController::SyncReader implementation using SyncSocket. This
diff --git a/content/renderer/media/audio_device.cc b/content/renderer/media/audio_device.cc
index 5a53236..2cfb981 100644
--- a/content/renderer/media/audio_device.cc
+++ b/content/renderer/media/audio_device.cc
@@ -162,6 +162,8 @@ void AudioDevice::OnLowLatencyCreated(
shared_memory_.reset(new base::SharedMemory(handle, false));
shared_memory_->Map(length);
+ DCHECK_GE(length, buffer_size_ * sizeof(int16) * channels_);
+
socket_.reset(new base::SyncSocket(socket_handle));
// Allow the client to pre-populate the buffer.
FireRenderCallback();
@@ -191,13 +193,13 @@ void AudioDevice::Run() {
const int samples_per_ms = static_cast<int>(sample_rate_) / 1000;
const int bytes_per_ms = channels_ * (bits_per_sample_ / 8) * samples_per_ms;
- while (sizeof(pending_data) == socket_->Receive(&pending_data,
- sizeof(pending_data)) &&
- pending_data >= 0) {
+ while ((sizeof(pending_data) == socket_->Receive(&pending_data,
+ sizeof(pending_data))) &&
+ (pending_data >= 0)) {
+
// Convert the number of pending bytes in the render buffer
// into milliseconds.
audio_delay_milliseconds_ = pending_data / bytes_per_ms;
-
FireRenderCallback();
}
}
@@ -210,8 +212,9 @@ void AudioDevice::FireRenderCallback() {
callback_->Render(audio_data_, buffer_size_, audio_delay_milliseconds_);
// Interleave, scale, and clip to int16.
- int16* output_buffer16 = static_cast<int16*>(shared_memory_data());
- media::InterleaveFloatToInt16(audio_data_, output_buffer16, buffer_size_);
+ media::InterleaveFloatToInt16(audio_data_,
+ static_cast<int16*>(shared_memory_data()),
+ buffer_size_);
}
}
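
The loop above converts the pending byte count received over the sync socket into a delay in milliseconds. A quick standalone sketch of that arithmetic (the 48 kHz, stereo, 16-bit parameters are illustrative assumptions, not values taken from the patch):

#include <cstdint>
#include <iostream>

int main() {
  // Illustrative stream parameters; AudioDevice gets the real ones at creation.
  const int sample_rate = 48000;
  const int channels = 2;
  const int bits_per_sample = 16;

  // Same conversion as in AudioDevice::Run().
  const int samples_per_ms = sample_rate / 1000;                               // 48
  const int bytes_per_ms = channels * (bits_per_sample / 8) * samples_per_ms;  // 192

  const int32_t pending_data = 19200;  // bytes reported by the browser side
  std::cout << "delay: " << pending_data / bytes_per_ms << " ms\n";            // 100 ms
  return 0;
}
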
diff --git a/content/renderer/media/audio_input_device.cc b/content/renderer/media/audio_input_device.cc
index 5bfecea..284e2e9 100644
--- a/content/renderer/media/audio_input_device.cc
+++ b/content/renderer/media/audio_input_device.cc
@@ -54,8 +54,8 @@ bool AudioInputDevice::Start() {
params.samples_per_packet = buffer_size_;
ChildProcess::current()->io_message_loop()->PostTask(
- FROM_HERE,
- NewRunnableMethod(this, &AudioInputDevice::InitializeOnIOThread, params));
+ FROM_HERE,
+ NewRunnableMethod(this, &AudioInputDevice::InitializeOnIOThread, params));
return true;
}
@@ -158,7 +158,7 @@ void AudioInputDevice::Run() {
while (sizeof(pending_data) == socket_->Receive(&pending_data,
sizeof(pending_data)) &&
- pending_data >= 0) {
+ pending_data >= 0) {
// TODO(henrika): investigate the provided |pending_data| value
// and ensure that it is actually an accurate delay estimation.
diff --git a/content/renderer/media/audio_renderer_impl.cc b/content/renderer/media/audio_renderer_impl.cc
index 14643ab..899bf44 100644
--- a/content/renderer/media/audio_renderer_impl.cc
+++ b/content/renderer/media/audio_renderer_impl.cc
@@ -6,13 +6,17 @@
#include <math.h>
-#include "content/common/child_process.h"
+#include <algorithm>
+
#include "base/command_line.h"
+#include "content/common/child_process.h"
#include "content/common/content_switches.h"
#include "content/common/media/audio_messages.h"
#include "content/renderer/render_thread.h"
#include "content/renderer/render_view.h"
+#include "media/audio/audio_buffers_state.h"
#include "media/audio/audio_output_controller.h"
+#include "media/audio/audio_util.h"
#include "media/base/filter_host.h"
// Static variable that says what code path we are using -- low or high
@@ -62,6 +66,23 @@ base::TimeDelta AudioRendererImpl::ConvertToDuration(int bytes) {
return base::TimeDelta();
}
+void AudioRendererImpl::UpdateEarliestEndTime(int bytes_filled,
+ base::TimeDelta request_delay,
+ base::Time time_now) {
+ if (bytes_filled != 0) {
+ base::TimeDelta predicted_play_time = ConvertToDuration(bytes_filled);
+ float playback_rate = GetPlaybackRate();
+ if (playback_rate != 1.0f) {
+ predicted_play_time = base::TimeDelta::FromMicroseconds(
+ static_cast<int64>(ceil(predicted_play_time.InMicroseconds() *
+ playback_rate)));
+ }
+ earliest_end_time_ =
+ std::max(earliest_end_time_,
+ time_now + request_delay + predicted_play_time);
+ }
+}
+
bool AudioRendererImpl::OnInitialize(const media::AudioDecoderConfig& config) {
AudioParameters params(config);
params.format = AudioParameters::AUDIO_PCM_LINEAR;
@@ -244,7 +265,7 @@ void AudioRendererImpl::OnLowLatencyCreated(
return;
shared_memory_.reset(new base::SharedMemory(handle, false));
- shared_memory_->Map(length);
+ shared_memory_->Map(media::TotalSharedMemorySizeInBytes(length));
shared_memory_size_ = length;
CreateSocket(socket_handle);
@@ -320,6 +341,7 @@ void AudioRendererImpl::CreateStreamTask(const AudioParameters& audio_params) {
void AudioRendererImpl::PlayTask() {
DCHECK(MessageLoop::current() == ChildProcess::current()->io_message_loop());
+ earliest_end_time_ = base::Time::Now();
Send(new AudioHostMsg_PlayStream(stream_id_));
}
@@ -332,6 +354,7 @@ void AudioRendererImpl::PauseTask() {
void AudioRendererImpl::SeekTask() {
DCHECK(MessageLoop::current() == ChildProcess::current()->io_message_loop());
+ earliest_end_time_ = base::Time::Now();
// We have to pause the audio stream before we can flush.
Send(new AudioHostMsg_PauseStream(stream_id_));
Send(new AudioHostMsg_FlushStream(stream_id_));
@@ -396,10 +419,18 @@ void AudioRendererImpl::NotifyPacketReadyTask() {
GetPlaybackRate())));
}
+ bool buffer_empty = (request_buffers_state_.pending_bytes == 0) &&
+ (current_time >= earliest_end_time_);
+
+ // In high-latency mode we don't write the length into shared memory; it is
+ // passed explicitly in the AudioHostMsg_NotifyPacketReady() message, so
+ // there is no need to reserve the trailing size word in the buffer.
uint32 filled = FillBuffer(static_cast<uint8*>(shared_memory_->memory()),
shared_memory_size_, request_delay,
- request_buffers_state_.pending_bytes == 0);
+ buffer_empty);
+ UpdateEarliestEndTime(filled, request_delay, current_time);
pending_request_ = false;
+
// Then tell browser process we are done filling into the buffer.
Send(new AudioHostMsg_NotifyPacketReady(stream_id_, filled));
}
@@ -426,7 +457,6 @@ void AudioRendererImpl::Run() {
int bytes;
while (sizeof(bytes) == socket_->Receive(&bytes, sizeof(bytes))) {
- LOG(ERROR) << "+++ bytes: " << bytes;
if (bytes == media::AudioOutputController::kPauseMark)
continue;
else if (bytes < 0)
@@ -439,16 +469,22 @@ void AudioRendererImpl::Run() {
continue;
DCHECK(shared_memory_.get());
base::TimeDelta request_delay = ConvertToDuration(bytes);
+
// We need to adjust the delay according to playback rate.
if (playback_rate != 1.0f) {
request_delay = base::TimeDelta::FromMicroseconds(
static_cast<int64>(ceil(request_delay.InMicroseconds() *
playback_rate)));
}
- FillBuffer(static_cast<uint8*>(shared_memory_->memory()),
- shared_memory_size_,
- request_delay,
- true /* buffers empty */);
+ base::Time time_now = base::Time::Now();
+ uint32 size = FillBuffer(static_cast<uint8*>(shared_memory_->memory()),
+ shared_memory_size_,
+ request_delay,
+ time_now >= earliest_end_time_);
+ media::SetActualDataSizeInBytes(shared_memory_.get(),
+ shared_memory_size_,
+ size);
+ UpdateEarliestEndTime(size, request_delay, time_now);
}
}
diff --git a/content/renderer/media/audio_renderer_impl.h b/content/renderer/media/audio_renderer_impl.h
index c234da0..65e2e73 100644
--- a/content/renderer/media/audio_renderer_impl.h
+++ b/content/renderer/media/audio_renderer_impl.h
@@ -59,7 +59,7 @@ class AudioRendererImpl
public MessageLoop::DestructionObserver {
public:
// Methods called on Render thread ------------------------------------------
- explicit AudioRendererImpl();
+ AudioRendererImpl();
virtual ~AudioRendererImpl();
// Methods called on IO thread ----------------------------------------------
@@ -106,6 +106,7 @@ class AudioRendererImpl
FRIEND_TEST_ALL_PREFIXES(AudioRendererImplTest, Stop);
FRIEND_TEST_ALL_PREFIXES(AudioRendererImplTest,
DestroyedMessageLoop_ConsumeAudioSamples);
+ FRIEND_TEST_ALL_PREFIXES(AudioRendererImplTest, UpdateEarliestEndTime);
// Helper methods.
// Convert number of bytes to duration of time using information about the
// number of channels, sample rate and sample bits.
@@ -139,16 +140,33 @@ class AudioRendererImpl
virtual void CreateAudioThread();
// Accessors used by tests.
- LatencyType latency_type() {
+ static LatencyType latency_type() {
return latency_type_;
}
+ base::Time earliest_end_time() const {
+ return earliest_end_time_;
+ }
+
+ void set_earliest_end_time(const base::Time& earliest_end_time) {
+ earliest_end_time_ = earliest_end_time;
+ }
+
+ uint32 bytes_per_second() const {
+ return bytes_per_second_;
+ }
+
// Should be called before any class instance is created.
static void set_latency_type(LatencyType latency_type);
// Helper method for IPC send calls.
void Send(IPC::Message* message);
+ // Estimates the earliest time at which the current buffer can finish playing.
+ void UpdateEarliestEndTime(int bytes_filled,
+ base::TimeDelta request_delay,
+ base::Time time_now);
+
// Used to calculate audio delay given bytes.
uint32 bytes_per_second_;
@@ -189,6 +207,22 @@ class AudioRendererImpl
// Remaining bytes for prerolling to complete.
uint32 preroll_bytes_;
+ // We're supposed to know how much audio data the OS or hardware has
+ // buffered, but that is not always so -- on my Linux box
+ // AudioBuffersState::hardware_delay_bytes never reaches 0.
+ //
+ // As a result we cannot use it to find out when the stream ends. If we just
+ // ignore the buffered data, we will notify the host that the stream ended
+ // before it actually did; I've seen it happen ~140ms too early on a ~150ms
+ // file.
+ //
+ // Instead of inventing an OS-specific solution for every OS we support, use
+ // a simple workaround: every time we fill the buffer, remember when it
+ // should stop playing, and do not assume the buffer is empty until then.
+ // The workaround is not bulletproof, since we don't know exactly when that
+ // data will start playing, but it is much better than nothing.
+ base::Time earliest_end_time_;
+
DISALLOW_COPY_AND_ASSIGN(AudioRendererImpl);
};
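
The earliest-end-time workaround described in the comment above can be sketched standalone. Here std::chrono stands in for base::Time/base::TimeDelta, and the estimator class and stream parameters are illustrative, not part of the patch. As in UpdateEarliestEndTime(), filling one second worth of bytes at playback rate 1.0 with a 100 ms delay pushes the end time 1100 ms past "now", which matches the new unit test below.

#include <algorithm>
#include <chrono>
#include <cmath>
#include <cstdint>
#include <iostream>

using Clock = std::chrono::system_clock;

// Illustrative stand-in for the earliest-end-time bookkeeping in
// AudioRendererImpl; not the Chromium class.
class EndTimeEstimator {
 public:
  EndTimeEstimator(uint32_t bytes_per_second, float playback_rate)
      : bytes_per_second_(bytes_per_second), playback_rate_(playback_rate) {}

  // Mirrors UpdateEarliestEndTime(): after filling |bytes_filled| bytes, the
  // buffer cannot be empty before now + delay + predicted play time.
  void Update(int bytes_filled, std::chrono::microseconds request_delay,
              Clock::time_point now) {
    if (bytes_filled == 0)
      return;
    const int64_t play_us = static_cast<int64_t>(std::ceil(
        bytes_filled * 1000000.0 / bytes_per_second_ * playback_rate_));
    earliest_end_time_ = std::max(
        earliest_end_time_,
        now + request_delay + std::chrono::microseconds(play_us));
  }

  Clock::time_point earliest_end_time() const { return earliest_end_time_; }

 private:
  uint32_t bytes_per_second_;
  float playback_rate_;
  Clock::time_point earliest_end_time_;  // defaults to the epoch ("in the past")
};

int main() {
  const uint32_t bytes_per_second = 48000 * 2 * 2;  // illustrative stream
  EndTimeEstimator estimator(bytes_per_second, 1.0f);

  const Clock::time_point now = Clock::now();
  estimator.Update(bytes_per_second, std::chrono::milliseconds(100), now);

  const auto delta = std::chrono::duration_cast<std::chrono::milliseconds>(
      estimator.earliest_end_time() - now);
  std::cout << delta.count() << " ms\n";  // prints 1100
  return 0;
}
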
diff --git a/content/renderer/media/audio_renderer_impl_unittest.cc b/content/renderer/media/audio_renderer_impl_unittest.cc
index 478eb63..862dffa 100644
--- a/content/renderer/media/audio_renderer_impl_unittest.cc
+++ b/content/renderer/media/audio_renderer_impl_unittest.cc
@@ -113,7 +113,12 @@ class AudioRendererImplTest
public:
static void SetUpTestCase() {
// Set low latency mode, as it soon would be on by default.
- AudioRendererImpl::set_latency_type(AudioRendererImpl::kLowLatency);
+ if (AudioRendererImpl::latency_type() ==
+ AudioRendererImpl::kUninitializedLatency) {
+ AudioRendererImpl::set_latency_type(AudioRendererImpl::kLowLatency);
+ }
+ DCHECK_EQ(AudioRendererImpl::kLowLatency,
+ AudioRendererImpl::latency_type());
}
// IPC::Channel::Listener implementation.
@@ -325,3 +330,18 @@ TEST_F(AudioRendererImplTest, DestroyedMessageLoop_ConsumeAudioSamples) {
renderer_->ConsumeAudioSamples(buffer);
renderer_->Stop(media::NewExpectedCallback());
}
+
+TEST_F(AudioRendererImplTest, UpdateEarliestEndTime) {
+ renderer_->SetPlaybackRate(1.0f);
+ WaitForIOThreadCompletion();
+ base::Time time_now = base::Time(); // Null time by default.
+ renderer_->set_earliest_end_time(time_now);
+ renderer_->UpdateEarliestEndTime(renderer_->bytes_per_second(),
+ base::TimeDelta::FromMilliseconds(100),
+ time_now);
+ int time_delta = (renderer_->earliest_end_time() - time_now).InMilliseconds();
+ EXPECT_EQ(1100, time_delta);
+ renderer_->Stop(media::NewExpectedCallback());
+ WaitForIOThreadCompletion();
+}
+
diff --git a/media/audio/audio_util.cc b/media/audio/audio_util.cc
index 18e2abb..fe5fb10 100644
--- a/media/audio/audio_util.cc
+++ b/media/audio/audio_util.cc
@@ -8,13 +8,19 @@
// Implemented as templates to allow 8, 16 and 32 bit implementations.
// 8 bit is unsigned and biased by 128.
+#include <algorithm>
+
+#include "base/atomicops.h"
#include "base/basictypes.h"
#include "base/logging.h"
+#include "base/shared_memory.h"
#include "media/audio/audio_util.h"
#if defined(OS_MACOSX)
#include "media/audio/mac/audio_low_latency_output_mac.h"
#endif
+using base::subtle::Atomic32;
+
namespace media {
// TODO(fbarchard): Convert to intrinsics for better efficiency.
@@ -234,4 +240,38 @@ double GetAudioHardwareSampleRate()
#endif
}
+// When transferring data in the shared memory, the data occupies the start
+// of the buffer; the 32-bit word just past it holds the data size in bytes.
+
+uint32 TotalSharedMemorySizeInBytes(uint32 packet_size) {
+ // Need to reserve extra 4 bytes for size of data.
+ return packet_size + sizeof(Atomic32);
+}
+
+uint32 PacketSizeSizeInBytes(uint32 shared_memory_created_size) {
+ return shared_memory_created_size - sizeof(Atomic32);
+}
+
+uint32 GetActualDataSizeInBytes(base::SharedMemory* shared_memory,
+ uint32 shared_memory_size) {
+ char* ptr = static_cast<char*>(shared_memory->memory()) + shared_memory_size;
+ DCHECK_EQ(0u, reinterpret_cast<size_t>(ptr) & 3);
+
+ // The actual data size is stored in the word just past the data.
+ uint32 actual_data_size =
+ base::subtle::Acquire_Load(reinterpret_cast<volatile Atomic32*>(ptr));
+ return std::min(actual_data_size, shared_memory_size);
+}
+
+void SetActualDataSizeInBytes(base::SharedMemory* shared_memory,
+ uint32 shared_memory_size,
+ uint32 actual_data_size) {
+ char* ptr = static_cast<char*>(shared_memory->memory()) + shared_memory_size;
+ DCHECK_EQ(0u, reinterpret_cast<size_t>(ptr) & 3);
+
+ // Store the actual data size in the word just past the data.
+ base::subtle::Release_Store(reinterpret_cast<volatile Atomic32*>(ptr),
+ actual_data_size);
+}
+
} // namespace media
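
A standalone mock of the buffer layout these helpers implement: the packet data occupies the start of the shared region and a trailing 32-bit word publishes the actual data size with release/acquire semantics. std::atomic stands in for base::subtle atomics and a byte vector for base::SharedMemory; the function names and the 4096-byte packet size are illustrative, not the Chromium API.

#include <algorithm>
#include <atomic>
#include <cstdint>
#include <cstring>
#include <vector>

static_assert(sizeof(std::atomic<uint32_t>) == sizeof(uint32_t),
              "size word must be exactly 4 bytes");

uint32_t TotalSizeInBytes(uint32_t packet_size) {
  // Reserve an extra word after the data for its size.
  return packet_size + sizeof(std::atomic<uint32_t>);
}

void SetActualDataSize(std::vector<uint8_t>* shm, uint32_t packet_size,
                       uint32_t actual_data_size) {
  auto* size_word =
      reinterpret_cast<std::atomic<uint32_t>*>(shm->data() + packet_size);
  // Release so the reader sees the payload before the published size.
  size_word->store(actual_data_size, std::memory_order_release);
}

uint32_t GetActualDataSize(const std::vector<uint8_t>* shm,
                           uint32_t packet_size) {
  auto* size_word = reinterpret_cast<const std::atomic<uint32_t>*>(
      shm->data() + packet_size);
  // Acquire pairs with the writer's release; clamp to the packet size.
  return std::min(size_word->load(std::memory_order_acquire), packet_size);
}

int main() {
  const uint32_t packet_size = 4096;  // illustrative packet size
  std::vector<uint8_t> shm(TotalSizeInBytes(packet_size));

  // Renderer side: write a partial packet and publish its size.
  std::memset(shm.data(), 0xAB, 1024);
  SetActualDataSize(&shm, packet_size, 1024);

  // Browser side (compare AudioSyncReader::Read): copy only the published
  // bytes, zero the packet region, and reset the size word to the maximum.
  std::vector<uint8_t> out(packet_size);
  const uint32_t read_size = GetActualDataSize(&shm, packet_size);
  std::memcpy(out.data(), shm.data(), read_size);
  std::memset(shm.data(), 0, packet_size);
  SetActualDataSize(&shm, packet_size, packet_size);
  return 0;
}
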
diff --git a/media/audio/audio_util.h b/media/audio/audio_util.h
index 0d65e19..a2fa15e 100644
--- a/media/audio/audio_util.h
+++ b/media/audio/audio_util.h
@@ -9,6 +9,10 @@
#include "base/basictypes.h"
+namespace base {
+class SharedMemory;
+}
+
namespace media {
// For all audio functions 3 audio formats are supported:
@@ -77,6 +81,17 @@ void InterleaveFloatToInt16(const std::vector<float*>& source,
// Returns the default audio hardware sample-rate.
double GetAudioHardwareSampleRate();
+// Functions that handle the data buffer passed between processes through
+// shared memory. Called on both sides of the IPC channel.
+
+uint32 TotalSharedMemorySizeInBytes(uint32 packet_size);
+uint32 PacketSizeSizeInBytes(uint32 shared_memory_created_size);
+uint32 GetActualDataSizeInBytes(base::SharedMemory* shared_memory,
+ uint32 shared_memory_size);
+void SetActualDataSizeInBytes(base::SharedMemory* shared_memory,
+ uint32 shared_memory_size,
+ uint32 actual_data_size);
+
} // namespace media
#endif // MEDIA_AUDIO_AUDIO_UTIL_H_
diff --git a/webkit/glue/webmediaplayer_impl.cc b/webkit/glue/webmediaplayer_impl.cc
index 033cc13..7aa9a58 100644
--- a/webkit/glue/webmediaplayer_impl.cc
+++ b/webkit/glue/webmediaplayer_impl.cc
@@ -349,9 +349,9 @@ void WebMediaPlayerImpl::setVisible(bool visible) {
}
#define COMPILE_ASSERT_MATCHING_ENUM(webkit_name, chromium_name) \
- COMPILE_ASSERT(int(WebKit::WebMediaPlayer::webkit_name) == \
- int(media::chromium_name), \
- mismatching_enums)
+ COMPILE_ASSERT(static_cast<int>(WebKit::WebMediaPlayer::webkit_name) == \
+ static_cast<int>(media::chromium_name), \
+ mismatching_enums)
COMPILE_ASSERT_MATCHING_ENUM(None, NONE);
COMPILE_ASSERT_MATCHING_ENUM(MetaData, METADATA);
COMPILE_ASSERT_MATCHING_ENUM(Auto, AUTO);