author     rch@chromium.org <rch@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2012-11-10 00:06:15 +0000
committer  rch@chromium.org <rch@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2012-11-10 00:06:15 +0000
commit     0175b30c6c73ce2c2fc9633b98683d71b4fafcaa (patch)
tree       49cff801700df181bf5bf79e835615a9edd003e7 /net
parent     2e648601b082774989dec39da9983a5e27e9a797 (diff)
Added pacing and a leaky bucket helper class.
Merge internal change 37031721
Reverted: 166980
Initially Committed: 166977

Review URL: https://chromiumcodereview.appspot.com/11312174

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@167013 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'net')
-rw-r--r--  net/net.gyp                                               6
-rw-r--r--  net/quic/congestion_control/fix_rate_sender.cc           57
-rw-r--r--  net/quic/congestion_control/fix_rate_sender.h            11
-rw-r--r--  net/quic/congestion_control/fix_rate_test.cc             39
-rw-r--r--  net/quic/congestion_control/leaky_bucket.cc              51
-rw-r--r--  net/quic/congestion_control/leaky_bucket.h               47
-rw-r--r--  net/quic/congestion_control/leaky_bucket_test.cc         70
-rw-r--r--  net/quic/congestion_control/paced_sender.cc              73
-rw-r--r--  net/quic/congestion_control/paced_sender.h               43
-rw-r--r--  net/quic/congestion_control/paced_sender_test.cc         46
-rw-r--r--  net/quic/congestion_control/quic_send_scheduler.cc       25
-rw-r--r--  net/quic/congestion_control/quic_send_scheduler.h        11
-rw-r--r--  net/quic/congestion_control/quic_send_scheduler_test.cc  57
-rw-r--r--  net/quic/congestion_control/send_algorithm_interface.h    4
14 files changed, 459 insertions, 81 deletions
diff --git a/net/net.gyp b/net/net.gyp
index f7d0a3c..513cee8 100644
--- a/net/net.gyp
+++ b/net/net.gyp
@@ -640,6 +640,10 @@
'quic/congestion_control/fix_rate_receiver.h',
'quic/congestion_control/fix_rate_sender.cc',
'quic/congestion_control/fix_rate_sender.h',
+ 'quic/congestion_control/leaky_bucket.cc',
+ 'quic/congestion_control/leaky_bucket.h',
+ 'quic/congestion_control/paced_sender.cc',
+ 'quic/congestion_control/paced_sender.h',
'quic/congestion_control/quic_receipt_metrics_collector.cc',
'quic/congestion_control/quic_receipt_metrics_collector.h',
'quic/congestion_control/quic_send_scheduler.cc',
@@ -1421,6 +1425,8 @@
'proxy/proxy_service_unittest.cc',
'proxy/sync_host_resolver_bridge_unittest.cc',
'quic/congestion_control/fix_rate_test.cc',
+ 'quic/congestion_control/leaky_bucket_test.cc',
+ 'quic/congestion_control/paced_sender_test.cc',
'quic/congestion_control/quic_receipt_metrics_collector_test.cc',
'quic/congestion_control/quic_send_scheduler_test.cc',
'quic/crypto/crypto_framer_test.cc',
diff --git a/net/quic/congestion_control/fix_rate_sender.cc b/net/quic/congestion_control/fix_rate_sender.cc
index e9af848..ac7a3c3 100644
--- a/net/quic/congestion_control/fix_rate_sender.cc
+++ b/net/quic/congestion_control/fix_rate_sender.cc
@@ -7,20 +7,20 @@
#include <math.h>
#include "base/logging.h"
+#include "base/time.h"
#include "net/quic/quic_protocol.h"
namespace {
- static const int kInitialBitrate = 100000; // In bytes per second.
- static const int kNoBytesSent = 0;
+ const int kInitialBitrate = 100000; // In bytes per second.
+ const uint64 kWindowSizeUs = 10000; // 10 ms.
}
namespace net {
FixRateSender::FixRateSender(QuicClock* clock)
- : bitrate_in_bytes_per_second_(kInitialBitrate),
- clock_(clock),
- time_last_sent_us_(0),
- bytes_last_sent_(kNoBytesSent),
+ : bitrate_in_bytes_per_s_(kInitialBitrate),
+ fix_rate_leaky_bucket_(clock, kInitialBitrate),
+ paced_sender_(clock, kInitialBitrate),
bytes_in_flight_(0) {
DLOG(INFO) << "FixRateSender";
}
@@ -30,8 +30,10 @@ void FixRateSender::OnIncomingCongestionInfo(
DCHECK(congestion_info.type == kFixRate) <<
"Invalid incoming CongestionFeedbackType:" << congestion_info.type;
if (congestion_info.type == kFixRate) {
- bitrate_in_bytes_per_second_ =
+ bitrate_in_bytes_per_s_ =
congestion_info.fix_rate.bitrate_in_bytes_per_second;
+ fix_rate_leaky_bucket_.SetDrainingRate(bitrate_in_bytes_per_s_);
+ paced_sender_.UpdateBandwidthEstimate(bitrate_in_bytes_per_s_);
}
// Silently ignore invalid messages in release mode.
}
@@ -49,40 +51,45 @@ void FixRateSender::OnIncomingLoss(int /*number_of_lost_packets*/) {
void FixRateSender::SentPacket(QuicPacketSequenceNumber /*sequence_number*/,
size_t bytes,
bool retransmit) {
- if (bytes > 0) {
- time_last_sent_us_ = clock_->NowInUsec();
- bytes_last_sent_ = bytes;
- }
+ fix_rate_leaky_bucket_.Add(bytes);
+ paced_sender_.SentPacket(bytes);
if (!retransmit) {
bytes_in_flight_ += bytes;
}
}
int FixRateSender::TimeUntilSend(bool /*retransmit*/) {
- if (time_last_sent_us_ == 0 && bytes_last_sent_ == kNoBytesSent) {
- // No sent packets.
- return 0; // We can send now.
+ if (CongestionWindow() > fix_rate_leaky_bucket_.BytesPending()) {
+ if (CongestionWindow() <= bytes_in_flight_) {
+ return kUnknownWaitTime; // We need an ack before we send more.
+ }
+ return paced_sender_.TimeUntilSend(0);
}
- uint64 elapsed_time_us = clock_->NowInUsec() - time_last_sent_us_;
- uint64 time_to_transmit_us = (bytes_last_sent_ * 1000000) /
- bitrate_in_bytes_per_second_;
- if (elapsed_time_us > time_to_transmit_us) {
- return 0; // We can send now.
+ uint64 time_remaining_us = fix_rate_leaky_bucket_.TimeRemaining();
+ if (time_remaining_us == 0) {
+ return kUnknownWaitTime; // We need an ack before we send more.
}
- return time_to_transmit_us - elapsed_time_us;
+ return paced_sender_.TimeUntilSend(time_remaining_us);
+}
+
+size_t FixRateSender::CongestionWindow() {
+ size_t window_size = bitrate_in_bytes_per_s_ * kWindowSizeUs /
+ base::Time::kMicrosecondsPerSecond;
+ // Make sure window size is not less than a packet.
+ return std::max(kMaxPacketSize, window_size);
}
size_t FixRateSender::AvailableCongestionWindow() {
- if (TimeUntilSend(false) > 0) {
+ size_t congestion_window = CongestionWindow();
+ if (bytes_in_flight_ >= congestion_window) {
return 0;
}
- // Note: Since this is for testing only we have the total window size equal to
- // kMaxPacketSize.
- return kMaxPacketSize - bytes_in_flight_;
+ size_t available_congestion_window = congestion_window - bytes_in_flight_;
+ return paced_sender_.AvailableWindow(available_congestion_window);
}
int FixRateSender::BandwidthEstimate() {
- return bitrate_in_bytes_per_second_;
+ return bitrate_in_bytes_per_s_;
}
} // namespace net
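For reference, the CongestionWindow() helper added above is just the configured bitrate applied over the 10 ms kWindowSizeUs window, floored at one max-size packet. A minimal standalone sketch of that arithmetic (the 1200-byte kMaxPacketSize is an assumption implied by the tests, not shown in this diff):

// Standalone sketch of the FixRateSender::CongestionWindow() arithmetic;
// not the real class.
#include <algorithm>
#include <cstddef>
#include <cstdint>

const size_t kMaxPacketSize = 1200;    // Assumed; implied by fix_rate_test.cc.
const uint64_t kWindowSizeUs = 10000;  // 10 ms, as in the diff above.
const uint64_t kMicrosecondsPerSecond = 1000000;

size_t CongestionWindow(uint32_t bitrate_in_bytes_per_s) {
  size_t window_size =
      bitrate_in_bytes_per_s * kWindowSizeUs / kMicrosecondsPerSecond;
  // Never report a window smaller than one packet.
  return std::max(kMaxPacketSize, window_size);
}

With the 300000 bytes/s bitrate used in fix_rate_test.cc this yields 3000 bytes, which is where the 3000 - kMaxPacketSize expectation in that test comes from.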
diff --git a/net/quic/congestion_control/fix_rate_sender.h b/net/quic/congestion_control/fix_rate_sender.h
index c60a194..1c751cca 100644
--- a/net/quic/congestion_control/fix_rate_sender.h
+++ b/net/quic/congestion_control/fix_rate_sender.h
@@ -11,6 +11,8 @@
#include "base/compiler_specific.h"
#include "net/base/net_export.h"
#include "net/quic/quic_clock.h"
+#include "net/quic/congestion_control/leaky_bucket.h"
+#include "net/quic/congestion_control/paced_sender.h"
#include "net/quic/congestion_control/send_algorithm_interface.h"
namespace net {
@@ -34,10 +36,11 @@ class NET_EXPORT_PRIVATE FixRateSender : public SendAlgorithmInterface {
// End implementation of SendAlgorithmInterface.
private:
- uint32 bitrate_in_bytes_per_second_;
- QuicClock* clock_;
- uint64 time_last_sent_us_;
- int bytes_last_sent_;
+ size_t CongestionWindow();
+
+ uint32 bitrate_in_bytes_per_s_;
+ LeakyBucket fix_rate_leaky_bucket_;
+ PacedSender paced_sender_;
size_t bytes_in_flight_;
};
diff --git a/net/quic/congestion_control/fix_rate_test.cc b/net/quic/congestion_control/fix_rate_test.cc
index 3372c02..169d4c7 100644
--- a/net/quic/congestion_control/fix_rate_test.cc
+++ b/net/quic/congestion_control/fix_rate_test.cc
@@ -12,6 +12,10 @@
#include "net/quic/quic_protocol.h"
#include "testing/gtest/include/gtest/gtest.h"
+namespace {
+ const int rtt_us = 30000;
+}
+
namespace net {
class FixRateTest : public ::testing::Test {
@@ -19,6 +23,7 @@ class FixRateTest : public ::testing::Test {
void SetUp() {
sender_.reset(new FixRateSender(&clock_));
receiver_.reset(new FixRateReceiver());
+ clock_.AdvanceTime(0.002); // Make sure clock does not start at 0.
}
MockClock clock_;
scoped_ptr<FixRateSender> sender_;
@@ -39,36 +44,46 @@ TEST_F(FixRateTest, SenderAPI) {
info.type = kFixRate;
info.fix_rate.bitrate_in_bytes_per_second = 300000;
sender_->OnIncomingCongestionInfo(info);
+ EXPECT_EQ(300000, sender_->BandwidthEstimate());
+ EXPECT_EQ(0, sender_->TimeUntilSend(false));
+ EXPECT_EQ(kMaxPacketSize * 2, sender_->AvailableCongestionWindow());
+ sender_->SentPacket(1, kMaxPacketSize, false);
+ EXPECT_EQ(3000-kMaxPacketSize, sender_->AvailableCongestionWindow());
EXPECT_EQ(0, sender_->TimeUntilSend(false));
- EXPECT_EQ(kMaxPacketSize, sender_->AvailableCongestionWindow());
- sender_->SentPacket(1, 1200, false);
+ sender_->SentPacket(2, kMaxPacketSize, false);
+ sender_->SentPacket(3, 600, false);
+ EXPECT_EQ(10000, sender_->TimeUntilSend(false));
EXPECT_EQ(0u, sender_->AvailableCongestionWindow());
- EXPECT_EQ(300000, sender_->BandwidthEstimate());
- EXPECT_EQ(4000, sender_->TimeUntilSend(false));
- clock_.AdvanceTime(0.002);
- EXPECT_EQ(2000, sender_->TimeUntilSend(false));
clock_.AdvanceTime(0.002);
+ EXPECT_EQ(kUnknownWaitTime, sender_->TimeUntilSend(false));
+ clock_.AdvanceTime(0.008);
+ sender_->OnIncomingAck(1, kMaxPacketSize, rtt_us);
+ sender_->OnIncomingAck(2, kMaxPacketSize, rtt_us);
+ sender_->OnIncomingAck(3, 600, rtt_us);
EXPECT_EQ(0, sender_->TimeUntilSend(false));
}
-TEST_F(FixRateTest, Pacing) {
+TEST_F(FixRateTest, FixRatePacing) {
const int packet_size = 1200;
- const int rtt_us = 30000;
CongestionInfo info;
receiver_->SetBitrate(240000); // Bytes per second.
ASSERT_TRUE(receiver_->GenerateCongestionInfo(&info));
sender_->OnIncomingCongestionInfo(info);
double acc_advance_time = 0.0;
+ QuicPacketSequenceNumber sequence_number = 0;
for (int i = 0; i < 100; ++i) {
EXPECT_EQ(0, sender_->TimeUntilSend(false));
- EXPECT_EQ(kMaxPacketSize, sender_->AvailableCongestionWindow());
- sender_->SentPacket(i, packet_size, false);
+ EXPECT_EQ(kMaxPacketSize * 2u, sender_->AvailableCongestionWindow());
+ sender_->SentPacket(sequence_number++, packet_size, false);
+ EXPECT_EQ(0, sender_->TimeUntilSend(false));
+ sender_->SentPacket(sequence_number++, packet_size, false);
double advance_time = sender_->TimeUntilSend(false) / 1000000.0;
clock_.AdvanceTime(advance_time);
- sender_->OnIncomingAck(i, packet_size, rtt_us);
+ sender_->OnIncomingAck(sequence_number - 1, packet_size, rtt_us);
+ sender_->OnIncomingAck(sequence_number - 2, packet_size, rtt_us);
acc_advance_time += advance_time;
}
- EXPECT_EQ(500, floor((acc_advance_time * 1000) + 0.5));
+ EXPECT_EQ(1000, floor((acc_advance_time * 1000) + 0.5));
}
} // namespace net
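The revised 1000 ms expectation follows directly from the pacing arithmetic: each of the 100 loop iterations now sends two 1200-byte packets, so 100 * 2 * 1200 = 240000 bytes, which at the configured 240000 bytes/s takes exactly 1.0 s. The old single-packet loop sent half as much, hence the previous 500 ms.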
diff --git a/net/quic/congestion_control/leaky_bucket.cc b/net/quic/congestion_control/leaky_bucket.cc
new file mode 100644
index 0000000..4b390f4
--- /dev/null
+++ b/net/quic/congestion_control/leaky_bucket.cc
@@ -0,0 +1,51 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/quic/congestion_control/leaky_bucket.h"
+
+#include "base/time.h"
+
+namespace net {
+
+LeakyBucket::LeakyBucket(QuicClock* clock, int bytes_per_second)
+    : clock_(clock),
+      bytes_(0),
+      time_last_updated_us_(0),  // Baseline for the first Update() call.
+      draining_rate_bytes_per_s_(bytes_per_second) {
+}
+
+void LeakyBucket::SetDrainingRate(int bytes_per_second) {
+ Update();
+ draining_rate_bytes_per_s_ = bytes_per_second;
+}
+
+void LeakyBucket::Add(size_t bytes) {
+ Update();
+ bytes_ += bytes;
+}
+
+uint64 LeakyBucket::TimeRemaining() {
+ Update();
+ uint64 time_remaining_us = (bytes_ * base::Time::kMicrosecondsPerSecond) /
+ draining_rate_bytes_per_s_;
+ return time_remaining_us;
+}
+
+size_t LeakyBucket::BytesPending() {
+ Update();
+ return bytes_;
+}
+
+void LeakyBucket::Update() {
+ uint64 elapsed_time_us = clock_->NowInUsec() - time_last_updated_us_;
+ size_t bytes_cleared = (elapsed_time_us * draining_rate_bytes_per_s_) /
+ base::Time::kMicrosecondsPerSecond;
+ if (bytes_cleared >= bytes_) {
+ bytes_ = 0;
+ } else {
+ bytes_ -= bytes_cleared;
+ }
+ time_last_updated_us_ = clock_->NowInUsec();
+}
+
+} // namespace net
diff --git a/net/quic/congestion_control/leaky_bucket.h b/net/quic/congestion_control/leaky_bucket.h
new file mode 100644
index 0000000..a3641d7
--- /dev/null
+++ b/net/quic/congestion_control/leaky_bucket.h
@@ -0,0 +1,47 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Helper class to track the rate at which data can leave the buffer, for
+// pacing. A leaky bucket drains the data at a constant rate regardless of
+// how full the buffer is.
+// See http://en.wikipedia.org/wiki/Leaky_bucket for more details.
+
+#ifndef GFE_QUIC_CONGESTION_CONTROL_LEAKY_BUCKET_H_
+#define GFE_QUIC_CONGESTION_CONTROL_LEAKY_BUCKET_H_
+
+#include "base/basictypes.h"
+#include "net/base/net_export.h"
+#include "net/quic/quic_clock.h"
+
+namespace net {
+
+class NET_EXPORT_PRIVATE LeakyBucket {
+ public:
+ // clock is not owned by this class.
+ LeakyBucket(QuicClock* clock, int bytes_per_second);
+
+ // Set the rate at which the bytes leave the buffer.
+ void SetDrainingRate(int bytes_per_second);
+
+ // Add data to the buffer.
+ void Add(size_t bytes);
+
+  // Time until the buffer is empty, in microseconds.
+ uint64 TimeRemaining();
+
+ // Number of bytes in the buffer.
+ size_t BytesPending();
+
+ private:
+ void Update();
+
+ QuicClock* clock_;
+ size_t bytes_;
+ uint64 time_last_updated_us_;
+ int draining_rate_bytes_per_s_;
+};
+
+} // namespace net
+
+#endif // GFE_QUIC_CONGESTION_CONTROL_LEAKY_BUCKET_H_
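As a rough usage sketch of this API (it restates the first steps of leaky_bucket_test.cc below; MockClock is the test clock from net/quic/test_tools):

MockClock clock;
LeakyBucket bucket(&clock, 0);
bucket.SetDrainingRate(200000);           // Drain at 200000 bytes per second.
bucket.Add(2000);                         // Queue 2000 bytes.
// 2000 bytes at 200000 bytes/s take 10 ms to drain.
DCHECK_EQ(10000u, bucket.TimeRemaining());
clock.AdvanceTime(0.005);                 // 5 ms later...
DCHECK_EQ(1000u, bucket.BytesPending());  // ...half of it has leaked out.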
diff --git a/net/quic/congestion_control/leaky_bucket_test.cc b/net/quic/congestion_control/leaky_bucket_test.cc
new file mode 100644
index 0000000..5b4e18a
--- /dev/null
+++ b/net/quic/congestion_control/leaky_bucket_test.cc
@@ -0,0 +1,70 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "net/quic/congestion_control/leaky_bucket.h"
+#include "net/quic/test_tools/mock_clock.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using ::testing::_;
+
+namespace net {
+
+class LeakyBucketTest : public ::testing::Test {
+ protected:
+ void SetUp() {
+ leaky_bucket_.reset(new LeakyBucket(&clock_, 0));
+ }
+ MockClock clock_;
+ scoped_ptr<LeakyBucket> leaky_bucket_;
+};
+
+TEST_F(LeakyBucketTest, Basic) {
+ int bytes_per_second = 200000;
+ leaky_bucket_->SetDrainingRate(bytes_per_second);
+ leaky_bucket_->Add(2000);
+ EXPECT_EQ(2000u, leaky_bucket_->BytesPending());
+ EXPECT_EQ(10000u, leaky_bucket_->TimeRemaining());
+ clock_.AdvanceTime(0.005);
+ EXPECT_EQ(1000u, leaky_bucket_->BytesPending());
+ EXPECT_EQ(5000u, leaky_bucket_->TimeRemaining());
+ clock_.AdvanceTime(0.005);
+ EXPECT_EQ(0u, leaky_bucket_->BytesPending());
+ EXPECT_EQ(0u, leaky_bucket_->TimeRemaining());
+ clock_.AdvanceTime(0.005);
+ EXPECT_EQ(0u, leaky_bucket_->BytesPending());
+ EXPECT_EQ(0u, leaky_bucket_->TimeRemaining());
+ leaky_bucket_->Add(2000);
+ clock_.AdvanceTime(0.011);
+ EXPECT_EQ(0u, leaky_bucket_->BytesPending());
+ EXPECT_EQ(0u, leaky_bucket_->TimeRemaining());
+ leaky_bucket_->Add(2000);
+ clock_.AdvanceTime(0.005);
+ leaky_bucket_->Add(2000);
+ clock_.AdvanceTime(0.005);
+ EXPECT_EQ(2000u, leaky_bucket_->BytesPending());
+ EXPECT_EQ(10000u, leaky_bucket_->TimeRemaining());
+ clock_.AdvanceTime(0.010);
+ EXPECT_EQ(0u, leaky_bucket_->BytesPending());
+ EXPECT_EQ(0u, leaky_bucket_->TimeRemaining());
+}
+
+TEST_F(LeakyBucketTest, ChangeDrainRate) {
+ int bytes_per_second = 200000;
+ leaky_bucket_->SetDrainingRate(bytes_per_second);
+ leaky_bucket_->Add(2000);
+ EXPECT_EQ(2000u, leaky_bucket_->BytesPending());
+ EXPECT_EQ(10000u, leaky_bucket_->TimeRemaining());
+ clock_.AdvanceTime(0.005);
+ EXPECT_EQ(1000u, leaky_bucket_->BytesPending());
+ EXPECT_EQ(5000u, leaky_bucket_->TimeRemaining());
+ bytes_per_second = 100000; // Cut drain rate in half.
+ leaky_bucket_->SetDrainingRate(bytes_per_second);
+ EXPECT_EQ(1000u, leaky_bucket_->BytesPending());
+ EXPECT_EQ(10000u, leaky_bucket_->TimeRemaining());
+}
+
+} // namespace net
diff --git a/net/quic/congestion_control/paced_sender.cc b/net/quic/congestion_control/paced_sender.cc
new file mode 100644
index 0000000..a35bd02
--- /dev/null
+++ b/net/quic/congestion_control/paced_sender.cc
@@ -0,0 +1,73 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/quic/congestion_control/paced_sender.h"
+
+#include "base/time.h"
+#include "net/quic/quic_protocol.h"
+
+namespace net {
+
+// To prevent overly aggressive pacing we allow the following packet burst
+// size.
+const size_t kMinPacketBurstSize = 2;
+// Max estimated time between calls to TimeUntilSend and
+// AvailableCongestionWindow.
+const int kMaxSchedulingDelayUs = 2000;
+
+PacedSender::PacedSender(QuicClock* clock, int bytes_per_s)
+ : leaky_bucket_(clock, bytes_per_s),
+ pace_in_bytes_per_s_(bytes_per_s) {
+}
+
+void PacedSender::UpdateBandwidthEstimate(int bytes_per_s) {
+ leaky_bucket_.SetDrainingRate(bytes_per_s);
+ pace_in_bytes_per_s_ = bytes_per_s;
+}
+
+void PacedSender::SentPacket(size_t bytes) {
+ leaky_bucket_.Add(bytes);
+}
+
+int PacedSender::TimeUntilSend(int time_until_send_us) {
+ if (time_until_send_us < kMaxSchedulingDelayUs) {
+ // Pace the data.
+ size_t pacing_window = kMaxSchedulingDelayUs * pace_in_bytes_per_s_ /
+ base::Time::kMicrosecondsPerSecond;
+ size_t min_window_size = kMinPacketBurstSize * kMaxPacketSize;
+ pacing_window = std::max(pacing_window, min_window_size);
+
+ if (pacing_window > leaky_bucket_.BytesPending()) {
+ // We have not filled our pacing window yet.
+ return time_until_send_us;
+ }
+ return leaky_bucket_.TimeRemaining();
+ }
+ return time_until_send_us;
+}
+
+size_t PacedSender::AvailableWindow(size_t available_congestion_window) {
+ size_t accuracy_window = (kMaxSchedulingDelayUs * pace_in_bytes_per_s_) /
+ base::Time::kMicrosecondsPerSecond;
+ size_t min_burst_window = kMinPacketBurstSize * kMaxPacketSize;
+ DLOG(INFO) << "Available congestion window:" << available_congestion_window
+ << " accuracy window:" << accuracy_window
+ << " min burst window:" << min_burst_window;
+
+ // Should we limit the window to pace the data?
+ if (available_congestion_window > min_burst_window &&
+ available_congestion_window > accuracy_window) {
+    // Max window depends on the estimated bandwidth; higher bandwidth =>
+    // larger bursts. We also consider our timing accuracy: an accuracy of
+    // 1 ms will allow us to send up to 19.2 Mbit/s with 2 packets per burst.
+ available_congestion_window = std::max(min_burst_window, accuracy_window);
+ size_t bytes_pending = leaky_bucket_.BytesPending();
+ if (bytes_pending > available_congestion_window) {
+ return 0;
+ }
+ available_congestion_window -= bytes_pending;
+ }
+ return available_congestion_window;
+}
+
+} // namespace net
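The burst figure in the comment above checks out; a back-of-envelope sketch (assuming the 1200-byte kMaxPacketSize the tests imply):

// Sustainable rate = burst bytes / timing accuracy.
const int kPacketBytes = 1200;      // Assumed kMaxPacketSize.
const int kBurstPackets = 2;        // kMinPacketBurstSize above.
const double kAccuracySec = 0.001;  // 1 ms timing accuracy.
const double max_bits_per_s =
    kBurstPackets * kPacketBytes * 8 / kAccuracySec;  // 19200000 = 19.2 Mbit/s.

With the 2 ms kMaxSchedulingDelayUs actually used here, the same two-packet burst sustains 9.6 Mbit/s before accuracy_window grows past min_burst_window.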
diff --git a/net/quic/congestion_control/paced_sender.h b/net/quic/congestion_control/paced_sender.h
new file mode 100644
index 0000000..cd9d7dd
--- /dev/null
+++ b/net/quic/congestion_control/paced_sender.h
@@ -0,0 +1,43 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Helper class that limits the congestion window to pace the packets.
+
+#ifndef GFE_QUIC_CONGESTION_CONTROL_PACED_SENDER_H_
+#define GFE_QUIC_CONGESTION_CONTROL_PACED_SENDER_H_
+
+#include "base/basictypes.h"
+#include "net/base/net_export.h"
+#include "net/quic/congestion_control/leaky_bucket.h"
+#include "net/quic/quic_clock.h"
+
+namespace net {
+
+class NET_EXPORT_PRIVATE PacedSender {
+ public:
+ PacedSender(QuicClock* clock, int bandwidth_estimate_bytes_per_s);
+
+  // The estimated bandwidth from the congestion algorithm changed.
+ void UpdateBandwidthEstimate(int bytes_per_s);
+
+ // A packet of size bytes was sent.
+ void SentPacket(size_t bytes);
+
+ // Return time until we can send based on the pacing.
+ int TimeUntilSend(int time_until_send_us);
+
+ // Return the amount of data in bytes we can send based on the pacing.
+  // available_congestion_window is the congestion algorithm's available
+ // congestion window in bytes.
+ size_t AvailableWindow(size_t available_congestion_window);
+
+ private:
+  // Helper object to track the rate at which data can leave the buffer,
+  // for pacing.
+ LeakyBucket leaky_bucket_;
+ int pace_in_bytes_per_s_;
+};
+
+} // namespace net
+
+#endif // GFE_QUIC_CONGESTION_CONTROL_PACED_SENDER_H_
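A sketch of how the two calls interact, mirroring paced_sender_test.cc below (values assume a 1200-byte kMaxPacketSize):

MockClock clock;
PacedSender pacer(&clock, 1000000);    // Pace at 1000000 bytes/s (8 Mbit/s).
DCHECK_EQ(0, pacer.TimeUntilSend(0));  // First packet of a burst is free...
pacer.SentPacket(kMaxPacketSize);
DCHECK_EQ(0, pacer.TimeUntilSend(0));  // ...and so is the second.
pacer.SentPacket(kMaxPacketSize);
// The two-packet burst is now full: the next send must wait until the
// internal leaky bucket drains (2400 bytes at 1000000 bytes/s => 2400 us).
DCHECK_EQ(2400, pacer.TimeUntilSend(0));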
diff --git a/net/quic/congestion_control/paced_sender_test.cc b/net/quic/congestion_control/paced_sender_test.cc
new file mode 100644
index 0000000..c5b3215
--- /dev/null
+++ b/net/quic/congestion_control/paced_sender_test.cc
@@ -0,0 +1,46 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "net/quic/congestion_control/paced_sender.h"
+#include "net/quic/quic_protocol.h"
+#include "net/quic/test_tools/mock_clock.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using ::testing::_;
+
+namespace net {
+namespace testing {
+
+const int kHundredKBytesPerS = 100000;
+
+class PacedSenderTest : public ::testing::Test {
+ protected:
+ void SetUp() {
+ paced_sender_.reset(new PacedSender(&clock_, kHundredKBytesPerS));
+ }
+ MockClock clock_;
+ scoped_ptr<net::PacedSender> paced_sender_;
+};
+
+TEST_F(PacedSenderTest, Basic) {
+ paced_sender_->UpdateBandwidthEstimate(kHundredKBytesPerS * 10);
+ EXPECT_EQ(0, paced_sender_->TimeUntilSend(0));
+ EXPECT_EQ(kMaxPacketSize * 2,
+ paced_sender_->AvailableWindow(kMaxPacketSize * 4));
+ paced_sender_->SentPacket(kMaxPacketSize);
+ EXPECT_EQ(0, paced_sender_->TimeUntilSend(0));
+ paced_sender_->SentPacket(kMaxPacketSize);
+ EXPECT_EQ(int(kMaxPacketSize * 2), paced_sender_->TimeUntilSend(0));
+ EXPECT_EQ(0u, paced_sender_->AvailableWindow(kMaxPacketSize * 4));
+ clock_.AdvanceTime(0.0024);
+ EXPECT_EQ(0, paced_sender_->TimeUntilSend(0));
+ EXPECT_EQ(kMaxPacketSize * 2,
+ paced_sender_->AvailableWindow(kMaxPacketSize * 4));
+}
+
+} // namespace testing
+} // namespace net
diff --git a/net/quic/congestion_control/quic_send_scheduler.cc b/net/quic/congestion_control/quic_send_scheduler.cc
index 9df5db7..20c1033 100644
--- a/net/quic/congestion_control/quic_send_scheduler.cc
+++ b/net/quic/congestion_control/quic_send_scheduler.cc
@@ -24,7 +24,6 @@ QuicSendScheduler::QuicSendScheduler(
: clock_(clock),
current_estimated_bandwidth_(-1),
max_estimated_bandwidth_(-1),
- last_sent_packet_us_(0),
current_packet_bucket_(-1),
first_packet_bucket_(-1),
send_algorithm_(SendAlgorithmInterface::Create(clock, type)) {
@@ -68,11 +67,10 @@ void QuicSendScheduler::SentPacket(QuicPacketSequenceNumber sequence_number,
bool retransmit) {
int bucket = UpdatePacketHistory();
packet_history_[bucket] += bytes;
- last_sent_packet_us_ = clock_->NowInUsec();
send_algorithm_->SentPacket(sequence_number, bytes, retransmit);
if (!retransmit) {
pending_packets_[sequence_number] = new PendingPacket(bytes,
- last_sent_packet_us_);
+ clock_->NowInUsec());
}
DLOG(INFO) << "Sent sequence number:" << sequence_number;
}
@@ -127,23 +125,16 @@ int QuicSendScheduler::TimeUntilSend(bool retransmit) {
}
size_t QuicSendScheduler::AvailableCongestionWindow() {
- size_t available_congestion_window =
- send_algorithm_->AvailableCongestionWindow();
- DLOG(INFO) << "Available congestion window:" << available_congestion_window;
-
- // Should we limit the window to pace the data?
- if (available_congestion_window > kMinPacketBurstSize * kMaxPacketSize) {
- // TODO(pwestin): implement pacing.
- // will depend on estimated bandwidth; higher bandwidth => larger burst
- // we need to consider our timing accuracy here too.
- // an accuracy of 1ms will allow us to send up to 19.2Mbit/s with 2 packets
- // per burst.
- }
- return available_congestion_window;
+ return send_algorithm_->AvailableCongestionWindow();
}
int QuicSendScheduler::BandwidthEstimate() {
- return send_algorithm_->BandwidthEstimate();
+ int bandwidth_estimate = send_algorithm_->BandwidthEstimate();
+ if (bandwidth_estimate == kNoValidEstimate) {
+ // If we don't have a valid estimate use the send rate.
+ return SentBandwidth();
+ }
+ return bandwidth_estimate;
}
bool QuicSendScheduler::HasSentPacket() {
diff --git a/net/quic/congestion_control/quic_send_scheduler.h b/net/quic/congestion_control/quic_send_scheduler.h
index 2d1379a..a5ef712 100644
--- a/net/quic/congestion_control/quic_send_scheduler.h
+++ b/net/quic/congestion_control/quic_send_scheduler.h
@@ -5,7 +5,7 @@
// This is the base class for QUIC send side congestion control.
// It decides when we can send a QUIC packet to the wire.
// This class handles the basic bookkeeping of sent bitrate and packet loss.
-// The acctual send side algorithm is implemented via the
+// The actual send side algorithm is implemented via the
// SendAlgorithmInterface.
#ifndef NET_QUIC_CONGESTION_CONTROL_QUIC_SEND_SCHEDULER_H_
@@ -28,10 +28,6 @@ const uint32 kBitrateSmoothingBuckets = 300;
// implementation due to overflow resulting in a potential divide by zero.
const uint32 kBitrateSmoothingPeriod = 10000;
-// When kUnknownWaitTime is returned, there is no need to poll the function
-// again until we receive a new event.
-const int kUnknownWaitTime = -1;
-
class NET_EXPORT_PRIVATE QuicSendScheduler {
public:
class PendingPacket {
@@ -49,6 +45,10 @@ class NET_EXPORT_PRIVATE QuicSendScheduler {
};
typedef std::map<QuicPacketSequenceNumber, PendingPacket*> PendingPacketsMap;
+  // Enable pacing to prevent a large congestion window from being sent all
+  // at once. When pacing is enabled, a large congestion window is sent in
+  // multiple bursts of packets instead of one big burst that might introduce
+  // packet loss.
QuicSendScheduler(QuicClock* clock, CongestionFeedbackType congestion_type);
virtual ~QuicSendScheduler();
@@ -89,7 +89,6 @@ class NET_EXPORT_PRIVATE QuicSendScheduler {
QuicClock* clock_;
int current_estimated_bandwidth_;
int max_estimated_bandwidth_;
- uint64 last_sent_packet_us_;
// To keep track of the real sent bitrate we keep track of the last sent bytes
// by keeping an array containing the number of bytes sent in a short timespan
// kBitrateSmoothingPeriod; multiple of these buckets kBitrateSmoothingBuckets
diff --git a/net/quic/congestion_control/quic_send_scheduler_test.cc b/net/quic/congestion_control/quic_send_scheduler_test.cc
index 8325f96..9af97a4 100644
--- a/net/quic/congestion_control/quic_send_scheduler_test.cc
+++ b/net/quic/congestion_control/quic_send_scheduler_test.cc
@@ -20,7 +20,6 @@ class QuicSendSchedulerTest : public ::testing::Test {
void SetUpCongestionType(CongestionFeedbackType congestion_type) {
sender_.reset(new QuicSendScheduler(&clock_, congestion_type));
}
-
MockClock clock_;
scoped_ptr<QuicSendScheduler> sender_;
};
@@ -29,18 +28,18 @@ TEST_F(QuicSendSchedulerTest, FixedRateSenderAPI) {
SetUpCongestionType(kFixRate);
QuicAckFrame ack;
ack.congestion_info.type = kFixRate;
- ack.congestion_info.fix_rate.bitrate_in_bytes_per_second = 300000;
+ ack.congestion_info.fix_rate.bitrate_in_bytes_per_second = 30000;
sender_->OnIncomingAckFrame(ack);
EXPECT_EQ(-1, sender_->PeakSustainedBandwidth());
EXPECT_EQ(0, sender_->TimeUntilSend(false));
EXPECT_EQ(kMaxPacketSize, sender_->AvailableCongestionWindow());
- sender_->SentPacket(1, 1200, false);
+ sender_->SentPacket(1, kMaxPacketSize, false);
EXPECT_EQ(0u, sender_->AvailableCongestionWindow());
- EXPECT_EQ(4000, sender_->TimeUntilSend(false));
- clock_.AdvanceTime(0.002);
- EXPECT_EQ(2000, sender_->TimeUntilSend(false));
- clock_.AdvanceTime(0.002);
- EXPECT_EQ(0, sender_->TimeUntilSend(false));
+ EXPECT_EQ(40000, sender_->TimeUntilSend(false));
+ clock_.AdvanceTime(0.035);
+ EXPECT_EQ(kUnknownWaitTime, sender_->TimeUntilSend(false));
+ clock_.AdvanceTime(0.005);
+ EXPECT_EQ(kUnknownWaitTime, sender_->TimeUntilSend(false));
}
TEST_F(QuicSendSchedulerTest, FixedRatePacing) {
@@ -54,7 +53,7 @@ TEST_F(QuicSendSchedulerTest, FixedRatePacing) {
for (int i = 0; i < 100; ++i) {
EXPECT_EQ(0, sender_->TimeUntilSend(false));
EXPECT_EQ(kMaxPacketSize, sender_->AvailableCongestionWindow());
- sender_->SentPacket(i, 1200, false);
+ sender_->SentPacket(i, kMaxPacketSize, false);
double advance_time = sender_->TimeUntilSend(false) / 1000000.0;
clock_.AdvanceTime(advance_time);
acc_advance_time += advance_time;
@@ -75,9 +74,8 @@ TEST_F(QuicSendSchedulerTest, DISABLED_AvailableCongestionWindow) {
EXPECT_EQ(0, sender_->TimeUntilSend(false));
EXPECT_EQ(kMaxPacketSize, sender_->AvailableCongestionWindow());
for (int i = 1; i <= 12; i++) {
+ EXPECT_EQ(0, sender_->TimeUntilSend(false));
sender_->SentPacket(i, 100, false);
- double advance_time = sender_->TimeUntilSend(false) / 1000000.0;
- clock_.AdvanceTime(advance_time);
EXPECT_EQ(kMaxPacketSize - (i * 100), sender_->AvailableCongestionWindow());
}
// Ack the packet we sent.
@@ -103,8 +101,8 @@ TEST_F(QuicSendSchedulerTest, FixedRateBandwidth) {
sender_->OnIncomingAckFrame(ack);
}
EXPECT_EQ(100000, sender_->BandwidthEstimate());
- EXPECT_EQ(100000, sender_->PeakSustainedBandwidth());
- EXPECT_EQ(100000, sender_->SentBandwidth());
+ EXPECT_EQ(101010, sender_->PeakSustainedBandwidth());
+ EXPECT_EQ(101010, sender_->SentBandwidth());
}
TEST_F(QuicSendSchedulerTest, BandwidthWith3SecondGap) {
@@ -114,8 +112,7 @@ TEST_F(QuicSendSchedulerTest, BandwidthWith3SecondGap) {
ack.congestion_info.fix_rate.bitrate_in_bytes_per_second = 100000;
sender_->OnIncomingAckFrame(ack);
for (int i = 0; i < 100; ++i) {
- double advance_time = sender_->TimeUntilSend(false) / 1000000.0;
- clock_.AdvanceTime(advance_time);
+ clock_.AdvanceTime(0.010);
EXPECT_EQ(0, sender_->TimeUntilSend(false));
EXPECT_EQ(kMaxPacketSize, sender_->AvailableCongestionWindow());
sender_->SentPacket(i, 1000, false);
@@ -136,8 +133,7 @@ TEST_F(QuicSendSchedulerTest, BandwidthWith3SecondGap) {
EXPECT_EQ(0, sender_->TimeUntilSend(false));
EXPECT_EQ(kMaxPacketSize, sender_->AvailableCongestionWindow());
sender_->SentPacket(i + 100, 1000, false);
- double advance_time = sender_->TimeUntilSend(false) / 1000000.0;
- clock_.AdvanceTime(advance_time);
+ clock_.AdvanceTime(0.010);
// Ack the packet we sent.
ack.received_info.largest_received = i + 100;
sender_->OnIncomingAckFrame(ack);
@@ -147,4 +143,31 @@ TEST_F(QuicSendSchedulerTest, BandwidthWith3SecondGap) {
EXPECT_EQ(50000, sender_->SentBandwidth());
}
+TEST_F(QuicSendSchedulerTest, Pacing) {
+ SetUpCongestionType(kFixRate);
+ QuicAckFrame ack;
+ ack.congestion_info.type = kFixRate;
+ // Test a high bitrate (8Mbit/s) to trigger pacing.
+ ack.congestion_info.fix_rate.bitrate_in_bytes_per_second = 1000000;
+ ack.received_info.largest_received = 0;
+ sender_->OnIncomingAckFrame(ack);
+ double acc_advance_time = 0.0;
+ for (int i = 0; i < 100;) {
+ EXPECT_EQ(0, sender_->TimeUntilSend(false));
+ EXPECT_EQ(kMaxPacketSize * 2, sender_->AvailableCongestionWindow());
+ sender_->SentPacket(i++, kMaxPacketSize, false);
+ EXPECT_EQ(0, sender_->TimeUntilSend(false));
+ sender_->SentPacket(i++, kMaxPacketSize, false);
+ double advance_time = sender_->TimeUntilSend(false) / 1000000.0;
+ clock_.AdvanceTime(advance_time);
+ acc_advance_time += advance_time;
+ // Ack the packets we sent.
+ ack.received_info.largest_received = i - 2;
+ sender_->OnIncomingAckFrame(ack);
+ ack.received_info.largest_received = i - 1;
+ sender_->OnIncomingAckFrame(ack);
+ }
+ EXPECT_EQ(120, floor((acc_advance_time * 1000) + 0.5));
+}
+
} // namespace net
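The 120 ms total in the new Pacing test is again straight arithmetic: the loop sends 100 packets of kMaxPacketSize (1200 bytes, as the other tests imply), and 120000 bytes at the configured 1000000 bytes/s take 0.12 s.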
diff --git a/net/quic/congestion_control/send_algorithm_interface.h b/net/quic/congestion_control/send_algorithm_interface.h
index b219661..0d5aa6e 100644
--- a/net/quic/congestion_control/send_algorithm_interface.h
+++ b/net/quic/congestion_control/send_algorithm_interface.h
@@ -14,6 +14,9 @@
namespace net {
+const int kNoValidEstimate = -1;
+const int kUnknownWaitTime = -1;
+
class NET_EXPORT_PRIVATE SendAlgorithmInterface {
public:
static SendAlgorithmInterface* Create(QuicClock* clock,
@@ -47,6 +50,7 @@ class NET_EXPORT_PRIVATE SendAlgorithmInterface {
virtual size_t AvailableCongestionWindow() = 0;
// What's the current estimated bandwidth in bytes per second.
+  // Returns kNoValidEstimate when it does not have an estimate.
virtual int BandwidthEstimate() = 0;
};
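A short sketch of how a caller is expected to treat the two sentinels added here (send_algorithm is a hypothetical SendAlgorithmInterface pointer; the fallback mirrors QuicSendScheduler::BandwidthEstimate() above):

int wait_us = send_algorithm->TimeUntilSend(false);
if (wait_us == kUnknownWaitTime) {
  // Do not poll again; wait for the next incoming ack or timer event.
} else if (wait_us == 0) {
  // Clear to send immediately.
}

int estimate = send_algorithm->BandwidthEstimate();
if (estimate == kNoValidEstimate) {
  // No valid estimate yet; fall back to the measured send rate, as
  // QuicSendScheduler does with SentBandwidth().
}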