30 files changed, 389 insertions, 93 deletions
diff --git a/net/quic/congestion_control/fix_rate_sender.cc b/net/quic/congestion_control/fix_rate_sender.cc index 73482cc..51e16da 100644 --- a/net/quic/congestion_control/fix_rate_sender.cc +++ b/net/quic/congestion_control/fix_rate_sender.cc @@ -109,4 +109,8 @@ QuicByteCount FixRateSender::GetSlowStartThreshold() const { return 0; } +CongestionControlType FixRateSender::GetCongestionControlType() const { + return kFixRateCongestionControl; +} + } // namespace net diff --git a/net/quic/congestion_control/fix_rate_sender.h b/net/quic/congestion_control/fix_rate_sender.h index c50d8a9..eb6d4b6 100644 --- a/net/quic/congestion_control/fix_rate_sender.h +++ b/net/quic/congestion_control/fix_rate_sender.h @@ -52,6 +52,7 @@ class NET_EXPORT_PRIVATE FixRateSender : public SendAlgorithmInterface { virtual QuicByteCount GetCongestionWindow() const OVERRIDE; virtual bool InSlowStart() const OVERRIDE; virtual QuicByteCount GetSlowStartThreshold() const OVERRIDE; + virtual CongestionControlType GetCongestionControlType() const OVERRIDE; // End implementation of SendAlgorithmInterface. private: diff --git a/net/quic/congestion_control/pacing_sender.cc b/net/quic/congestion_control/pacing_sender.cc index f0f0e5d..a100839 100644 --- a/net/quic/congestion_control/pacing_sender.cc +++ b/net/quic/congestion_control/pacing_sender.cc @@ -174,4 +174,8 @@ QuicByteCount PacingSender::GetSlowStartThreshold() const { return sender_->GetSlowStartThreshold(); } +CongestionControlType PacingSender::GetCongestionControlType() const { + return sender_->GetCongestionControlType(); +} + } // namespace net diff --git a/net/quic/congestion_control/pacing_sender.h b/net/quic/congestion_control/pacing_sender.h index b85946c..be46df9 100644 --- a/net/quic/congestion_control/pacing_sender.h +++ b/net/quic/congestion_control/pacing_sender.h @@ -60,6 +60,7 @@ class NET_EXPORT_PRIVATE PacingSender : public SendAlgorithmInterface { virtual QuicByteCount GetCongestionWindow() const OVERRIDE; virtual bool InSlowStart() const OVERRIDE; virtual QuicByteCount GetSlowStartThreshold() const OVERRIDE; + virtual CongestionControlType GetCongestionControlType() const OVERRIDE; private: scoped_ptr<SendAlgorithmInterface> sender_; // Underlying sender. 
diff --git a/net/quic/congestion_control/receive_algorithm_interface.cc b/net/quic/congestion_control/receive_algorithm_interface.cc index 5384bdc..f5672f8 100644 --- a/net/quic/congestion_control/receive_algorithm_interface.cc +++ b/net/quic/congestion_control/receive_algorithm_interface.cc @@ -15,9 +15,6 @@ ReceiveAlgorithmInterface* ReceiveAlgorithmInterface::Create( switch (type) { case kTCP: return new TcpReceiver(); - case kTCPBBR: - LOG(DFATAL) << "TCPBBR is not yet supported."; - return NULL; case kInterArrival: LOG(DFATAL) << "InterArrivalSendAlgorithm no longer supported."; return NULL; diff --git a/net/quic/congestion_control/send_algorithm_interface.cc b/net/quic/congestion_control/send_algorithm_interface.cc index 58b92bc..2401667 100644 --- a/net/quic/congestion_control/send_algorithm_interface.cc +++ b/net/quic/congestion_control/send_algorithm_interface.cc @@ -18,18 +18,18 @@ class RttStats; SendAlgorithmInterface* SendAlgorithmInterface::Create( const QuicClock* clock, const RttStats* rtt_stats, - CongestionFeedbackType type, + CongestionControlType congestion_control_type, QuicConnectionStats* stats) { - switch (type) { + switch (congestion_control_type) { case kTCP: return new TcpCubicSender(clock, rtt_stats, kUseReno, kMaxTcpCongestionWindow, stats); case kInterArrival: LOG(DFATAL) << "InterArrivalSendAlgorithm no longer supported."; return NULL; - case kFixRate: + case kFixRateCongestionControl: return new FixRateSender(rtt_stats); - case kTCPBBR: + case kBBR: LOG(DFATAL) << "BbrTcpSender is not supported."; return NULL; } diff --git a/net/quic/congestion_control/send_algorithm_interface.h b/net/quic/congestion_control/send_algorithm_interface.h index 9633ecd..9173afe 100644 --- a/net/quic/congestion_control/send_algorithm_interface.h +++ b/net/quic/congestion_control/send_algorithm_interface.h @@ -29,7 +29,7 @@ class NET_EXPORT_PRIVATE SendAlgorithmInterface { static SendAlgorithmInterface* Create(const QuicClock* clock, const RttStats* rtt_stats, - CongestionFeedbackType type, + CongestionControlType type, QuicConnectionStats* stats); virtual ~SendAlgorithmInterface() {} @@ -101,6 +101,8 @@ class NET_EXPORT_PRIVATE SendAlgorithmInterface { // aka ssthresh. Some send algorithms do not define a slow start // threshold and will return 0. 
virtual QuicByteCount GetSlowStartThreshold() const = 0; + + virtual CongestionControlType GetCongestionControlType() const = 0; }; } // namespace net diff --git a/net/quic/congestion_control/send_algorithm_simulator.cc b/net/quic/congestion_control/send_algorithm_simulator.cc index 0798d57..5c55f5b 100644 --- a/net/quic/congestion_control/send_algorithm_simulator.cc +++ b/net/quic/congestion_control/send_algorithm_simulator.cc @@ -34,7 +34,8 @@ SendAlgorithmSimulator::Sender::Sender(SendAlgorithmInterface* send_algorithm, min_cwnd(100000), max_cwnd_drop(0), last_cwnd(0), - last_transfer_bandwidth(QuicBandwidth::Zero()) {} + last_transfer_bandwidth(QuicBandwidth::Zero()), + last_transfer_loss_rate(0) {} SendAlgorithmSimulator::SendAlgorithmSimulator( MockClock* clock, @@ -106,30 +107,29 @@ void SendAlgorithmSimulator::TransferBytes(QuicByteCount max_bytes, } SendAlgorithmSimulator::PacketEvent SendAlgorithmSimulator::NextSendEvent() { - QuicTime::Delta send_time = QuicTime::Delta::Infinite(); + QuicTime::Delta next_send_time = QuicTime::Delta::Infinite(); Transfer* transfer = NULL; for (vector<Transfer>::iterator it = pending_transfers_.begin(); it != pending_transfers_.end(); ++it) { - // If the flow hasn't started, return the start time. - if (clock_->Now() < it->start_time) { - send_time = it->start_time.Subtract(clock_->Now()); - transfer = &(*it); - continue; - } // If we've already sent enough bytes, wait for them to be acked. if (it->bytes_acked + it->bytes_in_flight >= it->num_bytes) { continue; } - QuicTime::Delta transfer_send_time = - it->sender->send_algorithm->TimeUntilSend( - clock_->Now(), it->bytes_in_flight, HAS_RETRANSMITTABLE_DATA); - if (transfer_send_time < send_time) { - send_time = transfer_send_time; + // If the flow hasn't started, use the start time. + QuicTime::Delta transfer_send_time = it->start_time.Subtract(clock_->Now()); + if (clock_->Now() >= it->start_time) { + transfer_send_time = + it->sender->send_algorithm->TimeUntilSend( + clock_->Now(), it->bytes_in_flight, HAS_RETRANSMITTABLE_DATA); + } + if (transfer_send_time < next_send_time) { + next_send_time = transfer_send_time; transfer = &(*it); } } - DVLOG(1) << "NextSendTime returning delta(ms):" << send_time.ToMilliseconds(); - return PacketEvent(send_time, transfer); + DVLOG(1) << "NextSendTime returning delta(ms):" + << next_send_time.ToMilliseconds(); + return PacketEvent(next_send_time, transfer); } // NextAck takes into account packet loss in both forward and reverse @@ -146,8 +146,6 @@ SendAlgorithmSimulator::PacketEvent SendAlgorithmSimulator::NextAckEvent() { Transfer* transfer = NULL; for (vector<Transfer>::iterator it = pending_transfers_.begin(); it != pending_transfers_.end(); ++it) { - // If necessary, determine next_acked_. - // This is only done once to ensure multiple calls return the same time. QuicTime::Delta transfer_ack_time = FindNextAcked(&(*it)); if (transfer_ack_time < ack_time) { ack_time = transfer_ack_time; @@ -159,7 +157,7 @@ SendAlgorithmSimulator::PacketEvent SendAlgorithmSimulator::NextAckEvent() { } QuicTime::Delta SendAlgorithmSimulator::FindNextAcked(Transfer* transfer) { - Sender* sender = transfer->sender; + Sender* sender = transfer->sender; if (sender->next_acked == sender->last_acked) { // Determine if the next ack is lost only once, to ensure determinism. lose_next_ack_ = @@ -176,7 +174,7 @@ QuicTime::Delta SendAlgorithmSimulator::FindNextAcked(Transfer* transfer) { } // Lost packets don't trigger an ack. 
- if (it->ack_time == QuicTime::Zero()) { + if (it->ack_time == QuicTime::Zero()) { packets_lost = true; continue; } @@ -185,6 +183,7 @@ QuicTime::Delta SendAlgorithmSimulator::FindNextAcked(Transfer* transfer) { if (sender->next_acked < it->sequence_number - 1) { packets_lost = true; } + DCHECK_LT(sender->next_acked, it->sequence_number); sender->next_acked = it->sequence_number; if (packets_lost || (sender->next_acked - sender->last_acked) % 2 == 0) { if (two_acks_remaining) { @@ -194,6 +193,10 @@ QuicTime::Delta SendAlgorithmSimulator::FindNextAcked(Transfer* transfer) { } } } + // If the connection has no packets to be acked, return Infinite. + if (sender->next_acked == sender->last_acked) { + return QuicTime::Delta::Infinite(); + } QuicTime::Delta ack_time = QuicTime::Delta::Infinite(); for (list<SentPacket>::const_iterator it = sent_packets_.begin(); @@ -220,30 +223,39 @@ void SendAlgorithmSimulator::HandlePendingAck(Transfer* transfer) { SendAlgorithmInterface::CongestionMap lost_packets; // Some entries may be missing from the sent_packets_ array, if they were // dropped due to buffer overruns. - SentPacket largest_observed = sent_packets_.front(); + SentPacket largest_observed(0, QuicTime::Zero(), QuicTime::Zero(), NULL); + list<SentPacket>::iterator it = sent_packets_.begin(); while (sender->last_acked < sender->next_acked) { ++sender->last_acked; TransmissionInfo info = TransmissionInfo(); info.bytes_sent = kPacketSize; info.in_flight = true; + // Find the next SentPacket for this transfer. + while (it->transfer != transfer) { + DCHECK(it != sent_packets_.end()); + ++it; + } // If it's missing from the array, it's a loss. - if (sent_packets_.front().sequence_number > sender->last_acked) { + if (it->sequence_number > sender->last_acked) { DVLOG(1) << "Lost packet:" << sender->last_acked << " dropped by buffer overflow."; lost_packets[sender->last_acked] = info; continue; } - if (sent_packets_.front().ack_time.IsInitialized()) { + if (it->ack_time.IsInitialized()) { acked_packets[sender->last_acked] = info; } else { lost_packets[sender->last_acked] = info; } - // Remove all packets from the front to next_acked_. - largest_observed = sent_packets_.front(); - sent_packets_.pop_front(); + // This packet has been acked or lost, remove it from sent_packets_. + largest_observed = *it; + sent_packets_.erase(it++); } DCHECK(largest_observed.ack_time.IsInitialized()); + DVLOG(1) << "Updating RTT from send_time:" + << largest_observed.send_time.ToDebuggingValue() << " to ack_time:" + << largest_observed.ack_time.ToDebuggingValue(); sender->rtt_stats->UpdateRtt( largest_observed.ack_time.Subtract(largest_observed.send_time), QuicTime::Delta::Zero(), @@ -257,10 +269,13 @@ void SendAlgorithmSimulator::HandlePendingAck(Transfer* transfer) { sender->RecordStats(); transfer->bytes_acked += acked_packets.size() * kPacketSize; + transfer->bytes_lost += lost_packets.size() * kPacketSize; if (transfer->bytes_acked >= transfer->num_bytes) { // Remove completed transfers and record transfer bandwidth. 
QuicTime::Delta transfer_time = clock_->Now().Subtract(transfer->start_time); + sender->last_transfer_loss_rate = static_cast<float>(transfer->bytes_lost) / + (transfer->bytes_lost + transfer->bytes_acked); sender->last_transfer_bandwidth = QuicBandwidth::FromBytesAndTimeDelta(transfer->num_bytes, transfer_time); @@ -304,6 +319,9 @@ void SendAlgorithmSimulator::SendDataNow(Transfer* transfer) { if ((sent_packets_.size() + 1) * kPacketSize > bdp) { QuicByteCount qsize = (sent_packets_.size() + 1) * kPacketSize - bdp; ack_time = ack_time.Add(bandwidth_.TransferTime(qsize)); + DVLOG(1) << "Increasing transfer time:" + << bandwidth_.TransferTime(qsize).ToMilliseconds() + << "ms due to qsize:" << qsize; } // If the packet is lost, give it an ack time of Zero. sent_packets_.push_back(SentPacket( diff --git a/net/quic/congestion_control/send_algorithm_simulator.h b/net/quic/congestion_control/send_algorithm_simulator.h index 4941f35..4ecb833 100644 --- a/net/quic/congestion_control/send_algorithm_simulator.h +++ b/net/quic/congestion_control/send_algorithm_simulator.h @@ -8,21 +8,26 @@ #define NET_QUIC_CONGESTION_CONTROL_SEND_ALGORITHM_SIMULATOR_H_ #include <algorithm> +#include <string> #include <vector> #include "base/basictypes.h" +#include "base/format_macros.h" +#include "base/strings/stringprintf.h" #include "net/quic/congestion_control/send_algorithm_interface.h" #include "net/quic/quic_protocol.h" #include "net/quic/quic_time.h" #include "net/quic/test_tools/mock_clock.h" #include "net/quic/test_tools/quic_test_utils.h" +using base::StringPrintf; + namespace net { class SendAlgorithmSimulator { public: struct Sender { - Sender(SendAlgorithmInterface* send_algorithm, RttStats* rtt_stats); + Sender(SendAlgorithmInterface* send_algorithm, RttStats* rtt_stats); void RecordStats() { QuicByteCount cwnd = send_algorithm->GetCongestionWindow(); @@ -34,6 +39,18 @@ class SendAlgorithmSimulator { last_cwnd = cwnd; } + std::string DebugString() { + return StringPrintf("observed goodput(bytes/s):%" PRId64 + " loss rate:%f" + " cwnd:%" PRIu64 + " max_cwnd:%" PRIu64 " min_cwnd:%" PRIu64 + " max_cwnd_drop:%" PRIu64, + last_transfer_bandwidth.ToBytesPerSecond(), + last_transfer_loss_rate, + send_algorithm->GetCongestionWindow(), + max_cwnd, min_cwnd, max_cwnd_drop); + } + SendAlgorithmInterface* send_algorithm; RttStats* rtt_stats; @@ -51,6 +68,7 @@ class SendAlgorithmSimulator { QuicByteCount last_cwnd; QuicBandwidth last_transfer_bandwidth; + float last_transfer_loss_rate; }; struct Transfer { @@ -58,12 +76,14 @@ class SendAlgorithmSimulator { : sender(sender), num_bytes(num_bytes), bytes_acked(0), + bytes_lost(0), bytes_in_flight(0), start_time(start_time) {} Sender* sender; QuicByteCount num_bytes; QuicByteCount bytes_acked; + QuicByteCount bytes_lost; QuicByteCount bytes_in_flight; QuicTime start_time; }; diff --git a/net/quic/congestion_control/tcp_cubic_sender.cc b/net/quic/congestion_control/tcp_cubic_sender.cc index 80f07f5..dd7e7ae 100644 --- a/net/quic/congestion_control/tcp_cubic_sender.cc +++ b/net/quic/congestion_control/tcp_cubic_sender.cc @@ -345,4 +345,8 @@ QuicTime::Delta TcpCubicSender::PrrTimeUntilSend( return QuicTime::Delta::Infinite(); } +CongestionControlType TcpCubicSender::GetCongestionControlType() const { + return reno_ ? 
kReno : kCubic; +} + } // namespace net diff --git a/net/quic/congestion_control/tcp_cubic_sender.h b/net/quic/congestion_control/tcp_cubic_sender.h index e682a62..28f8302 100644 --- a/net/quic/congestion_control/tcp_cubic_sender.h +++ b/net/quic/congestion_control/tcp_cubic_sender.h @@ -63,6 +63,7 @@ class NET_EXPORT_PRIVATE TcpCubicSender : public SendAlgorithmInterface { virtual QuicByteCount GetCongestionWindow() const OVERRIDE; virtual bool InSlowStart() const OVERRIDE; virtual QuicByteCount GetSlowStartThreshold() const OVERRIDE; + virtual CongestionControlType GetCongestionControlType() const OVERRIDE; // End implementation of SendAlgorithmInterface. private: diff --git a/net/quic/quic_connection.cc b/net/quic/quic_connection.cc index 199c407..7d89917 100644 --- a/net/quic/quic_connection.cc +++ b/net/quic/quic_connection.cc @@ -227,7 +227,7 @@ QuicConnection::QuicConnection(QuicConnectionId connection_id, time_of_last_sent_new_packet_(clock_->ApproximateNow()), sequence_number_of_last_sent_packet_(0), sent_packet_manager_( - is_server, clock_, &stats_, kTCP, + is_server, clock_, &stats_, kCubic, FLAGS_quic_use_time_loss_detection ? kTime : kNack), version_negotiation_state_(START_NEGOTIATION), is_server_(is_server), @@ -246,6 +246,7 @@ QuicConnection::QuicConnection(QuicConnectionId connection_id, framer_.set_visitor(this); framer_.set_received_entropy_calculator(&received_packet_manager_); stats_.connection_creation_time = clock_->ApproximateNow(); + sent_packet_manager_.set_network_change_visitor(&packet_generator_); } QuicConnection::~QuicConnection() { @@ -307,6 +308,9 @@ void QuicConnection::OnPublicResetPacket( debug_visitor_->OnPublicResetPacket(packet); } CloseConnection(QUIC_PUBLIC_RESET, true); + + DVLOG(1) << ENDPOINT << "Connection " << connection_id() + << " closed via QUIC_PUBLIC_RESET from peer."; } bool QuicConnection::OnProtocolVersionMismatch(QuicVersion received_version) { diff --git a/net/quic/quic_connection_logger.cc b/net/quic/quic_connection_logger.cc index 6ac4f55..4fbe6a4 100644 --- a/net/quic/quic_connection_logger.cc +++ b/net/quic/quic_connection_logger.cc @@ -146,10 +146,6 @@ base::Value* NetLogQuicCongestionFeedbackFrameCallback( dict->SetString("type", "TCP"); dict->SetInteger("receive_window", frame->tcp.receive_window); break; - case kTCPBBR: - dict->SetString("type", "TCPBBR"); - // TODO(rtenneti): Add support for BBR. - break; } return dict; diff --git a/net/quic/quic_connection_test.cc b/net/quic/quic_connection_test.cc index 746d6e1..a0587b5 100644 --- a/net/quic/quic_connection_test.cc +++ b/net/quic/quic_connection_test.cc @@ -3945,6 +3945,21 @@ TEST_P(QuicConnectionTest, AckNotifierCallbackAfterFECRecovery) { ProcessFecPacket(2, 1, true, !kEntropyFlag, packet); } +TEST_P(QuicConnectionTest, NetworkChangeVisitorCallbacksChangeFecState) { + QuicPacketCreator* creator = + QuicConnectionPeer::GetPacketCreator(&connection_); + size_t max_packets_per_fec_group = creator->max_packets_per_fec_group(); + + QuicSentPacketManager::NetworkChangeVisitor* visitor = + QuicSentPacketManagerPeer::GetNetworkChangeVisitor( + QuicConnectionPeer::GetSentPacketManager(&connection_)); + EXPECT_TRUE(visitor); + + // Increase FEC group size by increasing congestion window to a large number. 
+ visitor->OnCongestionWindowChange(1000 * kDefaultTCPMSS); + EXPECT_LT(max_packets_per_fec_group, creator->max_packets_per_fec_group()); +} + class MockQuicConnectionDebugVisitor : public QuicConnectionDebugVisitor { public: diff --git a/net/quic/quic_packet_creator.cc b/net/quic/quic_packet_creator.cc index 8a99e88..3b86654 100644 --- a/net/quic/quic_packet_creator.cc +++ b/net/quic/quic_packet_creator.cc @@ -20,6 +20,15 @@ using std::vector; namespace net { +namespace { + +// Default max packets in an FEC group. +static const size_t kDefaultMaxPacketsPerFecGroup = 10; +// Lowest max packets in an FEC group. +static const size_t kLowestMaxPacketsPerFecGroup = 2; + +} // namespace + // A QuicRandom wrapper that gets a bucket of entropy and distributes it // bit-by-bit. Replenishes the bucket as needed. Not thread-safe. Expose this // class if single bit randomness is needed elsewhere. @@ -67,7 +76,7 @@ QuicPacketCreator::QuicPacketCreator(QuicConnectionId connection_id, fec_group_number_(0), send_version_in_packet_(!framer->is_server()), max_packet_length_(kDefaultMaxPacketSize), - max_packets_per_fec_group_(kMaxPacketsPerFecGroup), + max_packets_per_fec_group_(kDefaultMaxPacketsPerFecGroup), connection_id_length_(PACKET_8BYTE_CONNECTION_ID), next_sequence_number_length_(PACKET_1BYTE_SEQUENCE_NUMBER), sequence_number_length_(next_sequence_number_length_), @@ -86,6 +95,13 @@ void QuicPacketCreator::OnBuiltFecProtectedPayload( } } +void QuicPacketCreator::set_max_packets_per_fec_group( + size_t max_packets_per_fec_group) { + max_packets_per_fec_group_ = max(kLowestMaxPacketsPerFecGroup, + max_packets_per_fec_group); + DCHECK_LT(0u, max_packets_per_fec_group_); +} + bool QuicPacketCreator::ShouldSendFec(bool force_close) const { DCHECK(!HasPendingFrames()); return fec_group_.get() != NULL && fec_group_->NumReceivedPackets() > 0 && @@ -94,7 +110,7 @@ bool QuicPacketCreator::ShouldSendFec(bool force_close) const { } bool QuicPacketCreator::IsFecGroupOpen() const { - return ShouldSendFec(true); + return fec_group_.get() != NULL; } void QuicPacketCreator::StartFecProtectingPackets() { diff --git a/net/quic/quic_packet_creator.h b/net/quic/quic_packet_creator.h index ecab1e6..cd6c587 100644 --- a/net/quic/quic_packet_creator.h +++ b/net/quic/quic_packet_creator.h @@ -225,11 +225,10 @@ class NET_EXPORT_PRIVATE QuicPacketCreator : public QuicFecBuilderInterface { } // Sets creator's max number of packets covered by an FEC group. - void set_max_packets_per_fec_group(size_t max_packets_per_fec_group) { - // To turn off FEC protection, use StopFecProtectingPackets(). - DCHECK_NE(0u, max_packets_per_fec_group); - max_packets_per_fec_group_ = max_packets_per_fec_group; - } + // Note: While there are no constraints on |max_packets_per_fec_group|, + // this setter enforces a min value of kLowestMaxPacketsPerFecGroup. + // To turn off FEC protection, use StopFecProtectingPackets(). 
+ void set_max_packets_per_fec_group(size_t max_packets_per_fec_group); private: friend class test::QuicPacketCreatorPeer; diff --git a/net/quic/quic_packet_creator_test.cc b/net/quic/quic_packet_creator_test.cc index ff50d5d..6612c78 100644 --- a/net/quic/quic_packet_creator_test.cc +++ b/net/quic/quic_packet_creator_test.cc @@ -748,23 +748,25 @@ TEST_P(QuicPacketCreatorTest, UpdatePacketSequenceNumberLengthLeastAwaiting) { EXPECT_EQ(PACKET_1BYTE_SEQUENCE_NUMBER, creator_.next_sequence_number_length()); - creator_.set_max_packets_per_fec_group(1); - creator_.set_sequence_number(63); + size_t max_packets_per_fec_group = 10; + creator_.set_max_packets_per_fec_group(max_packets_per_fec_group); + creator_.set_sequence_number(64 - max_packets_per_fec_group); creator_.UpdateSequenceNumberLength(2, 10000); EXPECT_EQ(PACKET_1BYTE_SEQUENCE_NUMBER, creator_.next_sequence_number_length()); - creator_.set_sequence_number(64 * 256 - 1); + creator_.set_sequence_number(64 * 256 - max_packets_per_fec_group); creator_.UpdateSequenceNumberLength(2, 10000); EXPECT_EQ(PACKET_2BYTE_SEQUENCE_NUMBER, creator_.next_sequence_number_length()); - creator_.set_sequence_number(64 * 256 * 256 - 1); + creator_.set_sequence_number(64 * 256 * 256 - max_packets_per_fec_group); creator_.UpdateSequenceNumberLength(2, 10000); EXPECT_EQ(PACKET_4BYTE_SEQUENCE_NUMBER, creator_.next_sequence_number_length()); - creator_.set_sequence_number(GG_UINT64_C(64) * 256 * 256 * 256 * 256 - 1); + creator_.set_sequence_number( + GG_UINT64_C(64) * 256 * 256 * 256 * 256 - max_packets_per_fec_group); creator_.UpdateSequenceNumberLength(2, 10000); EXPECT_EQ(PACKET_6BYTE_SEQUENCE_NUMBER, creator_.next_sequence_number_length()); diff --git a/net/quic/quic_packet_generator.cc b/net/quic/quic_packet_generator.cc index 9e7473e..8906ad9 100644 --- a/net/quic/quic_packet_generator.cc +++ b/net/quic/quic_packet_generator.cc @@ -13,6 +13,19 @@ using base::StringPiece; namespace net { +namespace { + +// We want to put some space between a protected packet and the FEC packet to +// avoid losing them both within the same loss episode. On the other hand, +// we expect to be able to recover from any loss in about an RTT. +// We resolve this tradeoff by sending an FEC packet atmost half an RTT, +// or equivalently, half a cwnd, after the first protected packet. Since we +// don't want to delay an FEC packet past half an RTT, we set the max FEC +// group size to be half the current congestion window. +const float kCongestionWindowMultiplierForFecGroupSize = 0.5; + +} // namespace + class QuicAckNotifier; QuicPacketGenerator::QuicPacketGenerator(QuicConnectionId connection_id, @@ -72,6 +85,14 @@ QuicPacketGenerator::~QuicPacketGenerator() { } } +// NetworkChangeVisitor method. +void QuicPacketGenerator::OnCongestionWindowChange( + QuicByteCount congestion_window) { + packet_creator_.set_max_packets_per_fec_group( + static_cast<size_t>(kCongestionWindowMultiplierForFecGroupSize * + congestion_window / kDefaultTCPMSS)); +} + void QuicPacketGenerator::SetShouldSendAck(bool also_send_feedback, bool also_send_stop_waiting) { should_send_ack_ = true; @@ -166,6 +187,9 @@ QuicConsumedData QuicPacketGenerator::ConsumeData(QuicStreamId id, // Try to close FEC group since we've either run out of data to send or we're // blocked. If not in batch mode, force close the group. + // TODO(jri): This method should be called with flush=false here + // once the timer-based FEC sending is done, to separate FEC sending from + // the end of batch operations. 
MaybeSendFecPacketAndCloseGroup(!InBatchMode()); DCHECK(InBatchMode() || !packet_creator_.HasPendingFrames()); @@ -228,20 +252,17 @@ void QuicPacketGenerator::MaybeStartFecProtection() { void QuicPacketGenerator::MaybeSendFecPacketAndCloseGroup(bool force) { if (!packet_creator_.IsFecProtected() || - packet_creator_.HasPendingFrames()) { + packet_creator_.HasPendingFrames() || + !packet_creator_.ShouldSendFec(force)) { return; } - - if (packet_creator_.ShouldSendFec(force)) { - // TODO(jri): SerializeFec can return a NULL packet, and this should - // cause an early return, with a call to - // delegate_->OnPacketGenerationError. - SerializedPacket serialized_fec = packet_creator_.SerializeFec(); - DCHECK(serialized_fec.packet); - delegate_->OnSerializedPacket(serialized_fec); - } - - // Turn FEC protection off if the creator does not have an FEC group open. + // TODO(jri): SerializeFec can return a NULL packet, and this should + // cause an early return, with a call to delegate_->OnPacketGenerationError. + SerializedPacket serialized_fec = packet_creator_.SerializeFec(); + DCHECK(serialized_fec.packet); + delegate_->OnSerializedPacket(serialized_fec); + // Turn FEC protection off if creator's protection is on and the creator + // does not have an open FEC group. // Note: We only wait until the frames queued in the creator are flushed; // pending frames in the generator will not keep us from turning FEC off. if (!should_fec_protect_ && !packet_creator_.IsFecGroupOpen()) { diff --git a/net/quic/quic_packet_generator.h b/net/quic/quic_packet_generator.h index d408aeab..77214df 100644 --- a/net/quic/quic_packet_generator.h +++ b/net/quic/quic_packet_generator.h @@ -54,6 +54,7 @@ #define NET_QUIC_QUIC_PACKET_GENERATOR_H_ #include "net/quic/quic_packet_creator.h" +#include "net/quic/quic_sent_packet_manager.h" #include "net/quic/quic_types.h" namespace net { @@ -64,7 +65,8 @@ class QuicPacketGeneratorPeer; class QuicAckNotifier; -class NET_EXPORT_PRIVATE QuicPacketGenerator { +class NET_EXPORT_PRIVATE QuicPacketGenerator + : public QuicSentPacketManager::NetworkChangeVisitor { public: class NET_EXPORT_PRIVATE DelegateInterface { public: @@ -98,6 +100,10 @@ class NET_EXPORT_PRIVATE QuicPacketGenerator { virtual ~QuicPacketGenerator(); + // QuicSentPacketManager::NetworkChangeVisitor methods. + virtual void OnCongestionWindowChange(QuicByteCount congestion_window) + OVERRIDE; + // Indicates that an ACK frame should be sent. If |also_send_feedback| is // true, then it also indicates a CONGESTION_FEEDBACK frame should be sent. // If |also_send_stop_waiting| is true, then it also indicates that a @@ -191,7 +197,7 @@ class NET_EXPORT_PRIVATE QuicPacketGenerator { // Serializes and calls the delegate on an FEC packet if one was under // construction in the creator. When |force| is false, it relies on the // creator being ready to send an FEC packet, otherwise FEC packet is sent - // as long as one is under construction in the creator. Also tries to turns + // as long as one is under construction in the creator. Also tries to turn // off FEC protection in the creator if it's off in the generator. 
void MaybeSendFecPacketAndCloseGroup(bool force); diff --git a/net/quic/quic_packet_generator_test.cc b/net/quic/quic_packet_generator_test.cc index 331b183..a400833 100644 --- a/net/quic/quic_packet_generator_test.cc +++ b/net/quic/quic_packet_generator_test.cc @@ -563,9 +563,75 @@ TEST_F(QuicPacketGeneratorTest, ConsumeData_FramesPreviouslyQueued) { CheckPacketContains(contents, packet2_); } +TEST_F(QuicPacketGeneratorTest, FecGroupSizeOnCongestionWindowChange) { + delegate_.SetCanWriteAnything(); + creator_->set_max_packets_per_fec_group(50); + EXPECT_EQ(50u, creator_->max_packets_per_fec_group()); + EXPECT_FALSE(creator_->IsFecGroupOpen()); + + // On reduced cwnd. + generator_.OnCongestionWindowChange(7 * kDefaultTCPMSS); + EXPECT_EQ(3u, creator_->max_packets_per_fec_group()); + + // On increased cwnd. + generator_.OnCongestionWindowChange(100 * kDefaultTCPMSS); + EXPECT_EQ(50u, creator_->max_packets_per_fec_group()); + + // On collapsed cwnd. + generator_.OnCongestionWindowChange(1 * kDefaultTCPMSS); + EXPECT_EQ(2u, creator_->max_packets_per_fec_group()); +} + +TEST_F(QuicPacketGeneratorTest, FecGroupSizeChangeWithOpenGroup) { + delegate_.SetCanWriteAnything(); + // TODO(jri): This starting of batch mode should not be required when + // FEC sending is separated from batching operations. + generator_.StartBatchOperations(); + creator_->set_max_packets_per_fec_group(50); + EXPECT_EQ(50u, creator_->max_packets_per_fec_group()); + EXPECT_FALSE(creator_->IsFecGroupOpen()); + + // Send enough data to create 4 packets with MUST_FEC_PROTECT flag. + // 3 packets are sent, one is queued in the creator. + { + InSequence dummy; + EXPECT_CALL(delegate_, OnSerializedPacket(_)).WillOnce( + DoAll(SaveArg<0>(&packet_), Return(true))); + EXPECT_CALL(delegate_, OnSerializedPacket(_)).WillOnce( + DoAll(SaveArg<0>(&packet2_), Return(true))); + EXPECT_CALL(delegate_, OnSerializedPacket(_)).WillOnce( + DoAll(SaveArg<0>(&packet3_), Return(true))); + } + size_t data_len = 3 * kDefaultMaxPacketSize + 1; + QuicConsumedData consumed = generator_.ConsumeData( + 7, CreateData(data_len), 0, true, MUST_FEC_PROTECT, NULL); + EXPECT_EQ(data_len, consumed.bytes_consumed); + EXPECT_TRUE(creator_->IsFecGroupOpen()); + + // Change FEC groupsize. + generator_.OnCongestionWindowChange(2 * kDefaultTCPMSS); + EXPECT_EQ(2u, creator_->max_packets_per_fec_group()); + + // Send enough data to trigger one unprotected data packet, + // causing the FEC packet to also be sent. + { + InSequence dummy; + EXPECT_CALL(delegate_, OnSerializedPacket(_)).WillOnce( + DoAll(SaveArg<0>(&packet4_), Return(true))); + EXPECT_CALL(delegate_, OnSerializedPacket(_)).WillOnce( + DoAll(SaveArg<0>(&packet5_), Return(true))); + } + consumed = generator_.ConsumeData(7, CreateData(kDefaultMaxPacketSize), 0, + true, MAY_FEC_PROTECT, NULL); + EXPECT_EQ(kDefaultMaxPacketSize, consumed.bytes_consumed); + // Verify that one FEC packet was sent. + CheckPacketIsFec(packet5_, /*fec_group=*/1u); + EXPECT_FALSE(creator_->IsFecGroupOpen()); + EXPECT_FALSE(creator_->IsFecProtected()); +} + TEST_F(QuicPacketGeneratorTest, SwitchFecOnOff) { delegate_.SetCanWriteAnything(); - // Enable FEC. 
creator_->set_max_packets_per_fec_group(2); EXPECT_FALSE(creator_->IsFecProtected()); diff --git a/net/quic/quic_protocol.cc b/net/quic/quic_protocol.cc index 5d883ca..f0b8b68 100644 --- a/net/quic/quic_protocol.cc +++ b/net/quic/quic_protocol.cc @@ -533,10 +533,6 @@ ostream& operator<<(ostream& os, os << " receive_window: " << tcp.receive_window; break; } - case kTCPBBR: { - LOG(DFATAL) << "TCPBBR is not yet supported."; - break; - } } return os; } diff --git a/net/quic/quic_protocol.h b/net/quic/quic_protocol.h index 1039cd1..e0c3c04 100644 --- a/net/quic/quic_protocol.h +++ b/net/quic/quic_protocol.h @@ -110,9 +110,6 @@ const int64 kDefaultMaxTimeForCryptoHandshakeSecs = 5; // 5 secs. // Default ping timeout. const int64 kPingTimeoutSecs = 15; // 15 secs. -// Default max packets in an FEC group. -const size_t kMaxPacketsPerFecGroup = 10; - // We define an unsigned 16-bit floating point value, inspired by IEEE floats // (http://en.wikipedia.org/wiki/Half_precision_floating-point_format), // with 5-bit exponent (bias 1), 11-bit mantissa (effective 12 with hidden @@ -718,7 +715,17 @@ enum CongestionFeedbackType { kTCP, // Used to mimic TCP. kInterArrival, // Use additional inter arrival information. kFixRate, // Provided for testing. - kTCPBBR, // BBR implementation based on TCP congestion feedback. +}; + +// Defines for all types of congestion control algorithms that can be used in +// QUIC. Note that this is separate from the congestion feedback type - +// some congestion control algorithms may use the same feedback type +// (Reno and Cubic are the classic example for that). +enum CongestionControlType { + kCubic, + kReno, + kFixRateCongestionControl, // Provided for testing. + kBBR, }; enum LossDetectionType { diff --git a/net/quic/quic_sent_packet_manager.cc b/net/quic/quic_sent_packet_manager.cc index b5ca9fe..30f34e3 100644 --- a/net/quic/quic_sent_packet_manager.cc +++ b/net/quic/quic_sent_packet_manager.cc @@ -63,18 +63,22 @@ bool HasCryptoHandshake(const TransmissionInfo& transmission_info) { #define ENDPOINT (is_server_ ? 
"Server: " : " Client: ") -QuicSentPacketManager::QuicSentPacketManager(bool is_server, - const QuicClock* clock, - QuicConnectionStats* stats, - CongestionFeedbackType type, - LossDetectionType loss_type) +QuicSentPacketManager::QuicSentPacketManager( + bool is_server, + const QuicClock* clock, + QuicConnectionStats* stats, + CongestionControlType congestion_control_type, + LossDetectionType loss_type) : unacked_packets_(), is_server_(is_server), clock_(clock), stats_(stats), debug_delegate_(NULL), - send_algorithm_( - SendAlgorithmInterface::Create(clock, &rtt_stats_, type, stats)), + network_change_visitor_(NULL), + send_algorithm_(SendAlgorithmInterface::Create(clock, + &rtt_stats_, + congestion_control_type, + stats)), loss_algorithm_(LossDetectionInterface::Create(loss_type)), largest_observed_(0), first_rto_transmission_(0), @@ -103,7 +107,7 @@ void QuicSentPacketManager::SetFromConfig(const QuicConfig& config) { QuicTime::Delta::FromSeconds(FLAGS_quic_recent_min_rtt_window_s)); } send_algorithm_.reset( - SendAlgorithmInterface::Create(clock_, &rtt_stats_, kTCPBBR, stats_)); + SendAlgorithmInterface::Create(clock_, &rtt_stats_, kBBR, stats_)); } if (config.congestion_feedback() == kPACE || (config.HasReceivedConnectionOptions() && @@ -119,6 +123,10 @@ void QuicSentPacketManager::SetFromConfig(const QuicConfig& config) { loss_algorithm_.reset(LossDetectionInterface::Create(kTime)); } send_algorithm_->SetFromConfig(config, is_server_); + + if (network_change_visitor_ != NULL) { + network_change_visitor_->OnCongestionWindowChange(GetCongestionWindow()); + } } // TODO(ianswett): Combine this method with OnPacketSent once packets are always @@ -207,12 +215,15 @@ void QuicSentPacketManager::OnIncomingAck( void QuicSentPacketManager::MaybeInvokeCongestionEvent( bool rtt_updated, QuicByteCount bytes_in_flight) { - if (rtt_updated || !packets_acked_.empty() || - !packets_lost_.empty()) { - send_algorithm_->OnCongestionEvent( - rtt_updated, bytes_in_flight, packets_acked_, packets_lost_); - packets_acked_.clear(); - packets_lost_.clear(); + if (!rtt_updated && packets_acked_.empty() && packets_lost_.empty()) { + return; + } + send_algorithm_->OnCongestionEvent(rtt_updated, bytes_in_flight, + packets_acked_, packets_lost_); + packets_acked_.clear(); + packets_lost_.clear(); + if (network_change_visitor_ != NULL) { + network_change_visitor_->OnCongestionWindowChange(GetCongestionWindow()); } } @@ -628,6 +639,10 @@ void QuicSentPacketManager::RetransmitAllPackets() { } ++consecutive_rto_count_; } + + if (network_change_visitor_ != NULL) { + network_change_visitor_->OnCongestionWindowChange(GetCongestionWindow()); + } } QuicSentPacketManager::RetransmissionTimeoutMode diff --git a/net/quic/quic_sent_packet_manager.h b/net/quic/quic_sent_packet_manager.h index af40072..4ebe151 100644 --- a/net/quic/quic_sent_packet_manager.h +++ b/net/quic/quic_sent_packet_manager.h @@ -72,6 +72,18 @@ class NET_EXPORT_PRIVATE QuicSentPacketManager { QuicPacketSequenceNumber least_unacked_sent_packet) {} }; + // Interface which gets callbacks from the QuicSentPacketManager when + // network-related state changes. Implementations must not mutate the + // state of the packet manager as a result of these callbacks. + class NET_EXPORT_PRIVATE NetworkChangeVisitor { + public: + virtual ~NetworkChangeVisitor() {} + + // Called when congestion window may have changed. + virtual void OnCongestionWindowChange(QuicByteCount congestion_window) = 0; + // TODO(jri): Add OnRttStatsChange() to this class as well. 
+ }; + // Struct to store the pending retransmission information. struct PendingRetransmission { PendingRetransmission(QuicPacketSequenceNumber sequence_number, @@ -93,7 +105,7 @@ class NET_EXPORT_PRIVATE QuicSentPacketManager { QuicSentPacketManager(bool is_server, const QuicClock* clock, QuicConnectionStats* stats, - CongestionFeedbackType congestion_type, + CongestionControlType congestion_control_type, LossDetectionType loss_type); virtual ~QuicSentPacketManager(); @@ -210,6 +222,12 @@ class NET_EXPORT_PRIVATE QuicSentPacketManager { return largest_observed_; } + void set_network_change_visitor(NetworkChangeVisitor* visitor) { + DCHECK(!network_change_visitor_); + DCHECK(visitor); + network_change_visitor_ = visitor; + } + private: friend class test::QuicConnectionPeer; friend class test::QuicSentPacketManagerPeer; @@ -316,6 +334,7 @@ class NET_EXPORT_PRIVATE QuicSentPacketManager { const QuicClock* clock_; QuicConnectionStats* stats_; DebugDelegate* debug_delegate_; + NetworkChangeVisitor* network_change_visitor_; RttStats rtt_stats_; scoped_ptr<SendAlgorithmInterface> send_algorithm_; scoped_ptr<LossDetectionInterface> loss_algorithm_; diff --git a/net/quic/quic_sent_packet_manager_test.cc b/net/quic/quic_sent_packet_manager_test.cc index 65f086e..90f94f7 100644 --- a/net/quic/quic_sent_packet_manager_test.cc +++ b/net/quic/quic_sent_packet_manager_test.cc @@ -13,6 +13,7 @@ #include "testing/gtest/include/gtest/gtest.h" using std::vector; +using testing::AnyNumber; using testing::ElementsAre; using testing::Pair; using testing::Pointwise; @@ -43,13 +44,15 @@ class MockDebugDelegate : public QuicSentPacketManager::DebugDelegate { class QuicSentPacketManagerTest : public ::testing::TestWithParam<bool> { protected: QuicSentPacketManagerTest() - : manager_(true, &clock_, &stats_, kFixRate, kNack), - send_algorithm_(new StrictMock<MockSendAlgorithm>) { + : manager_(true, &clock_, &stats_, kFixRateCongestionControl, kNack), + send_algorithm_(new StrictMock<MockSendAlgorithm>), + network_change_visitor_(new StrictMock<MockNetworkChangeVisitor>) { QuicSentPacketManagerPeer::SetSendAlgorithm(&manager_, send_algorithm_); // Disable tail loss probes for most tests. QuicSentPacketManagerPeer::SetMaxTailLossProbes(&manager_, 0); // Advance the time 1s so the send times are never QuicTime::Zero. 
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(1000)); + manager_.set_network_change_visitor(network_change_visitor_.get()); } virtual ~QuicSentPacketManagerTest() OVERRIDE { @@ -89,11 +92,17 @@ class QuicSentPacketManagerTest : public ::testing::TestWithParam<bool> { void ExpectAck(QuicPacketSequenceNumber largest_observed) { EXPECT_CALL(*send_algorithm_, OnCongestionEvent( true, _, ElementsAre(Pair(largest_observed, _)), _)); + EXPECT_CALL(*send_algorithm_, GetCongestionWindow()) + .WillOnce(Return(100 * kDefaultTCPMSS)); + EXPECT_CALL(*network_change_visitor_, OnCongestionWindowChange(_)); } void ExpectUpdatedRtt(QuicPacketSequenceNumber largest_observed) { EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _)); + EXPECT_CALL(*send_algorithm_, GetCongestionWindow()) + .WillOnce(Return(100 * kDefaultTCPMSS)); + EXPECT_CALL(*network_change_visitor_, OnCongestionWindowChange(_)); } void ExpectAckAndLoss(bool rtt_updated, @@ -102,6 +111,9 @@ class QuicSentPacketManagerTest : public ::testing::TestWithParam<bool> { EXPECT_CALL(*send_algorithm_, OnCongestionEvent( rtt_updated, _, ElementsAre(Pair(largest_observed, _)), ElementsAre(Pair(lost_packet, _)))); + EXPECT_CALL(*send_algorithm_, GetCongestionWindow()) + .WillOnce(Return(100 * kDefaultTCPMSS)); + EXPECT_CALL(*network_change_visitor_, OnCongestionWindowChange(_)); } // |packets_acked| and |packets_lost| should be in sequence number order. @@ -122,6 +134,10 @@ class QuicSentPacketManagerTest : public ::testing::TestWithParam<bool> { OnCongestionEvent(rtt_updated, _, Pointwise(KeyEq(), ack_vector), Pointwise(KeyEq(), lost_vector))); + EXPECT_CALL(*send_algorithm_, GetCongestionWindow()) + .WillRepeatedly(Return(100 * kDefaultTCPMSS)); + EXPECT_CALL(*network_change_visitor_, OnCongestionWindowChange(_)). + Times(AnyNumber()); } // Retransmits a packet as though it was a TLP retransmission, because TLP @@ -253,6 +269,7 @@ class QuicSentPacketManagerTest : public ::testing::TestWithParam<bool> { MockClock clock_; QuicConnectionStats stats_; MockSendAlgorithm* send_algorithm_; + scoped_ptr<MockNetworkChangeVisitor> network_change_visitor_; }; TEST_F(QuicSentPacketManagerTest, IsUnacked) { @@ -414,6 +431,9 @@ TEST_F(QuicSentPacketManagerTest, RetransmitTwiceThenAckPreviousBeforeSend) { // Fire the RTO, which will mark 2 for retransmission (but will not send it). EXPECT_CALL(*send_algorithm_, OnRetransmissionTimeout(true)); + EXPECT_CALL(*network_change_visitor_, OnCongestionWindowChange(_)); + EXPECT_CALL(*send_algorithm_, GetCongestionWindow()) + .WillOnce(Return(2 * kDefaultTCPMSS)); manager_.OnRetransmissionTimeout(); EXPECT_TRUE(manager_.HasPendingRetransmissions()); @@ -872,6 +892,9 @@ TEST_F(QuicSentPacketManagerTest, TailLossProbeThenRTO) { // The final RTO abandons all of them. 
EXPECT_CALL(*send_algorithm_, OnRetransmissionTimeout(true)); + EXPECT_CALL(*network_change_visitor_, OnCongestionWindowChange(_)); + EXPECT_CALL(*send_algorithm_, GetCongestionWindow()) + .WillOnce(Return(2 * kDefaultTCPMSS)); manager_.OnRetransmissionTimeout(); EXPECT_TRUE(manager_.HasPendingRetransmissions()); EXPECT_EQ(2u, stats_.tlp_count); @@ -1123,6 +1146,9 @@ TEST_F(QuicSentPacketManagerTest, RetransmissionTimeout) { } EXPECT_CALL(*send_algorithm_, OnRetransmissionTimeout(true)); + EXPECT_CALL(*network_change_visitor_, OnCongestionWindowChange(_)); + EXPECT_CALL(*send_algorithm_, GetCongestionWindow()) + .WillOnce(Return(2 * kDefaultTCPMSS)); EXPECT_FALSE(manager_.MaybeRetransmitTailLossProbe()); manager_.OnRetransmissionTimeout(); } @@ -1214,6 +1240,9 @@ TEST_F(QuicSentPacketManagerTest, GetTransmissionTimeRTO) { EXPECT_EQ(expected_time, manager_.GetRetransmissionTime()); // Retransmit the packet by invoking the retransmission timeout. + EXPECT_CALL(*network_change_visitor_, OnCongestionWindowChange(_)); + EXPECT_CALL(*send_algorithm_, GetCongestionWindow()) + .WillOnce(Return(2 * kDefaultTCPMSS)); EXPECT_CALL(*send_algorithm_, OnRetransmissionTimeout(true)); clock_.AdvanceTime(expected_rto_delay); manager_.OnRetransmissionTimeout(); @@ -1257,6 +1286,9 @@ TEST_F(QuicSentPacketManagerTest, GetTransmissionDelayMin) { EXPECT_EQ(delay, QuicSentPacketManagerPeer::GetRetransmissionDelay(&manager_)); delay = delay.Add(delay); + EXPECT_CALL(*network_change_visitor_, OnCongestionWindowChange(_)); + EXPECT_CALL(*send_algorithm_, GetCongestionWindow()) + .WillOnce(Return(2 * kDefaultTCPMSS)); EXPECT_CALL(*send_algorithm_, OnRetransmissionTimeout(true)); manager_.OnRetransmissionTimeout(); RetransmitNextPacket(i + 2); @@ -1282,6 +1314,9 @@ TEST_F(QuicSentPacketManagerTest, GetTransmissionDelay) { EXPECT_EQ(delay, QuicSentPacketManagerPeer::GetRetransmissionDelay(&manager_)); delay = delay.Add(delay); + EXPECT_CALL(*network_change_visitor_, OnCongestionWindowChange(_)); + EXPECT_CALL(*send_algorithm_, GetCongestionWindow()) + .WillOnce(Return(2 * kDefaultTCPMSS)); EXPECT_CALL(*send_algorithm_, OnRetransmissionTimeout(true)); manager_.OnRetransmissionTimeout(); RetransmitNextPacket(i + 2); @@ -1327,6 +1362,9 @@ TEST_F(QuicSentPacketManagerTest, NegotiateTimeLossDetection) { QuicConfig config; QuicConfigPeer::SetReceivedLossDetection(&config, kTIME); EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _)); + EXPECT_CALL(*network_change_visitor_, OnCongestionWindowChange(_)); + EXPECT_CALL(*send_algorithm_, GetCongestionWindow()) + .WillOnce(Return(100 * kDefaultTCPMSS)); manager_.SetFromConfig(config); EXPECT_EQ(kTime, @@ -1344,6 +1382,9 @@ TEST_F(QuicSentPacketManagerTest, NegotiateTimeLossDetectionFromOptions) { options.push_back(kTIME); QuicConfigPeer::SetReceivedConnectionOptions(&config, options); EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _)); + EXPECT_CALL(*network_change_visitor_, OnCongestionWindowChange(_)); + EXPECT_CALL(*send_algorithm_, GetCongestionWindow()) + .WillOnce(Return(100 * kDefaultTCPMSS)); manager_.SetFromConfig(config); EXPECT_EQ(kTime, @@ -1359,6 +1400,9 @@ TEST_F(QuicSentPacketManagerTest, NegotiatePacingFromOptions) { QuicTagVector options; options.push_back(kPACE); QuicConfigPeer::SetReceivedConnectionOptions(&config, options); + EXPECT_CALL(*network_change_visitor_, OnCongestionWindowChange(_)); + EXPECT_CALL(*send_algorithm_, GetCongestionWindow()) + .WillOnce(Return(100 * kDefaultTCPMSS)); EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _)); 
manager_.SetFromConfig(config); diff --git a/net/quic/test_tools/quic_sent_packet_manager_peer.cc b/net/quic/test_tools/quic_sent_packet_manager_peer.cc index 786e63c..2b2fcb9 100644 --- a/net/quic/test_tools/quic_sent_packet_manager_peer.cc +++ b/net/quic/test_tools/quic_sent_packet_manager_peer.cc @@ -33,6 +33,13 @@ const LossDetectionInterface* QuicSentPacketManagerPeer::GetLossAlgorithm( } // static +const SendAlgorithmInterface* +QuicSentPacketManagerPeer::GetCongestionControlAlgorithm( + QuicSentPacketManager* sent_packet_manager) { + return sent_packet_manager->send_algorithm_.get(); +} + +// static void QuicSentPacketManagerPeer::SetLossAlgorithm( QuicSentPacketManager* sent_packet_manager, LossDetectionInterface* loss_detector) { @@ -126,5 +133,12 @@ QuicByteCount QuicSentPacketManagerPeer::GetBytesInFlight( return sent_packet_manager->unacked_packets_.bytes_in_flight(); } +// static +QuicSentPacketManager::NetworkChangeVisitor* + QuicSentPacketManagerPeer::GetNetworkChangeVisitor( + const QuicSentPacketManager* sent_packet_manager) { + return sent_packet_manager->network_change_visitor_; +} + } // namespace test } // namespace net diff --git a/net/quic/test_tools/quic_sent_packet_manager_peer.h b/net/quic/test_tools/quic_sent_packet_manager_peer.h index fed8015..77fba0b 100644 --- a/net/quic/test_tools/quic_sent_packet_manager_peer.h +++ b/net/quic/test_tools/quic_sent_packet_manager_peer.h @@ -25,6 +25,9 @@ class QuicSentPacketManagerPeer { static const LossDetectionInterface* GetLossAlgorithm( QuicSentPacketManager* sent_packet_manager); + static const SendAlgorithmInterface* GetCongestionControlAlgorithm( + QuicSentPacketManager* sent_packet_manager); + static void SetLossAlgorithm(QuicSentPacketManager* sent_packet_manager, LossDetectionInterface* loss_detector); @@ -63,6 +66,9 @@ class QuicSentPacketManagerPeer { static QuicByteCount GetBytesInFlight( const QuicSentPacketManager* sent_packet_manager); + static QuicSentPacketManager::NetworkChangeVisitor* GetNetworkChangeVisitor( + const QuicSentPacketManager* sent_packet_manager); + private: DISALLOW_COPY_AND_ASSIGN(QuicSentPacketManagerPeer); }; diff --git a/net/quic/test_tools/quic_test_utils.cc b/net/quic/test_tools/quic_test_utils.cc index 36d0e78..62a891b 100644 --- a/net/quic/test_tools/quic_test_utils.cc +++ b/net/quic/test_tools/quic_test_utils.cc @@ -368,6 +368,12 @@ MockAckNotifierDelegate::MockAckNotifierDelegate() { MockAckNotifierDelegate::~MockAckNotifierDelegate() { } +MockNetworkChangeVisitor::MockNetworkChangeVisitor() { +} + +MockNetworkChangeVisitor::~MockNetworkChangeVisitor() { +} + namespace { string HexDumpWithMarks(const char* data, int length, diff --git a/net/quic/test_tools/quic_test_utils.h b/net/quic/test_tools/quic_test_utils.h index bae8050..631d59f 100644 --- a/net/quic/test_tools/quic_test_utils.h +++ b/net/quic/test_tools/quic_test_utils.h @@ -17,6 +17,7 @@ #include "net/quic/quic_client_session_base.h" #include "net/quic/quic_connection.h" #include "net/quic/quic_framer.h" +#include "net/quic/quic_sent_packet_manager.h" #include "net/quic/quic_session.h" #include "net/quic/test_tools/mock_clock.h" #include "net/quic/test_tools/mock_random.h" @@ -464,6 +465,7 @@ class MockSendAlgorithm : public SendAlgorithmInterface { MOCK_CONST_METHOD0(GetCongestionWindow, QuicByteCount()); MOCK_CONST_METHOD0(InSlowStart, bool()); MOCK_CONST_METHOD0(GetSlowStartThreshold, QuicByteCount()); + MOCK_CONST_METHOD0(GetCongestionControlType, CongestionControlType()); private: 
DISALLOW_COPY_AND_ASSIGN(MockSendAlgorithm); @@ -530,6 +532,18 @@ class MockAckNotifierDelegate : public QuicAckNotifier::DelegateInterface { DISALLOW_COPY_AND_ASSIGN(MockAckNotifierDelegate); }; +class MockNetworkChangeVisitor : + public QuicSentPacketManager::NetworkChangeVisitor { + public: + MockNetworkChangeVisitor(); + virtual ~MockNetworkChangeVisitor(); + + MOCK_METHOD1(OnCongestionWindowChange, void(QuicByteCount)); + + private: + DISALLOW_COPY_AND_ASSIGN(MockNetworkChangeVisitor); +}; + } // namespace test } // namespace net diff --git a/net/tools/quic/end_to_end_test.cc b/net/tools/quic/end_to_end_test.cc index c11e069..dd1b28b 100644 --- a/net/tools/quic/end_to_end_test.cc +++ b/net/tools/quic/end_to_end_test.cc @@ -683,7 +683,6 @@ TEST_P(EndToEndTest, LargePostFec) { QuicPacketCreator* creator = QuicConnectionPeer::GetPacketCreator( client_->client()->session()->connection()); EXPECT_TRUE(creator->IsFecEnabled()); - EXPECT_EQ(kMaxPacketsPerFecGroup, creator->max_packets_per_fec_group()); // Set FecPolicy to always protect data on all streams. client_->SetFecPolicy(FEC_PROTECT_ALWAYS); @@ -716,7 +715,6 @@ TEST_P(EndToEndTest, ClientSpecifiedFecProtectionForHeaders) { QuicPacketCreator* creator = QuicConnectionPeer::GetPacketCreator( client_->client()->session()->connection()); EXPECT_TRUE(creator->IsFecEnabled()); - EXPECT_EQ(kMaxPacketsPerFecGroup, creator->max_packets_per_fec_group()); // Verify that server headers stream is FEC protected. server_thread_->Pause(); |
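For readers tracing the new control flow outside the patch context: the change above wires a QuicSentPacketManager::NetworkChangeVisitor callback into QuicPacketGenerator, which resizes the FEC group to cover at most half a congestion window of packets (kCongestionWindowMultiplierForFecGroupSize = 0.5), clamped to a floor of kLowestMaxPacketsPerFecGroup. The following is a minimal, self-contained sketch of that coupling, not part of the patch: the PacketCreator/PacketGenerator stand-in classes, the standalone main(), and the 1460-byte kDefaultTCPMSS value are assumptions for illustration only; the real types live in net/quic as shown in the diff.

```cpp
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstdio>

typedef uint64_t QuicByteCount;

// Assumed value for illustration; the real constant is defined in net/quic.
const QuicByteCount kDefaultTCPMSS = 1460;

// Constants introduced by this change.
const float kCongestionWindowMultiplierForFecGroupSize = 0.5f;  // half a cwnd
const size_t kDefaultMaxPacketsPerFecGroup = 10;
const size_t kLowestMaxPacketsPerFecGroup = 2;

// Simplified stand-in for QuicPacketCreator: enforces the new minimum group size.
class PacketCreator {
 public:
  PacketCreator() : max_packets_per_fec_group_(kDefaultMaxPacketsPerFecGroup) {}
  void set_max_packets_per_fec_group(size_t max_packets) {
    max_packets_per_fec_group_ =
        std::max(kLowestMaxPacketsPerFecGroup, max_packets);
  }
  size_t max_packets_per_fec_group() const { return max_packets_per_fec_group_; }

 private:
  size_t max_packets_per_fec_group_;
};

// Simplified stand-in for QuicPacketGenerator acting as the
// NetworkChangeVisitor: an FEC group may span at most half a cwnd of packets.
class PacketGenerator {
 public:
  void OnCongestionWindowChange(QuicByteCount congestion_window) {
    creator_.set_max_packets_per_fec_group(
        static_cast<size_t>(kCongestionWindowMultiplierForFecGroupSize *
                            congestion_window / kDefaultTCPMSS));
  }
  const PacketCreator& creator() const { return creator_; }

 private:
  PacketCreator creator_;
};

int main() {
  PacketGenerator generator;
  // Mirrors FecGroupSizeOnCongestionWindowChange in the new unit test:
  generator.OnCongestionWindowChange(7 * kDefaultTCPMSS);    // -> 3 packets
  generator.OnCongestionWindowChange(1 * kDefaultTCPMSS);    // -> clamped to 2
  generator.OnCongestionWindowChange(100 * kDefaultTCPMSS);  // -> 50 packets
  std::printf("max packets per FEC group: %zu\n",
              generator.creator().max_packets_per_fec_group());
  return 0;
}
```

In the real code the callback fires from QuicSentPacketManager whenever the send algorithm's congestion window may have changed (after congestion events, retransmission timeouts, and SetFromConfig), so the FEC group size tracks the sender's current cwnd rather than the fixed kMaxPacketsPerFecGroup constant that this change removes.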