summaryrefslogtreecommitdiffstats
path: root/sync
diff options
context:
space:
mode:
authordcheng@chromium.org <dcheng@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2012-11-09 04:46:46 +0000
committerdcheng@chromium.org <dcheng@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2012-11-09 04:46:46 +0000
commit191af5694ac0cd7cffe848ac3bcf23f4c00c9474 (patch)
tree1d054d2269efd0a5a3a004e5453420f4e9b52bec /sync
parentc57851b8409586f14e9cee46144786d1f91f234a (diff)
downloadchromium_src-191af5694ac0cd7cffe848ac3bcf23f4c00c9474.zip
chromium_src-191af5694ac0cd7cffe848ac3bcf23f4c00c9474.tar.gz
chromium_src-191af5694ac0cd7cffe848ac3bcf23f4c00c9474.tar.bz2
Implement a class to manage backoff timers for local invalidation reminders.
BUG=124149 Review URL: https://chromiumcodereview.appspot.com/11298002 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@166854 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'sync')
-rw-r--r--sync/notifier/DEPS1
-rw-r--r--sync/notifier/ack_tracker.cc225
-rw-r--r--sync/notifier/ack_tracker.h107
-rw-r--r--sync/notifier/ack_tracker_unittest.cc359
-rw-r--r--sync/sync.gyp3
5 files changed, 695 insertions, 0 deletions
diff --git a/sync/notifier/DEPS b/sync/notifier/DEPS
index c59f7bf..202fdc9 100644
--- a/sync/notifier/DEPS
+++ b/sync/notifier/DEPS
@@ -1,6 +1,7 @@
include_rules = [
"+google/cacheinvalidation",
"+jingle/notifier",
+ "+net/base/backoff_entry.h",
"+net/base/mock_host_resolver.h",
"+net/url_request/url_request_context.h",
"+net/url_request/url_request_test_util.h",
diff --git a/sync/notifier/ack_tracker.cc b/sync/notifier/ack_tracker.cc
new file mode 100644
index 0000000..4014672
--- /dev/null
+++ b/sync/notifier/ack_tracker.cc
@@ -0,0 +1,225 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/notifier/ack_tracker.h"
+
+#include <algorithm>
+#include <iterator>
+#include <utility>
+
+#include "base/callback.h"
+#include "base/stl_util.h"
+#include "google/cacheinvalidation/include/types.h"
+
+namespace syncer {
+
+namespace {
+
+// All times are in milliseconds.
+const net::BackoffEntry::Policy kDefaultBackoffPolicy = {
+ // Number of initial errors (in sequence) to ignore before applying
+ // exponential back-off rules.
+ // Note this value is set to 1 to work in conjunction with a hack in
+ // AckTracker::Track.
+ 1,
+
+ // Initial delay. The interpretation of this value depends on
+ // always_use_initial_delay. It's either how long we wait between
+ // requests before backoff starts, or how much we delay the first request
+ // after backoff starts.
+ 60 * 1000,
+
+ // Factor by which the waiting time will be multiplied.
+ 2,
+
+ // Fuzzing percentage. ex: 10% will spread requests randomly
+ // between 90%-100% of the calculated time.
+ 0,
+
+ // Maximum amount of time we are willing to delay our request, -1
+ // for no maximum.
+ 60 * 10 * 1000,
+
+ // Time to keep an entry from being discarded even when it
+ // has no significant state, -1 to never discard.
+ -1,
+
+ // If true, we always use a delay of initial_delay_ms, even before
+ // we've seen num_errors_to_ignore errors. Otherwise, initial_delay_ms
+ // is the first delay once we start exponential backoff.
+ //
+ // So if we're ignoring 1 error, we'll see (N, N, Nm, Nm^2, ...) if true,
+  // and (0, 0, N, Nm, ...) when false, where N is initial_delay_ms and
+ // m is multiply_factor, assuming we've already seen one success.
+ true,
+};
+
+scoped_ptr<net::BackoffEntry> CreateDefaultBackoffEntry(
+ const net::BackoffEntry::Policy* const policy) {
+ return scoped_ptr<net::BackoffEntry>(new net::BackoffEntry(policy));
+}
+
+} // namespace
+
+AckTracker::Delegate::~Delegate() {
+}
+
+AckTracker::Entry::Entry(scoped_ptr<net::BackoffEntry> backoff,
+ const ObjectIdSet& ids)
+ : backoff(backoff.Pass()), ids(ids) {
+}
+
+AckTracker::Entry::~Entry() {
+}
+
+AckTracker::AckTracker(Delegate* delegate)
+ : now_callback_(base::Bind(&base::TimeTicks::Now)),
+ create_backoff_entry_callback_(base::Bind(&CreateDefaultBackoffEntry)),
+ delegate_(delegate) {
+ DCHECK(delegate_);
+}
+
+AckTracker::~AckTracker() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ Clear();
+}
+
+void AckTracker::Clear() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ timer_.Stop();
+ STLDeleteValues(&queue_);
+}
+
+void AckTracker::Track(const ObjectIdSet& ids) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ scoped_ptr<Entry> entry(new Entry(
+ create_backoff_entry_callback_.Run(&kDefaultBackoffPolicy), ids));
+  // This is a small hack. When net::BackoffEntry is first created,
+ // GetReleaseTime() always returns the default base::TimeTicks value: 0.
+ // In order to work around that, we mark it as failed right away.
+ entry->backoff->InformOfRequest(false /* succeeded */);
+ const base::TimeTicks release_time = entry->backoff->GetReleaseTime();
+ queue_.insert(std::make_pair(release_time, entry.release()));
+ NudgeTimer();
+}
+
+void AckTracker::Ack(const ObjectIdSet& ids) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ // We could be clever and maintain a mapping of object IDs to their position
+ // in the multimap, but that makes things a lot more complicated.
+ for (std::multimap<base::TimeTicks, Entry*>::iterator it = queue_.begin();
+ it != queue_.end(); ) {
+ ObjectIdSet remaining_ids;
+ std::set_difference(it->second->ids.begin(), it->second->ids.end(),
+ ids.begin(), ids.end(),
+ std::inserter(remaining_ids, remaining_ids.begin()),
+ ids.value_comp());
+ it->second->ids.swap(remaining_ids);
+ if (it->second->ids.empty()) {
+ std::multimap<base::TimeTicks, Entry*>::iterator erase_it = it;
+ ++it;
+ delete erase_it->second;
+ queue_.erase(erase_it);
+ } else {
+ ++it;
+ }
+ }
+ NudgeTimer();
+}
+
+void AckTracker::NudgeTimer() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ if (queue_.empty()) {
+ return;
+ }
+
+ const base::TimeTicks now = now_callback_.Run();
+ // There are two cases when the timer needs to be started:
+ // 1. |desired_run_time_| is in the past. By definition, the timer has already
+ // fired at this point. Since the queue is non-empty, we need to set the
+ // timer to fire again.
+ // 2. The timer is already running but we need it to fire sooner if the first
+ // entry's timeout occurs before |desired_run_time_|.
+ if (desired_run_time_ <= now || queue_.begin()->first < desired_run_time_) {
+ base::TimeDelta delay = queue_.begin()->first - now;
+ if (delay < base::TimeDelta()) {
+ delay = base::TimeDelta();
+ }
+ timer_.Start(FROM_HERE, delay, this, &AckTracker::OnTimeout);
+ desired_run_time_ = queue_.begin()->first;
+ }
+}
+
+void AckTracker::OnTimeout() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ OnTimeoutAt(now_callback_.Run());
+}
+
+void AckTracker::OnTimeoutAt(base::TimeTicks now) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ if (queue_.empty())
+ return;
+
+ ObjectIdSet expired_ids;
+ std::multimap<base::TimeTicks, Entry*>::iterator end =
+ queue_.upper_bound(now);
+ std::vector<Entry*> expired_entries;
+ for (std::multimap<base::TimeTicks, Entry*>::iterator it = queue_.begin();
+ it != end; ++it) {
+ expired_ids.insert(it->second->ids.begin(), it->second->ids.end());
+ it->second->backoff->InformOfRequest(false /* succeeded */);
+ expired_entries.push_back(it->second);
+ }
+ queue_.erase(queue_.begin(), end);
+ for (std::vector<Entry*>::const_iterator it = expired_entries.begin();
+ it != expired_entries.end(); ++it) {
+ queue_.insert(std::make_pair((*it)->backoff->GetReleaseTime(), *it));
+ }
+ delegate_->OnTimeout(expired_ids);
+ NudgeTimer();
+}
+
+// Testing helpers.
+void AckTracker::SetNowCallbackForTest(
+ const NowCallback& now_callback) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ now_callback_ = now_callback;
+}
+
+void AckTracker::SetCreateBackoffEntryCallbackForTest(
+ const CreateBackoffEntryCallback& create_backoff_entry_callback) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ create_backoff_entry_callback_ = create_backoff_entry_callback;
+}
+
+bool AckTracker::TriggerTimeoutAtForTest(base::TimeTicks now) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ bool no_timeouts_before_now = (queue_.lower_bound(now) == queue_.begin());
+ OnTimeoutAt(now);
+ return no_timeouts_before_now;
+}
+
+bool AckTracker::IsQueueEmptyForTest() const {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ return queue_.empty();
+}
+
+const base::Timer& AckTracker::GetTimerForTest() const {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ return timer_;
+}
+
+} // namespace syncer
diff --git a/sync/notifier/ack_tracker.h b/sync/notifier/ack_tracker.h
new file mode 100644
index 0000000..1609ac9
--- /dev/null
+++ b/sync/notifier/ack_tracker.h
@@ -0,0 +1,107 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_NOTIFIER_ACK_TRACKER_H_
+#define SYNC_NOTIFIER_ACK_TRACKER_H_
+
+#include <map>
+
+#include "base/basictypes.h"
+#include "base/callback_forward.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/threading/thread_checker.h"
+#include "base/time.h"
+#include "base/timer.h"
+#include "net/base/backoff_entry.h"
+#include "sync/notifier/invalidation_util.h"
+
+namespace syncer {
+
+// A simple class that tracks sets of object IDs that have not yet been
+// acknowledged. Internally, it manages timeouts for the tracked object IDs and
+// periodically triggers a callback for each timeout period. The timeout is a
+// simple exponentially increasing time that starts at 60 seconds and is capped
+// at 600 seconds.
+class AckTracker {
+ public:
+ class Delegate {
+ public:
+ virtual ~Delegate();
+
+ // |ids| contains all object IDs that have timed out in this time interval.
+ virtual void OnTimeout(const ObjectIdSet& ids) = 0;
+ };
+
+ typedef base::Callback<base::TimeTicks()> NowCallback;
+ typedef base::Callback<scoped_ptr<net::BackoffEntry>(
+ const net::BackoffEntry::Policy* const)> CreateBackoffEntryCallback;
+
+ explicit AckTracker(Delegate* delegate);
+ ~AckTracker();
+
+ // Equivalent to calling Ack() on all currently registered object IDs.
+ void Clear();
+
+ // Starts tracking timeouts for |ids|. Timeouts will be triggered for each
+ // object ID until it is acknowledged. Note that no de-duplication is
+ // performed; calling Track() twice on the same set of ids will result in two
+ // different timeouts being triggered for those ids.
+ void Track(const ObjectIdSet& ids);
+ // Marks a set of |ids| as acknowledged.
+ void Ack(const ObjectIdSet& ids);
+
+ // Testing methods.
+ void SetNowCallbackForTest(const NowCallback& now_callback);
+ void SetCreateBackoffEntryCallbackForTest(
+ const CreateBackoffEntryCallback& create_backoff_entry_callback);
+ // Returns true iff there are no timeouts scheduled to occur before |now|.
+ // Used in testing to make sure we don't have timeouts set to expire before
+ // when they should.
+ bool TriggerTimeoutAtForTest(base::TimeTicks now);
+ bool IsQueueEmptyForTest() const;
+ const base::Timer& GetTimerForTest() const;
+
+ private:
+ struct Entry {
+ Entry(scoped_ptr<net::BackoffEntry> backoff, const ObjectIdSet& ids);
+ ~Entry();
+
+ scoped_ptr<net::BackoffEntry> backoff;
+ ObjectIdSet ids;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(Entry);
+ };
+
+ void NudgeTimer();
+ void OnTimeout();
+ void OnTimeoutAt(base::TimeTicks now);
+
+ static scoped_ptr<net::BackoffEntry> DefaultCreateBackoffEntryStrategy(
+ const net::BackoffEntry::Policy* const policy);
+
+ // Used for testing purposes.
+ NowCallback now_callback_;
+ CreateBackoffEntryCallback create_backoff_entry_callback_;
+
+ Delegate* const delegate_;
+
+ base::OneShotTimer<AckTracker> timer_;
+ // The time that the timer should fire at. We use this to determine if we need
+ // to start or update |timer_| in NudgeTimer(). We can't simply use
+ // timer_.desired_run_time() for this purpose because it always uses
+ // base::TimeTicks::Now() as a reference point when Timer::Start() is called,
+ // while NudgeTimer() needs a fixed reference point to avoid unnecessarily
+ // updating the timer.
+ base::TimeTicks desired_run_time_;
+ std::multimap<base::TimeTicks, Entry*> queue_;
+
+ base::ThreadChecker thread_checker_;
+
+ DISALLOW_COPY_AND_ASSIGN(AckTracker);
+};
+
+} // namespace syncer
+
+#endif // SYNC_NOTIFIER_ACK_TRACKER_H_
diff --git a/sync/notifier/ack_tracker_unittest.cc b/sync/notifier/ack_tracker_unittest.cc
new file mode 100644
index 0000000..9a5c802
--- /dev/null
+++ b/sync/notifier/ack_tracker_unittest.cc
@@ -0,0 +1,359 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/notifier/ack_tracker.h"
+
+#include "base/memory/ref_counted.h"
+#include "base/message_loop.h"
+#include "google/cacheinvalidation/include/types.h"
+#include "google/cacheinvalidation/types.pb.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace syncer {
+
+namespace {
+
+typedef AckTracker::NowCallback NowCallback;
+
+class MockTimeProvider : public base::RefCountedThreadSafe<MockTimeProvider> {
+ public:
+ MockTimeProvider() : fake_now_(base::TimeTicks::Now()) {}
+
+ void LeapForward(int seconds) {
+ ASSERT_GT(seconds, 0);
+ fake_now_ += base::TimeDelta::FromSeconds(seconds);
+ }
+
+ // After the next call to Now(), immediately leap forward by |seconds|.
+ void DelayedLeapForward(int seconds) {
+ ASSERT_GT(seconds, 0);
+ delayed_leap_ = base::TimeDelta::FromSeconds(seconds);
+ }
+
+ base::TimeTicks Now() {
+ base::TimeTicks fake_now = fake_now_;
+ if (delayed_leap_ > base::TimeDelta()) {
+ fake_now_ += delayed_leap_;
+ delayed_leap_ = base::TimeDelta();
+ }
+ return fake_now;
+ }
+
+ private:
+ friend class base::RefCountedThreadSafe<MockTimeProvider>;
+
+ ~MockTimeProvider() {}
+
+ base::TimeTicks fake_now_;
+ base::TimeDelta delayed_leap_;
+};
+
+class FakeBackoffEntry : public net::BackoffEntry {
+ public:
+ FakeBackoffEntry(const Policy* const policy, const NowCallback& now_callback)
+ : BackoffEntry(policy),
+ now_callback_(now_callback) {
+ }
+
+ protected:
+ virtual base::TimeTicks ImplGetTimeNow() const OVERRIDE {
+ return now_callback_.Run();
+ }
+
+ private:
+ NowCallback now_callback_;
+};
+
+class MockDelegate : public AckTracker::Delegate {
+ public:
+ MOCK_METHOD1(OnTimeout, void(const ObjectIdSet&));
+};
+
+scoped_ptr<net::BackoffEntry> CreateMockEntry(
+ const NowCallback& now_callback,
+ const net::BackoffEntry::Policy* const policy) {
+ return scoped_ptr<net::BackoffEntry>(new FakeBackoffEntry(
+ policy, now_callback));
+}
+
+} // namespace
+
+class AckTrackerTest : public testing::Test {
+ public:
+ AckTrackerTest()
+ : time_provider_(new MockTimeProvider),
+ ack_tracker_(&delegate_),
+ kIdOne(ipc::invalidation::ObjectSource::TEST, "one"),
+ kIdTwo(ipc::invalidation::ObjectSource::TEST, "two") {
+ ack_tracker_.SetNowCallbackForTest(
+ base::Bind(&MockTimeProvider::Now, time_provider_));
+ ack_tracker_.SetCreateBackoffEntryCallbackForTest(
+ base::Bind(&CreateMockEntry,
+ base::Bind(&MockTimeProvider::Now,
+ time_provider_)));
+ }
+
+ protected:
+ bool TriggerTimeoutNow() {
+ return ack_tracker_.TriggerTimeoutAtForTest(time_provider_->Now());
+ }
+
+ base::TimeDelta GetTimerDelay() const {
+ const base::Timer& timer = ack_tracker_.GetTimerForTest();
+ if (!timer.IsRunning())
+ ADD_FAILURE() << "Timer is not running!";
+ return timer.GetCurrentDelay();
+ }
+
+ scoped_refptr<MockTimeProvider> time_provider_;
+ ::testing::StrictMock<MockDelegate> delegate_;
+ AckTracker ack_tracker_;
+
+ const invalidation::ObjectId kIdOne;
+ const invalidation::ObjectId kIdTwo;
+
+ // AckTracker uses base::Timer internally, which depends on the existence of a
+ // MessageLoop.
+ MessageLoop message_loop_;
+};
+
+// Tests that various combinations of Track()/Ack() behave as
+// expected.
+TEST_F(AckTrackerTest, TrackAndAck) {
+ ObjectIdSet ids_one;
+ ids_one.insert(kIdOne);
+ ObjectIdSet ids_two;
+ ids_two.insert(kIdTwo);
+ ObjectIdSet ids_all;
+ ids_all.insert(kIdOne);
+ ids_all.insert(kIdTwo);
+
+ EXPECT_TRUE(ack_tracker_.IsQueueEmptyForTest());
+ ack_tracker_.Track(ids_one);
+ EXPECT_FALSE(ack_tracker_.IsQueueEmptyForTest());
+ ack_tracker_.Track(ids_two);
+ ack_tracker_.Ack(ids_one);
+ ack_tracker_.Ack(ids_two);
+ EXPECT_TRUE(ack_tracker_.IsQueueEmptyForTest());
+
+ ack_tracker_.Track(ids_all);
+ EXPECT_FALSE(ack_tracker_.IsQueueEmptyForTest());
+ ack_tracker_.Ack(ids_one);
+ ack_tracker_.Ack(ids_two);
+ EXPECT_TRUE(ack_tracker_.IsQueueEmptyForTest());
+
+ ack_tracker_.Track(ids_one);
+ EXPECT_FALSE(ack_tracker_.IsQueueEmptyForTest());
+ ack_tracker_.Track(ids_two);
+ ack_tracker_.Ack(ids_all);
+ EXPECT_TRUE(ack_tracker_.IsQueueEmptyForTest());
+
+ ack_tracker_.Track(ids_all);
+ EXPECT_FALSE(ack_tracker_.IsQueueEmptyForTest());
+ ack_tracker_.Ack(ids_all);
+ EXPECT_TRUE(ack_tracker_.IsQueueEmptyForTest());
+}
+
+TEST_F(AckTrackerTest, DoubleTrack) {
+ ObjectIdSet ids;
+ ids.insert(kIdOne);
+
+ EXPECT_TRUE(ack_tracker_.IsQueueEmptyForTest());
+ ack_tracker_.Track(ids);
+ EXPECT_FALSE(ack_tracker_.IsQueueEmptyForTest());
+ ack_tracker_.Track(ids);
+ ack_tracker_.Ack(ids);
+ EXPECT_TRUE(ack_tracker_.IsQueueEmptyForTest());
+}
+
+TEST_F(AckTrackerTest, UntrackedAck) {
+ ObjectIdSet ids;
+ ids.insert(kIdOne);
+
+ EXPECT_TRUE(ack_tracker_.IsQueueEmptyForTest());
+ ack_tracker_.Ack(ids);
+ EXPECT_TRUE(ack_tracker_.IsQueueEmptyForTest());
+}
+
+TEST_F(AckTrackerTest, Clear) {
+ ObjectIdSet ids;
+ ids.insert(kIdOne);
+ ids.insert(kIdOne);
+
+ EXPECT_TRUE(ack_tracker_.IsQueueEmptyForTest());
+ ack_tracker_.Track(ids);
+ EXPECT_FALSE(ack_tracker_.IsQueueEmptyForTest());
+ ack_tracker_.Clear();
+ EXPECT_TRUE(ack_tracker_.IsQueueEmptyForTest());
+}
+
+// Tests the timeout behavior for one object ID. The timeout should increase
+// exponentially until it hits the cap.
+TEST_F(AckTrackerTest, SimpleTimeout) {
+ ObjectIdSet ids;
+ ids.insert(kIdOne);
+
+ EXPECT_TRUE(ack_tracker_.IsQueueEmptyForTest());
+ ack_tracker_.Track(ids);
+ EXPECT_FALSE(ack_tracker_.IsQueueEmptyForTest());
+
+ EXPECT_EQ(base::TimeDelta::FromSeconds(60), GetTimerDelay());
+ time_provider_->LeapForward(60);
+ EXPECT_CALL(delegate_, OnTimeout(ids));
+ EXPECT_TRUE(TriggerTimeoutNow());
+
+ EXPECT_EQ(base::TimeDelta::FromSeconds(120), GetTimerDelay());
+ time_provider_->LeapForward(120);
+ EXPECT_CALL(delegate_, OnTimeout(ids));
+ EXPECT_TRUE(TriggerTimeoutNow());
+
+ EXPECT_EQ(base::TimeDelta::FromSeconds(240), GetTimerDelay());
+ time_provider_->LeapForward(240);
+ EXPECT_CALL(delegate_, OnTimeout(ids));
+ EXPECT_TRUE(TriggerTimeoutNow());
+
+ EXPECT_EQ(base::TimeDelta::FromSeconds(480), GetTimerDelay());
+ time_provider_->LeapForward(480);
+ EXPECT_CALL(delegate_, OnTimeout(ids));
+ EXPECT_TRUE(TriggerTimeoutNow());
+
+ EXPECT_EQ(base::TimeDelta::FromSeconds(600), GetTimerDelay());
+ time_provider_->LeapForward(600);
+ EXPECT_CALL(delegate_, OnTimeout(ids));
+ EXPECT_TRUE(TriggerTimeoutNow());
+
+ EXPECT_EQ(base::TimeDelta::FromSeconds(600), GetTimerDelay());
+ time_provider_->LeapForward(600);
+ EXPECT_CALL(delegate_, OnTimeout(ids));
+ EXPECT_TRUE(TriggerTimeoutNow());
+
+ EXPECT_FALSE(ack_tracker_.IsQueueEmptyForTest());
+ ack_tracker_.Ack(ids);
+ EXPECT_TRUE(ack_tracker_.IsQueueEmptyForTest());
+
+ // The backoff time should be reset after an Ack/Track cycle.
+ ack_tracker_.Track(ids);
+ EXPECT_FALSE(ack_tracker_.IsQueueEmptyForTest());
+
+ EXPECT_EQ(base::TimeDelta::FromSeconds(60), GetTimerDelay());
+ time_provider_->LeapForward(60);
+ EXPECT_CALL(delegate_, OnTimeout(ids));
+ EXPECT_TRUE(TriggerTimeoutNow());
+
+ EXPECT_FALSE(ack_tracker_.IsQueueEmptyForTest());
+ ack_tracker_.Ack(ids);
+ EXPECT_TRUE(ack_tracker_.IsQueueEmptyForTest());
+}
+
+// Tests that a sequence of Track() calls that results in interleaving
+// timeouts occurs as expected.
+TEST_F(AckTrackerTest, InterleavedTimeout) {
+ ObjectIdSet ids_one;
+ ids_one.insert(kIdOne);
+ ObjectIdSet ids_two;
+ ids_two.insert(kIdTwo);
+
+ EXPECT_TRUE(ack_tracker_.IsQueueEmptyForTest());
+ ack_tracker_.Track(ids_one);
+ EXPECT_FALSE(ack_tracker_.IsQueueEmptyForTest());
+
+ time_provider_->LeapForward(30);
+ ack_tracker_.Track(ids_two);
+ EXPECT_FALSE(ack_tracker_.IsQueueEmptyForTest());
+
+ EXPECT_EQ(base::TimeDelta::FromSeconds(60), GetTimerDelay());
+ time_provider_->LeapForward(30);
+ EXPECT_CALL(delegate_, OnTimeout(ids_one));
+ EXPECT_TRUE(TriggerTimeoutNow());
+
+ EXPECT_EQ(base::TimeDelta::FromSeconds(30), GetTimerDelay());
+ time_provider_->LeapForward(30);
+ EXPECT_CALL(delegate_, OnTimeout(ids_two));
+ EXPECT_TRUE(TriggerTimeoutNow());
+
+ EXPECT_EQ(base::TimeDelta::FromSeconds(90), GetTimerDelay());
+ time_provider_->LeapForward(90);
+ EXPECT_CALL(delegate_, OnTimeout(ids_one));
+ EXPECT_TRUE(TriggerTimeoutNow());
+
+ EXPECT_EQ(base::TimeDelta::FromSeconds(30), GetTimerDelay());
+ time_provider_->LeapForward(30);
+ EXPECT_CALL(delegate_, OnTimeout(ids_two));
+ EXPECT_TRUE(TriggerTimeoutNow());
+
+ ack_tracker_.Ack(ids_one);
+ ack_tracker_.Ack(ids_two);
+ EXPECT_TRUE(ack_tracker_.IsQueueEmptyForTest());
+}
+
+// Tests that registering a new object ID properly shortens the timeout when
+// needed.
+TEST_F(AckTrackerTest, ShortenTimeout) {
+ ObjectIdSet ids_one;
+ ids_one.insert(kIdOne);
+ ObjectIdSet ids_two;
+ ids_two.insert(kIdTwo);
+
+ EXPECT_TRUE(ack_tracker_.IsQueueEmptyForTest());
+ ack_tracker_.Track(ids_one);
+ EXPECT_FALSE(ack_tracker_.IsQueueEmptyForTest());
+
+ EXPECT_EQ(base::TimeDelta::FromSeconds(60), GetTimerDelay());
+ time_provider_->LeapForward(60);
+ EXPECT_CALL(delegate_, OnTimeout(ids_one));
+ EXPECT_TRUE(TriggerTimeoutNow());
+
+ // Without this next register, the next timeout should occur in 120 seconds
+ // from the last timeout event.
+ EXPECT_EQ(base::TimeDelta::FromSeconds(120), GetTimerDelay());
+ time_provider_->LeapForward(30);
+ ack_tracker_.Track(ids_two);
+ EXPECT_FALSE(ack_tracker_.IsQueueEmptyForTest());
+
+ // Now that we've registered another entry though, we should receive a timeout
+ // in 60 seconds.
+ EXPECT_EQ(base::TimeDelta::FromSeconds(60), GetTimerDelay());
+ time_provider_->LeapForward(60);
+ EXPECT_CALL(delegate_, OnTimeout(ids_two));
+ EXPECT_TRUE(TriggerTimeoutNow());
+
+ // Verify that the original timeout for kIdOne still occurs as expected.
+ EXPECT_EQ(base::TimeDelta::FromSeconds(30), GetTimerDelay());
+ time_provider_->LeapForward(30);
+ EXPECT_CALL(delegate_, OnTimeout(ids_one));
+ EXPECT_TRUE(TriggerTimeoutNow());
+
+ ack_tracker_.Ack(ids_one);
+ ack_tracker_.Ack(ids_two);
+ EXPECT_TRUE(ack_tracker_.IsQueueEmptyForTest());
+}
+
+// Tests that a delay between inserting a new object ID registration and starting
+// the timer that is greater than the initial timeout period (60 seconds) does
+// not break things. This could happen on a heavily loaded system, for instance.
+TEST_F(AckTrackerTest, ImmediateTimeout) {
+ ObjectIdSet ids;
+ ids.insert(kIdOne);
+
+ time_provider_->DelayedLeapForward(90);
+ EXPECT_TRUE(ack_tracker_.IsQueueEmptyForTest());
+ ack_tracker_.Track(ids);
+ EXPECT_FALSE(ack_tracker_.IsQueueEmptyForTest());
+
+ EXPECT_EQ(base::TimeDelta::FromSeconds(0), GetTimerDelay());
+ EXPECT_CALL(delegate_, OnTimeout(ids));
+ message_loop_.RunUntilIdle();
+
+ // The next timeout should still be scheduled normally.
+ EXPECT_EQ(base::TimeDelta::FromSeconds(120), GetTimerDelay());
+ time_provider_->LeapForward(120);
+ EXPECT_CALL(delegate_, OnTimeout(ids));
+ EXPECT_TRUE(TriggerTimeoutNow());
+
+ ack_tracker_.Ack(ids);
+ EXPECT_TRUE(ack_tracker_.IsQueueEmptyForTest());
+}
+
+} // namespace syncer
diff --git a/sync/sync.gyp b/sync/sync.gyp
index e74aee4..3f75837 100644
--- a/sync/sync.gyp
+++ b/sync/sync.gyp
@@ -275,6 +275,8 @@
'conditions': [
['OS != "android"', {
'sources': [
+ 'notifier/ack_tracker.cc',
+ 'notifier/ack_tracker.h',
'notifier/invalidation_notifier.cc',
'notifier/invalidation_notifier.h',
'notifier/invalidation_state_tracker.h',
@@ -695,6 +697,7 @@
'conditions': [
['OS != "android"', {
'sources': [
+ 'notifier/ack_tracker_unittest.cc',
'notifier/fake_invalidator_unittest.cc',
'notifier/invalidation_notifier_unittest.cc',
'notifier/invalidator_registrar_unittest.cc',