Diffstat (limited to 'sync')
-rw-r--r--  sync/engine/build_commit_command.cc | 64
-rw-r--r--  sync/engine/build_commit_command.h | 25
-rw-r--r--  sync/engine/build_commit_command_unittest.cc | 11
-rw-r--r--  sync/engine/commit.cc | 100
-rw-r--r--  sync/engine/commit.h | 37
-rw-r--r--  sync/engine/get_commit_ids_command.cc | 40
-rw-r--r--  sync/engine/get_commit_ids_command.h | 25
-rw-r--r--  sync/engine/net/server_connection_manager.h | 2
-rw-r--r--  sync/engine/post_commit_message_command.cc | 49
-rw-r--r--  sync/engine/post_commit_message_command.h | 28
-rw-r--r--  sync/engine/process_commit_response_command.cc | 132
-rw-r--r--  sync/engine/process_commit_response_command.h | 41
-rw-r--r--  sync/engine/process_commit_response_command_unittest.cc | 102
-rw-r--r--  sync/engine/syncer.cc | 60
-rw-r--r--  sync/engine/syncer.h | 4
-rw-r--r--  sync/engine/syncer_unittest.cc | 185
-rw-r--r--  sync/internal_api/all_status.cc | 2
-rw-r--r--  sync/internal_api/js_sync_manager_observer_unittest.cc | 2
-rw-r--r--  sync/internal_api/sync_manager.cc | 1
-rw-r--r--  sync/internal_api/sync_manager.h | 3
-rw-r--r--  sync/sessions/ordered_commit_set.cc | 22
-rw-r--r--  sync/sessions/ordered_commit_set.h | 16
-rw-r--r--  sync/sessions/ordered_commit_set_unittest.cc | 12
-rw-r--r--  sync/sessions/session_state.cc | 23
-rw-r--r--  sync/sessions/session_state.h | 11
-rw-r--r--  sync/sessions/session_state_unittest.cc | 9
-rw-r--r--  sync/sessions/status_controller.cc | 25
-rw-r--r--  sync/sessions/status_controller.h | 79
-rw-r--r--  sync/sessions/status_controller_unittest.cc | 29
-rw-r--r--  sync/sessions/sync_session.cc | 8
-rw-r--r--  sync/sessions/sync_session_unittest.cc | 20
-rw-r--r--  sync/sessions/test_util.cc | 1
-rw-r--r--  sync/sync.gyp | 4
-rw-r--r--  sync/syncable/syncable.h | 2
-rw-r--r--  sync/test/engine/mock_connection_manager.cc | 1
-rw-r--r--  sync/test/engine/mock_connection_manager.h | 1
36 files changed, 625 insertions(+), 551 deletions(-)
diff --git a/sync/engine/build_commit_command.cc b/sync/engine/build_commit_command.cc
index 86d1d8b..f384c18 100644
--- a/sync/engine/build_commit_command.cc
+++ b/sync/engine/build_commit_command.cc
@@ -12,9 +12,10 @@
#include "base/string_util.h"
#include "sync/engine/syncer_proto_util.h"
#include "sync/protocol/bookmark_specifics.pb.h"
+#include "sync/sessions/ordered_commit_set.h"
#include "sync/sessions/sync_session.h"
-#include "sync/syncable/syncable.h"
#include "sync/syncable/syncable_changes_version.h"
+#include "sync/syncable/syncable.h"
#include "sync/util/time.h"
using std::set;
@@ -49,7 +50,12 @@ int64 BuildCommitCommand::GetGap() {
return 1LL << 20;
}
-BuildCommitCommand::BuildCommitCommand() {}
+BuildCommitCommand::BuildCommitCommand(
+ const sessions::OrderedCommitSet& batch_commit_set,
+ ClientToServerMessage* commit_message)
+ : batch_commit_set_(batch_commit_set), commit_message_(commit_message) {
+}
+
BuildCommitCommand::~BuildCommitCommand() {}
void BuildCommitCommand::AddExtensionsActivityToMessage(
@@ -57,21 +63,29 @@ void BuildCommitCommand::AddExtensionsActivityToMessage(
// We only send ExtensionsActivity to the server if bookmarks are being
// committed.
ExtensionsActivityMonitor* monitor = session->context()->extensions_monitor();
- if (!session->status_controller().HasBookmarkCommitActivity()) {
- // Return the records to the activity monitor.
- monitor->PutRecords(session->extensions_activity());
- session->mutable_extensions_activity()->clear();
- return;
- }
- const ExtensionsActivityMonitor::Records& records =
- session->extensions_activity();
- for (ExtensionsActivityMonitor::Records::const_iterator it = records.begin();
- it != records.end(); ++it) {
- sync_pb::ChromiumExtensionsActivity* activity_message =
- message->add_extensions_activity();
- activity_message->set_extension_id(it->second.extension_id);
- activity_message->set_bookmark_writes_since_last_commit(
- it->second.bookmark_write_count);
+ if (batch_commit_set_.HasBookmarkCommitId()) {
+ // This isn't perfect, since the set of extensions activity may not
+ // correlate exactly with the items being committed. That's OK as
+ // long as we're looking for a rough estimate of extensions activity,
+ // not a precise mapping of which commits were triggered by which
+ // extension.
+ //
+ // We will push this list of extensions activity back into the
+ // ExtensionsActivityMonitor if this commit fails. That's why we must keep
+ // a copy of these records in the session.
+ monitor->GetAndClearRecords(session->mutable_extensions_activity());
+
+ const ExtensionsActivityMonitor::Records& records =
+ session->extensions_activity();
+ for (ExtensionsActivityMonitor::Records::const_iterator it =
+ records.begin();
+ it != records.end(); ++it) {
+ sync_pb::ChromiumExtensionsActivity* activity_message =
+ message->add_extensions_activity();
+ activity_message->set_extension_id(it->second.extension_id);
+ activity_message->set_bookmark_writes_since_last_commit(
+ it->second.bookmark_write_count);
+ }
}
}
@@ -86,16 +100,15 @@ void SetEntrySpecifics(MutableEntry* meta_entry, SyncEntity* sync_entry) {
} // namespace
SyncerError BuildCommitCommand::ExecuteImpl(SyncSession* session) {
- ClientToServerMessage message;
- message.set_share(session->context()->account_name());
- message.set_message_contents(ClientToServerMessage::COMMIT);
+ commit_message_->set_share(session->context()->account_name());
+ commit_message_->set_message_contents(ClientToServerMessage::COMMIT);
- CommitMessage* commit_message = message.mutable_commit();
+ CommitMessage* commit_message = commit_message_->mutable_commit();
commit_message->set_cache_guid(
session->write_transaction()->directory()->cache_guid());
AddExtensionsActivityToMessage(session, commit_message);
SyncerProtoUtil::AddRequestBirthday(
- session->write_transaction()->directory(), &message);
+ session->write_transaction()->directory(), commit_message_);
// Cache previously computed position values. Because |commit_ids|
// is already in sibling order, we should always hit this map after
@@ -105,9 +118,8 @@ SyncerError BuildCommitCommand::ExecuteImpl(SyncSession* session) {
// whose ID is the map's key.
std::map<Id, std::pair<int64, int64> > position_map;
- const vector<Id>& commit_ids = session->status_controller().commit_ids();
- for (size_t i = 0; i < commit_ids.size(); i++) {
- Id id = commit_ids[i];
+ for (size_t i = 0; i < batch_commit_set_.Size(); i++) {
+ Id id = batch_commit_set_.GetCommitIdAt(i);
SyncEntity* sync_entry =
static_cast<SyncEntity*>(commit_message->add_entries());
sync_entry->set_id(id);
@@ -208,8 +220,6 @@ SyncerError BuildCommitCommand::ExecuteImpl(SyncSession* session) {
SetEntrySpecifics(&meta_entry, sync_entry);
}
}
- session->mutable_status_controller()->
- mutable_commit_message()->CopyFrom(message);
return SYNCER_OK;
}
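The rewritten AddExtensionsActivityToMessage above pulls extensions activity out of the monitor only when the commit set actually contains bookmarks, and the session keeps a copy so the records can be handed back if the commit later fails (see ProcessCommitResponseCommand below). A minimal sketch of that get-and-clear / put-back contract, using only the interfaces visible in this change; the helper function names are hypothetical:

    void TakeExtensionsActivitySnapshot(ExtensionsActivityMonitor* monitor,
                                        sessions::SyncSession* session) {
      // Take ownership of the accumulated records for this commit attempt.
      // The session keeps the copy that the commit message is built from.
      monitor->GetAndClearRecords(session->mutable_extensions_activity());
    }

    void ReturnExtensionsActivityOnFailure(ExtensionsActivityMonitor* monitor,
                                           sessions::SyncSession* session) {
      // No bookmark commit succeeded: give the records back to the monitor
      // so they are reported with a later commit, then drop the session copy.
      monitor->PutRecords(session->extensions_activity());
      session->mutable_extensions_activity()->clear();
    }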
diff --git a/sync/engine/build_commit_command.h b/sync/engine/build_commit_command.h
index d18c94b..91eb3203 100644
--- a/sync/engine/build_commit_command.h
+++ b/sync/engine/build_commit_command.h
@@ -14,9 +14,26 @@
namespace browser_sync {
+namespace sessions {
+class OrderedCommitSet;
+}
+
+// A class that contains the code used to serialize a set of sync items into a
+// protobuf commit message. This conversion process references the
+// syncable::Directory, which is why it must be called within the same
+// transaction as the GetCommitIdsCommand that fetched the set of items to be
+// committed.
+//
+// See SyncerCommand documentation for more info.
class BuildCommitCommand : public SyncerCommand {
public:
- BuildCommitCommand();
+ // The batch_commit_set parameter contains a set of references to the items
+ // that should be committed.
+ //
+ // The commit_message parameter is an output parameter which will contain the
+ // fully initialized commit message once ExecuteImpl() has been called.
+ BuildCommitCommand(const sessions::OrderedCommitSet& batch_commit_set,
+ ClientToServerMessage* commit_message);
virtual ~BuildCommitCommand();
// SyncerCommand implementation.
@@ -44,6 +61,12 @@ class BuildCommitCommand : public SyncerCommand {
int64 InterpolatePosition(int64 lo, int64 hi);
DISALLOW_COPY_AND_ASSIGN(BuildCommitCommand);
+
+ // Input parameter; see constructor comment.
+ const sessions::OrderedCommitSet& batch_commit_set_;
+
+ // Output parameter; see constructor comment.
+ ClientToServerMessage* commit_message_;
};
} // namespace browser_sync
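Taken together with GetCommitIdsCommand, the constructor comment above implies a two-step pattern run under a single write transaction. A condensed usage sketch, mirroring PrepareCommitMessage in sync/engine/commit.cc from this same change (not a new API):

    // Both commands must run under the same write transaction, because the
    // commit set refers to items in the syncable::Directory.
    WriteTransaction trans(FROM_HERE, SYNCER, session->context()->directory());
    sessions::ScopedSetSessionWriteTransaction set_trans(session, &trans);

    sessions::OrderedCommitSet commit_set(session->routing_info());
    GetCommitIdsCommand get_commit_ids_command(
        session->context()->max_commit_batch_size(), &commit_set);
    get_commit_ids_command.Execute(session);

    ClientToServerMessage commit_message;
    BuildCommitCommand build_commit_command(commit_set, &commit_message);
    build_commit_command.Execute(session);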
diff --git a/sync/engine/build_commit_command_unittest.cc b/sync/engine/build_commit_command_unittest.cc
index f0a5bea..6263615 100644
--- a/sync/engine/build_commit_command_unittest.cc
+++ b/sync/engine/build_commit_command_unittest.cc
@@ -10,7 +10,16 @@ namespace browser_sync {
// A test fixture for tests exercising ClearDataCommandTest.
class BuildCommitCommandTest : public SyncerCommandTest {
protected:
- BuildCommitCommandTest() {}
+ BuildCommitCommandTest()
+ : batch_commit_set_(ModelSafeRoutingInfo()),
+ command_(batch_commit_set_, &commit_message_) {
+ }
+
+ private:
+ sessions::OrderedCommitSet batch_commit_set_;
+ ClientToServerMessage commit_message_;
+
+ protected:
BuildCommitCommand command_;
private:
diff --git a/sync/engine/commit.cc b/sync/engine/commit.cc
new file mode 100644
index 0000000..ae6b8ae
--- /dev/null
+++ b/sync/engine/commit.cc
@@ -0,0 +1,100 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/engine/commit.h"
+
+#include "base/debug/trace_event.h"
+#include "sync/engine/build_commit_command.h"
+#include "sync/engine/get_commit_ids_command.h"
+#include "sync/engine/process_commit_response_command.h"
+#include "sync/engine/syncer_proto_util.h"
+#include "sync/sessions/sync_session.h"
+
+using syncable::SYNCER;
+using syncable::WriteTransaction;
+
+namespace browser_sync {
+
+using sessions::SyncSession;
+using sessions::StatusController;
+
+// Helper function that finds sync items that are ready to be committed to the
+// server and serializes them into a commit message protobuf. It will return
+// false iff there are no entries ready to be committed at this time.
+//
+// The OrderedCommitSet parameter is an output parameter which will contain
+// the set of all items which are to be committed. The number of items in
+// the set shall not exceed the maximum batch size. (The default batch size
+// is currently 25, though it can be overwritten by the server.)
+//
+// The ClientToServerMessage parameter is an output parameter which will contain
+// the commit message which should be sent to the server. It is valid iff the
+// return value of this function is true.
+bool PrepareCommitMessage(sessions::SyncSession* session,
+ sessions::OrderedCommitSet* commit_set,
+ ClientToServerMessage* commit_message) {
+ TRACE_EVENT0("sync", "PrepareCommitMessage");
+
+ commit_set->Clear();
+ commit_message->Clear();
+
+ WriteTransaction trans(FROM_HERE, SYNCER, session->context()->directory());
+ sessions::ScopedSetSessionWriteTransaction set_trans(session, &trans);
+
+ // Fetch the items to commit.
+ const size_t batch_size = session->context()->max_commit_batch_size();
+ GetCommitIdsCommand get_commit_ids_command(batch_size, commit_set);
+ get_commit_ids_command.Execute(session);
+
+ DVLOG(1) << "Commit message will contain " << commit_set->Size() << " items.";
+ if (commit_set->Empty())
+ return false;
+
+ // Serialize the message.
+ BuildCommitCommand build_commit_command(*commit_set, commit_message);
+ build_commit_command.Execute(session);
+
+ return true;
+}
+
+SyncerError BuildAndPostCommits(Syncer* syncer,
+ sessions::SyncSession* session) {
+ StatusController* status_controller = session->mutable_status_controller();
+
+ sessions::OrderedCommitSet commit_set(session->routing_info());
+ ClientToServerMessage commit_message;
+ while (PrepareCommitMessage(session, &commit_set, &commit_message)
+ && !syncer->ExitRequested()) {
+ ClientToServerResponse commit_response;
+
+ DVLOG(1) << "Sending commit message.";
+ TRACE_EVENT_BEGIN0("sync", "PostCommit");
+ status_controller->set_last_post_commit_result(
+ SyncerProtoUtil::PostClientToServerMessage(commit_message,
+ &commit_response,
+ session));
+ TRACE_EVENT_END0("sync", "PostCommit");
+
+ // ProcessCommitResponse includes some code that cleans up after a failure
+ // to post a commit message, so we must run it regardless of whether or not
+ // the commit succeeds.
+
+ TRACE_EVENT_BEGIN0("sync", "ProcessCommitResponse");
+ ProcessCommitResponseCommand process_response_command(
+ commit_set, commit_message, commit_response);
+ status_controller->set_last_process_commit_response_result(
+ process_response_command.Execute(session));
+ TRACE_EVENT_END0("sync", "ProcessCommitResponse");
+
+ // Exit early if either the commit or the response processing failed.
+ if (status_controller->last_post_commit_result() != SYNCER_OK)
+ return status_controller->last_post_commit_result();
+ if (status_controller->last_process_commit_response_result() != SYNCER_OK)
+ return status_controller->last_process_commit_response_result();
+ }
+
+ return SYNCER_OK;
+}
+
+} // namespace browser_sync
diff --git a/sync/engine/commit.h b/sync/engine/commit.h
new file mode 100644
index 0000000..f5ccd79
--- /dev/null
+++ b/sync/engine/commit.h
@@ -0,0 +1,37 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_ENGINE_COMMIT_H_
+#define SYNC_ENGINE_COMMIT_H_
+#pragma once
+
+#include "sync/util/syncer_error.h"
+
+namespace browser_sync {
+
+namespace sessions {
+class SyncSession;
+}
+
+class Syncer;
+
+// This function will commit batches of unsynced items to the server until the
+// number of unsynced and ready to commit items reaches zero or an error is
+// encountered. A request to exit early will be treated as an error and will
+// abort any blocking operations.
+//
+// The Syncer parameter is provided only for access to its ExitRequested()
+// method. This is technically unnecessary since an early exit request should
+// be detected as we attempt to contact the sync server.
+//
+// The SyncSession parameter contains pointers to various bits of state,
+// including the syncable::Directory that contains all sync items and the
+// ServerConnectionManager used to contact the server.
+SyncerError BuildAndPostCommits(
+ Syncer* syncer,
+ sessions::SyncSession* session);
+
+} // namespace browser_sync
+
+#endif // SYNC_ENGINE_COMMIT_H_
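The header comment describes the loop's exit conditions but not what the caller sees. A brief sketch of a hypothetical call site that inspects the return value, using only types declared in this change (the syncer's actual COMMIT step, shown later in syncer.cc, currently ignores the result and always proceeds to conflict resolution):

    SyncerError result = BuildAndPostCommits(syncer, session);
    if (result != SYNCER_OK) {
      // Posting or response processing failed for the last batch; any
      // remaining unsynced items stay queued for a future sync cycle.
      DVLOG(1) << "Commit loop ended early with error " << result;
    }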
diff --git a/sync/engine/get_commit_ids_command.cc b/sync/engine/get_commit_ids_command.cc
index ecc5cf9..a09d5d7 100644
--- a/sync/engine/get_commit_ids_command.cc
+++ b/sync/engine/get_commit_ids_command.cc
@@ -22,8 +22,12 @@ using sessions::OrderedCommitSet;
using sessions::SyncSession;
using sessions::StatusController;
-GetCommitIdsCommand::GetCommitIdsCommand(int commit_batch_size)
- : requested_commit_batch_size_(commit_batch_size) {}
+GetCommitIdsCommand::GetCommitIdsCommand(
+ const size_t commit_batch_size,
+ sessions::OrderedCommitSet* commit_set)
+ : requested_commit_batch_size_(commit_batch_size),
+ commit_set_(commit_set) {
+}
GetCommitIdsCommand::~GetCommitIdsCommand() {}
@@ -61,17 +65,12 @@ SyncerError GetCommitIdsCommand::ExecuteImpl(SyncSession* session) {
session->routing_info(),
ready_unsynced_set);
- StatusController* status = session->mutable_status_controller();
- syncable::Directory::UnsyncedMetaHandles ready_unsynced_vector(
- ready_unsynced_set.begin(), ready_unsynced_set.end());
- status->set_unsynced_handles(ready_unsynced_vector);
const vector<syncable::Id>& verified_commit_ids =
- ordered_commit_set_->GetAllCommitIds();
+ commit_set_->GetAllCommitIds();
for (size_t i = 0; i < verified_commit_ids.size(); i++)
DVLOG(1) << "Debug commit batch result:" << verified_commit_ids[i];
- status->set_commit_set(*ordered_commit_set_.get());
return SYNCER_OK;
}
@@ -180,7 +179,7 @@ bool GetCommitIdsCommand::AddUncommittedParentsAndTheirPredecessors(
syncable::Entry parent(trans, syncable::GET_BY_ID, parent_id);
CHECK(parent.good()) << "Bad user-only parent in item path.";
int64 handle = parent.Get(syncable::META_HANDLE);
- if (ordered_commit_set_->HaveCommitItem(handle)) {
+ if (commit_set_->HaveCommitItem(handle)) {
// We've already added this parent (and therefore all of its parents).
// We can return early.
break;
@@ -188,7 +187,7 @@ bool GetCommitIdsCommand::AddUncommittedParentsAndTheirPredecessors(
if (!AddItemThenPredecessors(trans, ready_unsynced_set, parent,
&item_dependencies)) {
// There was a parent/predecessor in conflict. We return without adding
- // anything to |ordered_commit_set_|.
+ // anything to |commit_set|.
DVLOG(1) << "Parent or parent's predecessor was in conflict, omitting "
<< item;
return false;
@@ -226,7 +225,7 @@ bool GetCommitIdsCommand::AddItemThenPredecessors(
const syncable::Entry& item,
OrderedCommitSet* result) const {
int64 item_handle = item.Get(syncable::META_HANDLE);
- if (ordered_commit_set_->HaveCommitItem(item_handle)) {
+ if (commit_set_->HaveCommitItem(item_handle)) {
// We've already added this item to the commit set, and so must have
// already added the predecessors as well.
return true;
@@ -243,7 +242,7 @@ bool GetCommitIdsCommand::AddItemThenPredecessors(
if (!prev.Get(syncable::IS_UNSYNCED))
break;
int64 handle = prev.Get(syncable::META_HANDLE);
- if (ordered_commit_set_->HaveCommitItem(handle)) {
+ if (commit_set_->HaveCommitItem(handle)) {
// We've already added this item to the commit set, and so must have
// already added the predecessors as well.
return true;
@@ -276,7 +275,7 @@ bool GetCommitIdsCommand::AddPredecessorsThenItem(
}
bool GetCommitIdsCommand::IsCommitBatchFull() const {
- return ordered_commit_set_->Size() >= requested_commit_batch_size_;
+ return commit_set_->Size() >= requested_commit_batch_size_;
}
void GetCommitIdsCommand::AddCreatesAndMoves(
@@ -287,7 +286,7 @@ void GetCommitIdsCommand::AddCreatesAndMoves(
for (std::set<int64>::const_iterator iter = ready_unsynced_set.begin();
!IsCommitBatchFull() && iter != ready_unsynced_set.end(); ++iter) {
int64 metahandle = *iter;
- if (ordered_commit_set_->HaveCommitItem(metahandle))
+ if (commit_set_->HaveCommitItem(metahandle))
continue;
syncable::Entry entry(write_transaction,
@@ -308,14 +307,14 @@ void GetCommitIdsCommand::AddCreatesAndMoves(
ready_unsynced_set,
entry,
&item_dependencies)) {
- ordered_commit_set_->Append(item_dependencies);
+ commit_set_->Append(item_dependencies);
}
}
}
// It's possible that we overcommitted while trying to expand dependent
// items. If so, truncate the set down to the allowed size.
- ordered_commit_set_->Truncate(requested_commit_batch_size_);
+ commit_set_->Truncate(requested_commit_batch_size_);
}
void GetCommitIdsCommand::AddDeletes(
@@ -326,7 +325,7 @@ void GetCommitIdsCommand::AddDeletes(
for (std::set<int64>::const_iterator iter = ready_unsynced_set.begin();
!IsCommitBatchFull() && iter != ready_unsynced_set.end(); ++iter) {
int64 metahandle = *iter;
- if (ordered_commit_set_->HaveCommitItem(metahandle))
+ if (commit_set_->HaveCommitItem(metahandle))
continue;
syncable::Entry entry(write_transaction, syncable::GET_BY_HANDLE,
@@ -357,7 +356,7 @@ void GetCommitIdsCommand::AddDeletes(
DVLOG(1) << "Inserting moved and deleted entry, will be missed by "
<< "delete roll." << entry.Get(syncable::ID);
- ordered_commit_set_->AddCommitItem(metahandle,
+ commit_set_->AddCommitItem(metahandle,
entry.Get(syncable::ID),
entry.GetModelType());
}
@@ -388,14 +387,14 @@ void GetCommitIdsCommand::AddDeletes(
for (std::set<int64>::const_iterator iter = ready_unsynced_set.begin();
!IsCommitBatchFull() && iter != ready_unsynced_set.end(); ++iter) {
int64 metahandle = *iter;
- if (ordered_commit_set_->HaveCommitItem(metahandle))
+ if (commit_set_->HaveCommitItem(metahandle))
continue;
syncable::MutableEntry entry(write_transaction, syncable::GET_BY_HANDLE,
metahandle);
if (entry.Get(syncable::IS_DEL)) {
syncable::Id parent_id = entry.Get(syncable::PARENT_ID);
if (legal_delete_parents.count(parent_id)) {
- ordered_commit_set_->AddCommitItem(metahandle, entry.Get(syncable::ID),
+ commit_set_->AddCommitItem(metahandle, entry.Get(syncable::ID),
entry.GetModelType());
}
}
@@ -406,7 +405,6 @@ void GetCommitIdsCommand::BuildCommitIds(
syncable::WriteTransaction* write_transaction,
const ModelSafeRoutingInfo& routes,
const std::set<int64>& ready_unsynced_set) {
- ordered_commit_set_.reset(new OrderedCommitSet(routes));
// Commits follow these rules:
// 1. Moves or creates are preceded by needed folder creates, from
// root to leaf. For folders whose contents are ordered, moves
diff --git a/sync/engine/get_commit_ids_command.h b/sync/engine/get_commit_ids_command.h
index 334fb6b..9aca7cd 100644
--- a/sync/engine/get_commit_ids_command.h
+++ b/sync/engine/get_commit_ids_command.h
@@ -20,11 +20,23 @@ using std::vector;
namespace browser_sync {
+// A class that contains the code used to search the syncable::Directory for
+// locally modified items that are ready to be committed to the server.
+//
+// See SyncerCommand documentation for more info.
class GetCommitIdsCommand : public SyncerCommand {
friend class SyncerTest;
public:
- explicit GetCommitIdsCommand(int commit_batch_size);
+ // The batch_size parameter is the maximum number of entries we are allowed
+ // to commit in a single batch. This value can be modified by the server.
+ //
+ // The ordered_commit_set parameter is an output parameter that will contain a
+ // set of items that are ready to commit. Its size shall not exceed the
+ // provided batch_size. The contents of this "set" will be ordered; see the
+ // comments in this class' implementation for details.
+ GetCommitIdsCommand(const size_t commit_batch_size,
+ sessions::OrderedCommitSet* ordered_commit_set);
virtual ~GetCommitIdsCommand();
// SyncerCommand implementation.
@@ -93,8 +105,7 @@ class GetCommitIdsCommand : public SyncerCommand {
sessions::OrderedCommitSet* result) const;
// Appends all commit ready predecessors of |item|, followed by |item| itself,
- // to |ordered_commit_set_|, iff item and all its predecessors not in
- // conflict.
+ // to |commit_set|, iff the item and all its predecessors are not in conflict.
// Return values:
// False: if there was an entry in conflict.
// True: if all entries were checked for commit readiness and added to
@@ -103,7 +114,7 @@ class GetCommitIdsCommand : public SyncerCommand {
const ModelSafeRoutingInfo& routes,
const std::set<int64>& ready_unsynced_set,
const syncable::Entry& item,
- sessions::OrderedCommitSet* result) const;
+ sessions::OrderedCommitSet* commit_set) const;
bool IsCommitBatchFull() const;
@@ -114,9 +125,11 @@ class GetCommitIdsCommand : public SyncerCommand {
void AddDeletes(syncable::WriteTransaction* write_transaction,
const std::set<int64>& ready_unsynced_set);
- scoped_ptr<sessions::OrderedCommitSet> ordered_commit_set_;
+ // Input parameter; see constructor comment.
+ const size_t requested_commit_batch_size_;
- int requested_commit_batch_size_;
+ // Output parameter; see constructor comment.
+ sessions::OrderedCommitSet* commit_set_;
DISALLOW_COPY_AND_ASSIGN(GetCommitIdsCommand);
};
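For context, the OrderedCommitSet that this command fills in exposes only a small surface, all of it exercised elsewhere in this change. A sketch, assuming |routes|, |metahandle|, |id|, |model_type|, and |max_batch_size| are supplied by the surrounding code:

    sessions::OrderedCommitSet commit_set(routes);
    if (!commit_set.HaveCommitItem(metahandle))
      commit_set.AddCommitItem(metahandle, id, model_type);
    commit_set.Truncate(max_batch_size);  // Enforce the requested batch limit.
    for (size_t i = 0; i < commit_set.Size(); ++i)
      DVLOG(1) << "Will commit " << commit_set.GetCommitIdAt(i);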
diff --git a/sync/engine/net/server_connection_manager.h b/sync/engine/net/server_connection_manager.h
index 2121d8e..e6e18da 100644
--- a/sync/engine/net/server_connection_manager.h
+++ b/sync/engine/net/server_connection_manager.h
@@ -28,8 +28,6 @@ class ClientToServerMessage;
namespace browser_sync {
-class ClientToServerMessage;
-
static const int32 kUnsetResponseCode = -1;
static const int32 kUnsetContentLength = -1;
static const int32 kUnsetPayloadLength = -1;
diff --git a/sync/engine/post_commit_message_command.cc b/sync/engine/post_commit_message_command.cc
deleted file mode 100644
index 8f51f20..0000000
--- a/sync/engine/post_commit_message_command.cc
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/engine/post_commit_message_command.h"
-
-#include <vector>
-
-#include "base/location.h"
-#include "sync/engine/syncer_proto_util.h"
-#include "sync/engine/syncproto.h"
-#include "sync/sessions/sync_session.h"
-
-using std::vector;
-
-namespace browser_sync {
-
-PostCommitMessageCommand::PostCommitMessageCommand() {}
-PostCommitMessageCommand::~PostCommitMessageCommand() {}
-
-SyncerError PostCommitMessageCommand::ExecuteImpl(
- sessions::SyncSession* session) {
- if (session->status_controller().commit_ids().empty())
- return SYNCER_OK; // Nothing to commit.
- ClientToServerResponse response;
- syncable::Directory* dir = session->context()->directory();
- sessions::StatusController* status = session->mutable_status_controller();
- SyncerError result = SyncerProtoUtil::PostClientToServerMessage(
- status->commit_message(), &response, session);
- if (result != SYNCER_OK) {
- // None of our changes got through. Clear the SYNCING bit which was
- // set to true during BuildCommitCommand, and which may still be true.
- // Not to be confused with IS_UNSYNCED, this bit is used to detect local
- // changes to items that happen during the server Commit operation.
- syncable::WriteTransaction trans(FROM_HERE, syncable::SYNCER, dir);
- const vector<syncable::Id>& commit_ids = status->commit_ids();
- for (size_t i = 0; i < commit_ids.size(); i++) {
- syncable::MutableEntry entry(&trans, syncable::GET_BY_ID, commit_ids[i]);
- entry.Put(syncable::SYNCING, false);
- }
- return result;
- }
-
- status->set_items_committed();
- status->mutable_commit_response()->CopyFrom(response);
- return SYNCER_OK;
-}
-
-} // namespace browser_sync
diff --git a/sync/engine/post_commit_message_command.h b/sync/engine/post_commit_message_command.h
deleted file mode 100644
index 50fae39..0000000
--- a/sync/engine/post_commit_message_command.h
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SYNC_ENGINE_POST_COMMIT_MESSAGE_COMMAND_H_
-#define SYNC_ENGINE_POST_COMMIT_MESSAGE_COMMAND_H_
-#pragma once
-
-#include "base/compiler_specific.h"
-#include "sync/engine/syncer_command.h"
-
-namespace browser_sync {
-
-class PostCommitMessageCommand : public SyncerCommand {
- public:
- PostCommitMessageCommand();
- virtual ~PostCommitMessageCommand();
-
- // SyncerCommand implementation.
- virtual SyncerError ExecuteImpl(sessions::SyncSession* session) OVERRIDE;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(PostCommitMessageCommand);
-};
-
-} // namespace browser_sync
-
-#endif // SYNC_ENGINE_POST_COMMIT_MESSAGE_COMMAND_H_
diff --git a/sync/engine/process_commit_response_command.cc b/sync/engine/process_commit_response_command.cc
index 6b2f633..6514bc1 100644
--- a/sync/engine/process_commit_response_command.cc
+++ b/sync/engine/process_commit_response_command.cc
@@ -48,7 +48,15 @@ using sessions::StatusController;
using sessions::SyncSession;
using sessions::ConflictProgress;
-ProcessCommitResponseCommand::ProcessCommitResponseCommand() {}
+ProcessCommitResponseCommand::ProcessCommitResponseCommand(
+ const sessions::OrderedCommitSet& commit_set,
+ const ClientToServerMessage& commit_message,
+ const ClientToServerResponse& commit_response)
+ : commit_set_(commit_set),
+ commit_message_(commit_message),
+ commit_response_(commit_response) {
+}
+
ProcessCommitResponseCommand::~ProcessCommitResponseCommand() {}
std::set<ModelSafeGroup> ProcessCommitResponseCommand::GetGroupsToChange(
@@ -57,10 +65,9 @@ std::set<ModelSafeGroup> ProcessCommitResponseCommand::GetGroupsToChange(
syncable::Directory* dir = session.context()->directory();
syncable::ReadTransaction trans(FROM_HERE, dir);
- const StatusController& status = session.status_controller();
- for (size_t i = 0; i < status.commit_ids().size(); ++i) {
+ for (size_t i = 0; i < commit_set_.Size(); ++i) {
groups_with_commits.insert(
- GetGroupForModelType(status.GetUnrestrictedCommitModelTypeAt(i),
+ GetGroupForModelType(commit_set_.GetModelTypeAt(i),
session.routing_info()));
}
@@ -68,19 +75,18 @@ std::set<ModelSafeGroup> ProcessCommitResponseCommand::GetGroupsToChange(
}
SyncerError ProcessCommitResponseCommand::ModelNeutralExecuteImpl(
- sessions::SyncSession* session) {
- const StatusController& status = session->status_controller();
- const ClientToServerResponse& response(status.commit_response());
- const vector<syncable::Id>& commit_ids(status.commit_ids());
+ SyncSession* session) {
+ syncable::Directory* dir = session->context()->directory();
+ const vector<syncable::Id>& commit_ids = commit_set_.GetAllCommitIds();
- if (!response.has_commit()) {
- // TODO(sync): What if we didn't try to commit anything?
+ if (!commit_response_.has_commit()) {
LOG(WARNING) << "Commit response has no commit body!";
+ ClearSyncingBits(dir, commit_ids);
return SERVER_RESPONSE_VALIDATION_FAILED;
}
- const CommitResponse& cr = response.commit();
- int commit_count = commit_ids.size();
+ const CommitResponse& cr = commit_response_.commit();
+ int commit_count = commit_set_.Size();
if (cr.entryresponse_size() != commit_count) {
LOG(ERROR) << "Commit response has wrong number of entries! Expected:" <<
commit_count << " Got:" << cr.entryresponse_size();
@@ -90,6 +96,7 @@ SyncerError ProcessCommitResponseCommand::ModelNeutralExecuteImpl(
if (cr.entryresponse(i).has_error_message())
LOG(ERROR) << " " << cr.entryresponse(i).error_message();
}
+ ClearSyncingBits(dir, commit_ids);
return SERVER_RESPONSE_VALIDATION_FAILED;
}
return SYNCER_OK;
@@ -99,60 +106,60 @@ SyncerError ProcessCommitResponseCommand::ModelChangingExecuteImpl(
SyncSession* session) {
SyncerError result = ProcessCommitResponse(session);
ExtensionsActivityMonitor* monitor = session->context()->extensions_monitor();
- if (session->status_controller().HasBookmarkCommitActivity() &&
- session->status_controller().syncer_status()
+
+ // This is to be run on one model only: the bookmark model.
+ if (session->status_controller().HasBookmarkCommitActivity()) {
+ // If the commit failed, return the data to the ExtensionsActivityMonitor.
+ if (session->status_controller().syncer_status()
.num_successful_bookmark_commits == 0) {
- monitor->PutRecords(session->extensions_activity());
+ monitor->PutRecords(session->extensions_activity());
+ }
+ // Clear our cached data in either case.
session->mutable_extensions_activity()->clear();
}
+
return result;
}
SyncerError ProcessCommitResponseCommand::ProcessCommitResponse(
SyncSession* session) {
syncable::Directory* dir = session->context()->directory();
-
StatusController* status = session->mutable_status_controller();
- const ClientToServerResponse& response(status->commit_response());
- const CommitResponse& cr = response.commit();
- const sync_pb::CommitMessage& commit_message =
- status->commit_message().commit();
-
- // If we try to commit a parent and child together and the parent conflicts
- // the child will have a bad parent causing an error. As this is not a
- // critical error, we trap it and don't LOG(ERROR). To enable this we keep
- // a map of conflicting new folders.
+ const CommitResponse& cr = commit_response_.commit();
+ const sync_pb::CommitMessage& commit_message = commit_message_.commit();
+
int transient_error_commits = 0;
int conflicting_commits = 0;
int error_commits = 0;
int successes = 0;
- set<syncable::Id> conflicting_new_folder_ids;
+
set<syncable::Id> deleted_folders;
ConflictProgress* conflict_progress = status->mutable_conflict_progress();
- OrderedCommitSet::Projection proj = status->commit_id_projection();
+ OrderedCommitSet::Projection proj = status->commit_id_projection(
+ commit_set_);
+
if (!proj.empty()) { // Scope for WriteTransaction.
WriteTransaction trans(FROM_HERE, SYNCER, dir);
for (size_t i = 0; i < proj.size(); i++) {
- CommitResponse::ResponseType response_type =
- ProcessSingleCommitResponse(&trans, cr.entryresponse(proj[i]),
- commit_message.entries(proj[i]),
- status->GetCommitIdAt(proj[i]),
- &conflicting_new_folder_ids,
- &deleted_folders);
+ CommitResponse::ResponseType response_type = ProcessSingleCommitResponse(
+ &trans,
+ cr.entryresponse(proj[i]),
+ commit_message.entries(proj[i]),
+ commit_set_.GetCommitIdAt(proj[i]),
+ &deleted_folders);
switch (response_type) {
case CommitResponse::INVALID_MESSAGE:
++error_commits;
break;
case CommitResponse::CONFLICT:
++conflicting_commits;
- // Only server CONFLICT responses will activate conflict resolution.
conflict_progress->AddServerConflictingItemById(
- status->GetCommitIdAt(proj[i]));
+ commit_set_.GetCommitIdAt(proj[i]));
break;
case CommitResponse::SUCCESS:
// TODO(sync): worry about sync_rate_ rate calc?
++successes;
- if (status->GetCommitModelTypeAt(proj[i]) == syncable::BOOKMARKS)
+ if (commit_set_.GetModelTypeAt(proj[i]) == syncable::BOOKMARKS)
status->increment_num_successful_bookmark_commits();
status->increment_num_successful_commits();
break;
@@ -171,16 +178,36 @@ SyncerError ProcessCommitResponseCommand::ProcessCommitResponse(
SyncerUtil::MarkDeletedChildrenSynced(dir, &deleted_folders);
int commit_count = static_cast<int>(proj.size());
- if (commit_count == (successes + conflicting_commits)) {
- // We consider conflicting commits as a success because things will work out
- // on their own when we receive them. Flags will be set so that
- // HasMoreToSync() will cause SyncScheduler to enter another sync cycle to
- // handle this condition.
+ if (commit_count == successes) {
return SYNCER_OK;
} else if (error_commits > 0) {
return SERVER_RETURN_UNKNOWN_ERROR;
} else if (transient_error_commits > 0) {
return SERVER_RETURN_TRANSIENT_ERROR;
+ } else if (conflicting_commits > 0) {
+ // This means that the server already has an item with this version, but
+ // we haven't seen that update yet.
+ //
+ // A well-behaved client should respond to this by proceeding to the
+ // download updates phase, fetching the conflicting items, then attempting
+ // to resolve the conflict. That's not what this client does.
+ //
+ // We don't currently have any code to support that exceptional control
+ // flow. We don't intend to add any because this response code will be
+ // deprecated soon. Instead, we handle this in the same way that we handle
+ // transient errors. We abort the current sync cycle, wait a little while,
+ // then try again. The retry sync cycle will attempt to download updates
+ // which should be sufficient to trigger client-side conflict resolution.
+ //
+ // Not treating this as an error would be dangerous. There's a risk that
+ // the commit loop would loop indefinitely. The loop won't exit until the
+ // number of unsynced items hits zero or an error is detected. If we're
+ // constantly receiving conflict responses and we don't treat them as
+ // errors, there would be no reason to leave that loop.
+ //
+ // TODO: Remove this option when the CONFLICT return value is fully
+ // deprecated.
+ return SERVER_RETURN_TRANSIENT_ERROR;
} else {
LOG(FATAL) << "Inconsistent counts when processing commit response";
return SYNCER_OK;
@@ -200,7 +227,6 @@ ProcessCommitResponseCommand::ProcessSingleCommitResponse(
const sync_pb::CommitResponse_EntryResponse& pb_server_entry,
const sync_pb::SyncEntity& commit_request_entry,
const syncable::Id& pre_commit_id,
- std::set<syncable::Id>* conflicting_new_folder_ids,
set<syncable::Id>* deleted_folders) {
const CommitResponse_EntryResponse& server_entry =
@@ -229,10 +255,6 @@ ProcessCommitResponseCommand::ProcessSingleCommitResponse(
}
if (CommitResponse::CONFLICT == response) {
DVLOG(1) << "Conflict Committing: " << local_entry;
- // TODO(nick): conflicting_new_folder_ids is a purposeless anachronism.
- if (!pre_commit_id.ServerKnows() && local_entry.Get(IS_DIR)) {
- conflicting_new_folder_ids->insert(pre_commit_id);
- }
return response;
}
if (CommitResponse::RETRY == response) {
@@ -478,4 +500,22 @@ void ProcessCommitResponseCommand::ProcessSuccessfulCommitResponse(
}
}
+void ProcessCommitResponseCommand::ClearSyncingBits(
+ syncable::Directory *dir,
+ const vector<syncable::Id>& commit_ids) {
+ // This is part of the cleanup in the case of a failed commit. Normally we
+ // would unset the SYNCING bit when processing the commit response. In the
+ // failure case we don't process the response, so we need to clear those bits
+ // here.
+ syncable::WriteTransaction trans(FROM_HERE, syncable::SYNCER, dir);
+ for (size_t i = 0; i < commit_ids.size(); i++) {
+ syncable::MutableEntry entry(&trans, syncable::GET_BY_ID, commit_ids[i]);
+ if (entry.good()) {
+ entry.Put(syncable::SYNCING, false);
+ } else {
+ LOG(WARNING) << "Id: " << commit_ids[i] << " disappeared";
+ }
+ }
+}
+
} // namespace browser_sync
diff --git a/sync/engine/process_commit_response_command.h b/sync/engine/process_commit_response_command.h
index 8e288de..0680cc4 100644
--- a/sync/engine/process_commit_response_command.h
+++ b/sync/engine/process_commit_response_command.h
@@ -18,14 +18,43 @@ namespace syncable {
class Id;
class WriteTransaction;
class MutableEntry;
+class Directory;
}
namespace browser_sync {
+namespace sessions {
+class OrderedCommitSet;
+}
+
+// A class that processes the server's response to our commit attempt. Note
+// that some of the preliminary processing is performed in
+// PostClientToServerMessage command.
+//
+// As part of processing the commit response, this command will modify sync
+// entries. It can rename items, update their versions, etc.
+//
+// This command will return a non-SYNCER_OK value if an error occurred while
+// processing the response, or if the server's response indicates that it had
+// trouble processing the request.
+//
+// See SyncerCommand documentation for more info.
class ProcessCommitResponseCommand : public ModelChangingSyncerCommand {
public:
- ProcessCommitResponseCommand();
+ // The commit_set parameter contains references to all the items which were
+ // to be committed in this batch.
+ //
+ // The commit_message parameter contains the message that was sent to the
+ // server.
+ //
+ // The commit_response parameter contains the response received from the
+ // server. This may be uninitialized if we were unable to contact the server
+ // or a serious error was encountered.
+ ProcessCommitResponseCommand(
+ const sessions::OrderedCommitSet& commit_set,
+ const ClientToServerMessage& commit_message,
+ const ClientToServerResponse& commit_response);
virtual ~ProcessCommitResponseCommand();
protected:
@@ -43,7 +72,6 @@ class ProcessCommitResponseCommand : public ModelChangingSyncerCommand {
const sync_pb::CommitResponse_EntryResponse& pb_commit_response,
const sync_pb::SyncEntity& pb_committed_entry,
const syncable::Id& pre_commit_id,
- std::set<syncable::Id>* conflicting_new_directory_ids,
std::set<syncable::Id>* deleted_folders);
// Actually does the work of execute.
@@ -93,6 +121,15 @@ class ProcessCommitResponseCommand : public ModelChangingSyncerCommand {
const sync_pb::SyncEntity& committed_entry,
const CommitResponse_EntryResponse& entry_response);
+ // Helper to clean up in case of failure.
+ void ClearSyncingBits(
+ syncable::Directory *dir,
+ const std::vector<syncable::Id>& commit_ids);
+
+ const sessions::OrderedCommitSet& commit_set_;
+ const ClientToServerMessage& commit_message_;
+ const ClientToServerResponse& commit_response_;
+
DISALLOW_COPY_AND_ASSIGN(ProcessCommitResponseCommand);
};
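As the constructor comment notes, this command is now constructed per commit batch rather than reused across the session. A condensed sketch of the call site from sync/engine/commit.cc in this change; note that it runs even when posting the message failed, so its cleanup paths (clearing SYNCING bits, returning extensions activity records) can still run:

    ProcessCommitResponseCommand process_response_command(
        commit_set, commit_message, commit_response);
    SyncerError result = process_response_command.Execute(session);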
diff --git a/sync/engine/process_commit_response_command_unittest.cc b/sync/engine/process_commit_response_command_unittest.cc
index 517bc51..195d58a 100644
--- a/sync/engine/process_commit_response_command_unittest.cc
+++ b/sync/engine/process_commit_response_command_unittest.cc
@@ -47,11 +47,7 @@ class ProcessCommitResponseCommandTest : public SyncerCommandTest {
(*mutable_routing_info())[syncable::PREFERENCES] = GROUP_UI;
(*mutable_routing_info())[syncable::AUTOFILL] = GROUP_DB;
- commit_set_.reset(new sessions::OrderedCommitSet(routing_info()));
SyncerCommandTest::SetUp();
- // Need to explicitly use this-> to avoid obscure template
- // warning.
- this->ExpectNoGroupsToChange(command_);
}
protected:
@@ -113,12 +109,14 @@ class ProcessCommitResponseCommandTest : public SyncerCommandTest {
// record and a commit response for it in the syncer session. If item_id
// is a local ID, the item will be a create operation. Otherwise, it
// will be an edit.
- void CreateUnprocessedCommitResult(const Id& item_id,
- const Id& parent_id,
- const string& name,
- syncable::ModelType model_type) {
- sessions::StatusController* sync_state =
- session()->mutable_status_controller();
+ void CreateUnprocessedCommitResult(
+ const Id& item_id,
+ const Id& parent_id,
+ const string& name,
+ syncable::ModelType model_type,
+ sessions::OrderedCommitSet *commit_set,
+ browser_sync::ClientToServerMessage *commit,
+ browser_sync::ClientToServerResponse *response) {
bool is_folder = true;
int64 metahandle = 0;
CreateUnsyncedItem(item_id, parent_id, name, is_folder, model_type,
@@ -126,18 +124,14 @@ class ProcessCommitResponseCommandTest : public SyncerCommandTest {
// ProcessCommitResponseCommand consumes commit_ids from the session
// state, so we need to update that. O(n^2) because it's a test.
- commit_set_->AddCommitItem(metahandle, item_id, model_type);
- sync_state->set_commit_set(*commit_set_.get());
+ commit_set->AddCommitItem(metahandle, item_id, model_type);
WriteTransaction trans(FROM_HERE, UNITTEST, directory());
MutableEntry entry(&trans, syncable::GET_BY_ID, item_id);
ASSERT_TRUE(entry.good());
entry.Put(syncable::SYNCING, true);
- // ProcessCommitResponseCommand looks at both the commit message as well
- // as the commit response, so we need to synthesize both here.
- sync_pb::ClientToServerMessage* commit =
- sync_state->mutable_commit_message();
+ // Add to the commit message.
commit->set_message_contents(ClientToServerMessage::COMMIT);
SyncEntity* entity = static_cast<SyncEntity*>(
commit->mutable_commit()->add_entries());
@@ -148,8 +142,7 @@ class ProcessCommitResponseCommandTest : public SyncerCommandTest {
entity->mutable_specifics()->CopyFrom(entry.Get(syncable::SPECIFICS));
entity->set_id(item_id);
- sync_pb::ClientToServerResponse* response =
- sync_state->mutable_commit_response();
+ // Add to the response message.
response->set_error_code(sync_pb::SyncEnums::SUCCESS);
sync_pb::CommitResponse_EntryResponse* entry_response =
response->mutable_commit()->add_entryresponse();
@@ -176,20 +169,15 @@ class ProcessCommitResponseCommandTest : public SyncerCommandTest {
}
}
- void SetLastErrorCode(CommitResponse::ResponseType error_code) {
- sessions::StatusController* sync_state =
- session()->mutable_status_controller();
- sync_pb::ClientToServerResponse* response =
- sync_state->mutable_commit_response();
+ void SetLastErrorCode(CommitResponse::ResponseType error_code,
+ sync_pb::ClientToServerResponse* response) {
sync_pb::CommitResponse_EntryResponse* entry_response =
response->mutable_commit()->mutable_entryresponse(
response->mutable_commit()->entryresponse_size() - 1);
entry_response->set_response_type(error_code);
}
- ProcessCommitResponseCommand command_;
TestIdFactory id_factory_;
- scoped_ptr<sessions::OrderedCommitSet> commit_set_;
private:
int64 next_old_revision_;
int64 next_new_revision_;
@@ -198,6 +186,10 @@ class ProcessCommitResponseCommandTest : public SyncerCommandTest {
};
TEST_F(ProcessCommitResponseCommandTest, MultipleCommitIdProjections) {
+ sessions::OrderedCommitSet commit_set(session()->routing_info());
+ browser_sync::ClientToServerMessage request;
+ browser_sync::ClientToServerResponse response;
+
Id bookmark_folder_id = id_factory_.NewLocalId();
Id bookmark_id1 = id_factory_.NewLocalId();
Id bookmark_id2 = id_factory_.NewLocalId();
@@ -205,22 +197,30 @@ TEST_F(ProcessCommitResponseCommandTest, MultipleCommitIdProjections) {
Id autofill_id1 = id_factory_.NewLocalId();
Id autofill_id2 = id_factory_.NewLocalId();
CreateUnprocessedCommitResult(bookmark_folder_id, id_factory_.root(),
- "A bookmark folder", syncable::BOOKMARKS);
+ "A bookmark folder", syncable::BOOKMARKS,
+ &commit_set, &request, &response);
CreateUnprocessedCommitResult(bookmark_id1, bookmark_folder_id,
- "bookmark 1", syncable::BOOKMARKS);
+ "bookmark 1", syncable::BOOKMARKS,
+ &commit_set, &request, &response);
CreateUnprocessedCommitResult(bookmark_id2, bookmark_folder_id,
- "bookmark 2", syncable::BOOKMARKS);
+ "bookmark 2", syncable::BOOKMARKS,
+ &commit_set, &request, &response);
CreateUnprocessedCommitResult(pref_id1, id_factory_.root(),
- "Pref 1", syncable::PREFERENCES);
+ "Pref 1", syncable::PREFERENCES,
+ &commit_set, &request, &response);
CreateUnprocessedCommitResult(pref_id2, id_factory_.root(),
- "Pref 2", syncable::PREFERENCES);
+ "Pref 2", syncable::PREFERENCES,
+ &commit_set, &request, &response);
CreateUnprocessedCommitResult(autofill_id1, id_factory_.root(),
- "Autofill 1", syncable::AUTOFILL);
+ "Autofill 1", syncable::AUTOFILL,
+ &commit_set, &request, &response);
CreateUnprocessedCommitResult(autofill_id2, id_factory_.root(),
- "Autofill 2", syncable::AUTOFILL);
+ "Autofill 2", syncable::AUTOFILL,
+ &commit_set, &request, &response);
- ExpectGroupsToChange(command_, GROUP_UI, GROUP_DB);
- command_.ExecuteImpl(session());
+ ProcessCommitResponseCommand command(commit_set, request, response);
+ ExpectGroupsToChange(command, GROUP_UI, GROUP_DB);
+ command.ExecuteImpl(session());
ReadTransaction trans(FROM_HERE, directory());
Id new_fid;
@@ -270,10 +270,15 @@ TEST_F(ProcessCommitResponseCommandTest, MultipleCommitIdProjections) {
// how this scenario used to fail, reversing the order for the second half
// of the children.
TEST_F(ProcessCommitResponseCommandTest, NewFolderCommitKeepsChildOrder) {
+ sessions::OrderedCommitSet commit_set(session()->routing_info());
+ browser_sync::ClientToServerMessage request;
+ browser_sync::ClientToServerResponse response;
+
// Create the parent folder, a new item whose ID will change on commit.
Id folder_id = id_factory_.NewLocalId();
CreateUnprocessedCommitResult(folder_id, id_factory_.root(), "A",
- syncable::BOOKMARKS);
+ syncable::BOOKMARKS,
+ &commit_set, &request, &response);
// Verify that the item is reachable.
{
@@ -292,7 +297,8 @@ TEST_F(ProcessCommitResponseCommandTest, NewFolderCommitKeepsChildOrder) {
// Alternate between new and old child items, just for kicks.
Id id = (i % 4 < 2) ? id_factory_.NewLocalId() : id_factory_.NewServerId();
CreateUnprocessedCommitResult(
- id, folder_id, base::StringPrintf("Item %d", i), syncable::BOOKMARKS);
+ id, folder_id, base::StringPrintf("Item %d", i), syncable::BOOKMARKS,
+ &commit_set, &request, &response);
}
// The second 25 children will be unsynced items but NOT part of the commit
// batch. When the ID of the parent folder changes during the commit,
@@ -309,8 +315,9 @@ TEST_F(ProcessCommitResponseCommandTest, NewFolderCommitKeepsChildOrder) {
// 25 items. This should apply the values indicated by
// each CommitResponse_EntryResponse to the syncable Entries. All new
// items in the commit batch should have their IDs changed to server IDs.
- ExpectGroupToChange(command_, GROUP_UI);
- command_.ExecuteImpl(session());
+ ProcessCommitResponseCommand command(commit_set, request, response);
+ ExpectGroupToChange(command, GROUP_UI);
+ command.ExecuteImpl(session());
ReadTransaction trans(FROM_HERE, directory());
// Lookup the parent folder by finding a child of the root. We can't use
@@ -391,20 +398,26 @@ INSTANTIATE_TEST_CASE_P(ProcessCommitResponse,
// happens to the extensions activity records. Commits could fail or succeed,
// depending on the test parameter.
TEST_P(MixedResult, ExtensionActivity) {
+ sessions::OrderedCommitSet commit_set(session()->routing_info());
+ browser_sync::ClientToServerMessage request;
+ browser_sync::ClientToServerResponse response;
+
EXPECT_NE(routing_info().find(syncable::BOOKMARKS)->second,
routing_info().find(syncable::AUTOFILL)->second)
<< "To not be lame, this test requires more than one active group.";
// Bookmark item setup.
CreateUnprocessedCommitResult(id_factory_.NewServerId(),
- id_factory_.root(), "Some bookmark", syncable::BOOKMARKS);
+ id_factory_.root(), "Some bookmark", syncable::BOOKMARKS,
+ &commit_set, &request, &response);
if (ShouldFailBookmarkCommit())
- SetLastErrorCode(CommitResponse::TRANSIENT_ERROR);
+ SetLastErrorCode(CommitResponse::TRANSIENT_ERROR, &response);
// Autofill item setup.
CreateUnprocessedCommitResult(id_factory_.NewServerId(),
- id_factory_.root(), "Some autofill", syncable::AUTOFILL);
+ id_factory_.root(), "Some autofill", syncable::AUTOFILL,
+ &commit_set, &request, &response);
if (ShouldFailAutofillCommit())
- SetLastErrorCode(CommitResponse::TRANSIENT_ERROR);
+ SetLastErrorCode(CommitResponse::TRANSIENT_ERROR, &response);
// Put some extensions activity in the session.
{
@@ -415,8 +428,9 @@ TEST_P(MixedResult, ExtensionActivity) {
(*records)["xyz"].extension_id = "xyz";
(*records)["xyz"].bookmark_write_count = 4U;
}
- ExpectGroupsToChange(command_, GROUP_UI, GROUP_DB);
- command_.ExecuteImpl(session());
+ ProcessCommitResponseCommand command(commit_set, request, response);
+ command.ExecuteImpl(session());
+ ExpectGroupsToChange(command, GROUP_UI, GROUP_DB);
ExtensionsActivityMonitor::Records final_monitor_records;
context()->extensions_monitor()->GetAndClearRecords(&final_monitor_records);
diff --git a/sync/engine/syncer.cc b/sync/engine/syncer.cc
index 5de4ae5..ac6b5eb7 100644
--- a/sync/engine/syncer.cc
+++ b/sync/engine/syncer.cc
@@ -14,11 +14,10 @@
#include "sync/engine/build_commit_command.h"
#include "sync/engine/cleanup_disabled_types_command.h"
#include "sync/engine/clear_data_command.h"
+#include "sync/engine/commit.h"
#include "sync/engine/conflict_resolver.h"
#include "sync/engine/download_updates_command.h"
-#include "sync/engine/get_commit_ids_command.h"
#include "sync/engine/net/server_connection_manager.h"
-#include "sync/engine/post_commit_message_command.h"
#include "sync/engine/process_commit_response_command.h"
#include "sync/engine/process_updates_command.h"
#include "sync/engine/resolve_conflicts_command.h"
@@ -65,9 +64,7 @@ const char* SyncerStepToString(const SyncerStep step)
ENUM_CASE(PROCESS_UPDATES);
ENUM_CASE(STORE_TIMESTAMPS);
ENUM_CASE(APPLY_UPDATES);
- ENUM_CASE(BUILD_COMMIT_REQUEST);
- ENUM_CASE(POST_COMMIT_MESSAGE);
- ENUM_CASE(PROCESS_COMMIT_RESPONSE);
+ ENUM_CASE(COMMIT);
ENUM_CASE(RESOLVE_CONFLICTS);
ENUM_CASE(APPLY_UPDATES_TO_RESOLVE_CONFLICTS);
ENUM_CASE(CLEAR_PRIVATE_DATA);
@@ -180,59 +177,12 @@ void Syncer::SyncShare(sessions::SyncSession* session,
last_step = SYNCER_END;
next_step = SYNCER_END;
} else {
- next_step = BUILD_COMMIT_REQUEST;
+ next_step = COMMIT;
}
break;
}
- // These two steps are combined since they are executed within the same
- // write transaction.
- case BUILD_COMMIT_REQUEST: {
- syncable::Directory* dir = session->context()->directory();
- WriteTransaction trans(FROM_HERE, SYNCER, dir);
- sessions::ScopedSetSessionWriteTransaction set_trans(session, &trans);
-
- DVLOG(1) << "Getting the Commit IDs";
- GetCommitIdsCommand get_commit_ids_command(
- session->context()->max_commit_batch_size());
- get_commit_ids_command.Execute(session);
-
- if (!session->status_controller().commit_ids().empty()) {
- DVLOG(1) << "Building a commit message";
-
- // This isn't perfect, since the set of extensions activity may not
- // correlate exactly with the items being committed. That's OK as
- // long as we're looking for a rough estimate of extensions activity,
- // not an precise mapping of which commits were triggered by which
- // extension.
- //
- // We will push this list of extensions activity back into the
- // ExtensionsActivityMonitor if this commit turns out to not contain
- // any bookmarks, or if the commit fails.
- session->context()->extensions_monitor()->GetAndClearRecords(
- session->mutable_extensions_activity());
-
- BuildCommitCommand build_commit_command;
- build_commit_command.Execute(session);
-
- next_step = POST_COMMIT_MESSAGE;
- } else {
- next_step = RESOLVE_CONFLICTS;
- }
-
- break;
- }
- case POST_COMMIT_MESSAGE: {
- PostCommitMessageCommand post_commit_command;
- session->mutable_status_controller()->set_last_post_commit_result(
- post_commit_command.Execute(session));
- next_step = PROCESS_COMMIT_RESPONSE;
- break;
- }
- case PROCESS_COMMIT_RESPONSE: {
- ProcessCommitResponseCommand process_response_command;
- session->mutable_status_controller()->
- set_last_process_commit_response_result(
- process_response_command.Execute(session));
+ case COMMIT: {
+ BuildAndPostCommits(this, session);
next_step = RESOLVE_CONFLICTS;
break;
}
diff --git a/sync/engine/syncer.h b/sync/engine/syncer.h
index 86af488..a5b7fc8 100644
--- a/sync/engine/syncer.h
+++ b/sync/engine/syncer.h
@@ -35,9 +35,7 @@ enum SyncerStep {
PROCESS_UPDATES,
STORE_TIMESTAMPS,
APPLY_UPDATES,
- BUILD_COMMIT_REQUEST,
- POST_COMMIT_MESSAGE,
- PROCESS_COMMIT_RESPONSE,
+ COMMIT,
RESOLVE_CONFLICTS,
APPLY_UPDATES_TO_RESOLVE_CONFLICTS,
CLEAR_PRIVATE_DATA, // TODO(tim): Rename 'private' to 'user'.
diff --git a/sync/engine/syncer_unittest.cc b/sync/engine/syncer_unittest.cc
index 9253bd3..245c685 100644
--- a/sync/engine/syncer_unittest.cc
+++ b/sync/engine/syncer_unittest.cc
@@ -391,37 +391,31 @@ class SyncerTest : public testing::Test,
void DoTruncationTest(const vector<int64>& unsynced_handle_view,
const vector<syncable::Id>& expected_id_order) {
for (size_t limit = expected_id_order.size() + 2; limit > 0; --limit) {
- StatusController* status = session_->mutable_status_controller();
WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
ScopedSetSessionWriteTransaction set_trans(session_.get(), &wtrans);
ModelSafeRoutingInfo routes;
GetModelSafeRoutingInfo(&routes);
- GetCommitIdsCommand command(limit);
+ sessions::OrderedCommitSet output_set(routes);
+ GetCommitIdsCommand command(limit, &output_set);
std::set<int64> ready_unsynced_set;
command.FilterUnreadyEntries(&wtrans, syncable::ModelTypeSet(),
syncable::ModelTypeSet(), false,
unsynced_handle_view, &ready_unsynced_set);
command.BuildCommitIds(session_->write_transaction(), routes,
ready_unsynced_set);
- syncable::Directory::UnsyncedMetaHandles ready_unsynced_vector(
- ready_unsynced_set.begin(), ready_unsynced_set.end());
- status->set_unsynced_handles(ready_unsynced_vector);
- vector<syncable::Id> output =
- command.ordered_commit_set_->GetAllCommitIds();
size_t truncated_size = std::min(limit, expected_id_order.size());
- ASSERT_EQ(truncated_size, output.size());
+ ASSERT_EQ(truncated_size, output_set.Size());
for (size_t i = 0; i < truncated_size; ++i) {
- ASSERT_EQ(expected_id_order[i], output[i])
+ ASSERT_EQ(expected_id_order[i], output_set.GetCommitIdAt(i))
<< "At index " << i << " with batch size limited to " << limit;
}
sessions::OrderedCommitSet::Projection proj;
- proj = command.ordered_commit_set_->GetCommitIdProjection(GROUP_PASSIVE);
+ proj = output_set.GetCommitIdProjection(GROUP_PASSIVE);
ASSERT_EQ(truncated_size, proj.size());
for (size_t i = 0; i < truncated_size; ++i) {
SCOPED_TRACE(::testing::Message("Projection mismatch with i = ") << i);
- syncable::Id projected =
- command.ordered_commit_set_->GetCommitIdAt(proj[i]);
+ syncable::Id projected = output_set.GetCommitIdAt(proj[i]);
ASSERT_EQ(expected_id_order[proj[i]], projected);
// Since this projection is the identity, the following holds.
ASSERT_EQ(expected_id_order[i], projected);
@@ -734,9 +728,6 @@ TEST_F(SyncerTest, GetCommitIdsFiltersUnreadyEntries) {
}
SyncShareNudge();
{
- // We remove any unready entries from the status controller's unsynced
- // handles, so this should remain 0 even though the entries didn't commit.
- EXPECT_EQ(0U, session_->status_controller().unsynced_handles().size());
// Nothing should have commited due to bookmarks being encrypted and
// the cryptographer having pending keys. A would have been resolved
// as a simple conflict, but still be unsynced until the next sync cycle.
@@ -751,8 +742,6 @@ TEST_F(SyncerTest, GetCommitIdsFiltersUnreadyEntries) {
}
SyncShareNudge();
{
- // 2 unsynced handles to reflect the items that committed succesfully.
- EXPECT_EQ(2U, session_->status_controller().unsynced_handles().size());
// All properly encrypted and non-conflicting items should commit. "A" was
// conflicting, but last sync cycle resolved it as simple conflict, so on
// this sync cycle it committed succesfullly.
@@ -780,9 +769,9 @@ TEST_F(SyncerTest, GetCommitIdsFiltersUnreadyEntries) {
}
SyncShareNudge();
{
- // We attempted to commit two items.
- EXPECT_EQ(2U, session_->status_controller().unsynced_handles().size());
- EXPECT_TRUE(session_->status_controller().did_commit_items());
+ const StatusController& status_controller = session_->status_controller();
+ // Expect success.
+ EXPECT_EQ(status_controller.last_post_commit_result(), SYNCER_OK);
// None should be unsynced anymore.
ReadTransaction rtrans(FROM_HERE, directory());
VERIFY_ENTRY(1, false, false, false, 0, 21, 21, ids_, &rtrans);
@@ -831,7 +820,6 @@ TEST_F(SyncerTest, EncryptionAwareConflicts) {
mock_server_->AddUpdateSpecifics(4, 0, "D", 10, 10, false, 0, pref);
SyncShareNudge();
{
- EXPECT_EQ(0U, session_->status_controller().unsynced_handles().size());
// Initial state. Everything is normal.
ReadTransaction rtrans(FROM_HERE, directory());
VERIFY_ENTRY(1, false, false, false, 0, 10, 10, ids_, &rtrans);
@@ -852,7 +840,6 @@ TEST_F(SyncerTest, EncryptionAwareConflicts) {
encrypted_pref);
SyncShareNudge();
{
- EXPECT_EQ(0U, session_->status_controller().unsynced_handles().size());
// All should be unapplied due to being undecryptable and have a valid
// BASE_SERVER_SPECIFICS.
ReadTransaction rtrans(FROM_HERE, directory());
@@ -873,7 +860,6 @@ TEST_F(SyncerTest, EncryptionAwareConflicts) {
encrypted_pref);
SyncShareNudge();
{
- EXPECT_EQ(0U, session_->status_controller().unsynced_handles().size());
// Items 1, 2, and 4 should have newer server versions, 3 remains the same.
// All should remain unapplied due to be undecryptable.
ReadTransaction rtrans(FROM_HERE, directory());
@@ -892,7 +878,6 @@ TEST_F(SyncerTest, EncryptionAwareConflicts) {
encrypted_bookmark);
SyncShareNudge();
{
- EXPECT_EQ(0U, session_->status_controller().unsynced_handles().size());
// Items 2 and 4 should be the only ones with BASE_SERVER_SPECIFICS set.
// Items 1 is now unencrypted, so should have applied normally.
ReadTransaction rtrans(FROM_HERE, directory());
@@ -928,7 +913,6 @@ TEST_F(SyncerTest, EncryptionAwareConflicts) {
}
SyncShareNudge();
{
- EXPECT_EQ(0U, session_->status_controller().unsynced_handles().size());
// Item 1 remains unsynced due to there being pending keys.
// Items 2, 3, 4 should remain unsynced since they were not up to date.
ReadTransaction rtrans(FROM_HERE, directory());
@@ -945,31 +929,23 @@ TEST_F(SyncerTest, EncryptionAwareConflicts) {
}
// First cycle resolves conflicts, second cycle commits changes.
SyncShareNudge();
- EXPECT_EQ(2, session_->status_controller().syncer_status().
- num_server_overwrites);
- EXPECT_EQ(1, session_->status_controller().syncer_status().
- num_local_overwrites);
- // We attempted to commit item 1.
- EXPECT_EQ(1U, session_->status_controller().unsynced_handles().size());
- EXPECT_TRUE(session_->status_controller().did_commit_items());
- SyncShareNudge();
- {
- // Everything should be resolved now. The local changes should have
- // overwritten the server changes for 2 and 4, while the server changes
- // overwrote the local for entry 3.
- // We attempted to commit two handles.
- EXPECT_EQ(0, session_->status_controller().syncer_status().
- num_server_overwrites);
- EXPECT_EQ(0, session_->status_controller().syncer_status().
- num_local_overwrites);
- EXPECT_EQ(2U, session_->status_controller().unsynced_handles().size());
- EXPECT_TRUE(session_->status_controller().did_commit_items());
- ReadTransaction rtrans(FROM_HERE, directory());
- VERIFY_ENTRY(1, false, false, false, 0, 41, 41, ids_, &rtrans);
- VERIFY_ENTRY(2, false, false, false, 1, 31, 31, ids_, &rtrans);
- VERIFY_ENTRY(3, false, false, false, 1, 30, 30, ids_, &rtrans);
- VERIFY_ENTRY(4, false, false, false, 0, 31, 31, ids_, &rtrans);
- }
+ EXPECT_EQ(2, status().syncer_status().num_server_overwrites);
+ EXPECT_EQ(1, status().syncer_status().num_local_overwrites);
+ // We successfully committed item(s).
+ EXPECT_EQ(status().last_post_commit_result(), SYNCER_OK);
+ SyncShareNudge();
+
+ // Everything should be resolved now. The local changes should have
+ // overwritten the server changes for 2 and 4, while the server changes
+ // overwrote the local for entry 3.
+ EXPECT_EQ(0, status().syncer_status().num_server_overwrites);
+ EXPECT_EQ(0, status().syncer_status().num_local_overwrites);
+ EXPECT_EQ(status().last_post_commit_result(), SYNCER_OK);
+ ReadTransaction rtrans(FROM_HERE, directory());
+ VERIFY_ENTRY(1, false, false, false, 0, 41, 41, ids_, &rtrans);
+ VERIFY_ENTRY(2, false, false, false, 1, 31, 31, ids_, &rtrans);
+ VERIFY_ENTRY(3, false, false, false, 1, 30, 30, ids_, &rtrans);
+ VERIFY_ENTRY(4, false, false, false, 0, 31, 31, ids_, &rtrans);
}
#undef VERIFY_ENTRY
@@ -1198,7 +1174,6 @@ TEST_F(SyncerTest, TestGetUnsyncedAndSimpleCommit) {
}
SyncShareNudge();
- EXPECT_EQ(2u, status().unsynced_handles().size());
ASSERT_EQ(2u, mock_server_->committed_ids().size());
// If this test starts failing, be aware other sort orders could be valid.
EXPECT_TRUE(parent_id_ == mock_server_->committed_ids()[0]);
@@ -1242,7 +1217,6 @@ TEST_F(SyncerTest, TestPurgeWhileUnsynced) {
syncable::ModelTypeSet(syncable::PREFERENCES));
SyncShareNudge();
- EXPECT_EQ(2U, status().unsynced_handles().size());
ASSERT_EQ(2U, mock_server_->committed_ids().size());
// If this test starts failing, be aware other sort orders could be valid.
EXPECT_TRUE(parent_id_ == mock_server_->committed_ids()[0]);
@@ -1481,7 +1455,6 @@ TEST_F(SyncerTest, TestCommitListOrderingWithNesting) {
}
SyncShareNudge();
- EXPECT_EQ(6u, session_->status_controller().unsynced_handles().size());
ASSERT_EQ(6u, mock_server_->committed_ids().size());
// This test will NOT unroll deletes because SERVER_PARENT_ID is not set.
// It will treat these like moves.
@@ -1549,7 +1522,6 @@ TEST_F(SyncerTest, TestCommitListOrderingWithNewItems) {
}
SyncShareNudge();
- EXPECT_EQ(6u, session_->status_controller().unsynced_handles().size());
ASSERT_EQ(6u, mock_server_->committed_ids().size());
// If this test starts failing, be aware other sort orders could be valid.
EXPECT_TRUE(parent_id_ == mock_server_->committed_ids()[0]);
@@ -1588,7 +1560,6 @@ TEST_F(SyncerTest, TestCommitListOrderingCounterexample) {
}
SyncShareNudge();
- EXPECT_EQ(3u, session_->status_controller().unsynced_handles().size());
ASSERT_EQ(3u, mock_server_->committed_ids().size());
// If this test starts failing, be aware other sort orders could be valid.
EXPECT_TRUE(parent_id_ == mock_server_->committed_ids()[0]);
@@ -1634,7 +1605,6 @@ TEST_F(SyncerTest, TestCommitListOrderingAndNewParent) {
}
SyncShareNudge();
- EXPECT_EQ(3u, session_->status_controller().unsynced_handles().size());
ASSERT_EQ(3u, mock_server_->committed_ids().size());
// If this test starts failing, be aware other sort orders could be valid.
EXPECT_TRUE(parent_id_ == mock_server_->committed_ids()[0]);
@@ -1705,7 +1675,6 @@ TEST_F(SyncerTest, TestCommitListOrderingAndNewParentAndChild) {
}
SyncShareNudge();
- EXPECT_EQ(3u, session_->status_controller().unsynced_handles().size());
ASSERT_EQ(3u, mock_server_->committed_ids().size());
// If this test starts failing, be aware other sort orders could be valid.
EXPECT_TRUE(parent_id_ == mock_server_->committed_ids()[0]);
@@ -2332,8 +2301,9 @@ TEST_F(EntryCreatedInNewFolderTest, EntryCreatedInNewFolderMidSync) {
mock_server_->SetMidCommitCallback(
base::Bind(&EntryCreatedInNewFolderTest::CreateFolderInBob,
base::Unretained(this)));
- syncer_->SyncShare(session_.get(), BUILD_COMMIT_REQUEST, SYNCER_END);
- EXPECT_EQ(1u, mock_server_->committed_ids().size());
+ syncer_->SyncShare(session_.get(), COMMIT, SYNCER_END);
+ // We loop until no unsynced handles remain, so we will commit both ids.
+ EXPECT_EQ(2u, mock_server_->committed_ids().size());
{
ReadTransaction trans(FROM_HERE, directory());
Entry parent_entry(&trans, syncable::GET_BY_ID,
@@ -2345,7 +2315,7 @@ TEST_F(EntryCreatedInNewFolderTest, EntryCreatedInNewFolderMidSync) {
Entry child(&trans, syncable::GET_BY_ID, child_id);
ASSERT_TRUE(child.good());
EXPECT_EQ(parent_entry.Get(ID), child.Get(PARENT_ID));
-}
+ }
}
TEST_F(SyncerTest, NegativeIDInUpdate) {
@@ -2691,9 +2661,12 @@ TEST_F(SyncerTest, NameCollidingFolderSwapWorksFine) {
saw_syncer_event_ = false;
}
-TEST_F(SyncerTest, CommitManyItemsInOneGo) {
- uint32 max_batches = 3;
- uint32 items_to_commit = kDefaultMaxCommitBatchSize * max_batches;
+// Committing more than kDefaultMaxCommitBatchSize items requires that
+// we post more than one commit command to the server. This test makes
+// sure that scenario works as expected.
+TEST_F(SyncerTest, CommitManyItemsInOneGo_Success) {
+ uint32 num_batches = 3;
+ uint32 items_to_commit = kDefaultMaxCommitBatchSize * num_batches;
{
WriteTransaction trans(FROM_HERE, UNITTEST, directory());
for (uint32 i = 0; i < items_to_commit; i++) {
@@ -2705,12 +2678,71 @@ TEST_F(SyncerTest, CommitManyItemsInOneGo) {
e.Put(SPECIFICS, DefaultBookmarkSpecifics());
}
}
- uint32 num_loops = 0;
- while (SyncShareNudge()) {
- num_loops++;
- ASSERT_LT(num_loops, max_batches * 2);
+ ASSERT_EQ(items_to_commit, directory()->unsynced_entity_count());
+
+ EXPECT_FALSE(SyncShareNudge());
+ EXPECT_EQ(num_batches, mock_server_->commit_messages().size());
+ EXPECT_EQ(0, directory()->unsynced_entity_count());
+}
+
+// Test that a single failure to contact the server will cause us to exit the
+// commit loop immediately.
+TEST_F(SyncerTest, CommitManyItemsInOneGo_PostBufferFail) {
+ uint32 num_batches = 3;
+ uint32 items_to_commit = kDefaultMaxCommitBatchSize * num_batches;
+ {
+ WriteTransaction trans(FROM_HERE, UNITTEST, directory());
+ for (uint32 i = 0; i < items_to_commit; i++) {
+ string nameutf8 = base::StringPrintf("%d", i);
+ string name(nameutf8.begin(), nameutf8.end());
+ MutableEntry e(&trans, CREATE, trans.root_id(), name);
+ e.Put(IS_UNSYNCED, true);
+ e.Put(IS_DIR, true);
+ e.Put(SPECIFICS, DefaultBookmarkSpecifics());
+ }
}
- EXPECT_GE(mock_server_->commit_messages().size(), max_batches);
+ ASSERT_EQ(items_to_commit, directory()->unsynced_entity_count());
+
+ // The second commit should fail. It will be preceded by one successful
+ // GetUpdates and one successful commit.
+ mock_server_->FailNthPostBufferToPathCall(3);
+ SyncShareNudge();
+
+ EXPECT_EQ(1U, mock_server_->commit_messages().size());
+ EXPECT_FALSE(session_->Succeeded());
+ EXPECT_EQ(SYNC_SERVER_ERROR,
+ session_->status_controller().error().last_post_commit_result);
+ EXPECT_EQ(items_to_commit - kDefaultMaxCommitBatchSize,
+ directory()->unsynced_entity_count());
+}
+
+// Test that a single conflict response from the server will cause us to exit
+// the commit loop immediately.
+TEST_F(SyncerTest, CommitManyItemsInOneGo_CommitConflict) {
+ uint32 num_batches = 2;
+ uint32 items_to_commit = kDefaultMaxCommitBatchSize * num_batches;
+ {
+ WriteTransaction trans(FROM_HERE, UNITTEST, directory());
+ for (uint32 i = 0; i < items_to_commit; i++) {
+ string nameutf8 = base::StringPrintf("%d", i);
+ string name(nameutf8.begin(), nameutf8.end());
+ MutableEntry e(&trans, CREATE, trans.root_id(), name);
+ e.Put(IS_UNSYNCED, true);
+ e.Put(IS_DIR, true);
+ e.Put(SPECIFICS, DefaultBookmarkSpecifics());
+ }
+ }
+ ASSERT_EQ(items_to_commit, directory()->unsynced_entity_count());
+
+ // Return a CONFLICT response for the first item.
+ mock_server_->set_conflict_n_commits(1);
+ SyncShareNudge();
+
+ // We should stop looping at the first sign of trouble.
+ EXPECT_EQ(1U, mock_server_->commit_messages().size());
+ EXPECT_FALSE(session_->Succeeded());
+ EXPECT_EQ(items_to_commit - (kDefaultMaxCommitBatchSize - 1),
+ directory()->unsynced_entity_count());
}
TEST_F(SyncerTest, HugeConflict) {
@@ -4266,18 +4298,27 @@ TEST_F(SyncerUndeletionTest, UndeleteDuringCommit) {
base::Bind(&SyncerUndeletionTest::Undelete, base::Unretained(this)));
SyncShareNudge();
- // The item ought to exist as an unsynced undeletion (meaning,
- // we think that the next commit ought to be a recreation commit).
+ // We will continue to commit until all nodes are synced, so we expect
+ // that both the delete and following undelete were committed. We haven't
+ // downloaded any updates, though, so the SERVER fields will be the same
+ // as they were at the start of the cycle.
EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
- ExpectUnsyncedUndeletion();
+
+ // Server fields lag behind.
+ EXPECT_FALSE(Get(metahandle_, SERVER_IS_DEL));
+
+ // We have committed the second (undelete) update.
+ EXPECT_FALSE(Get(metahandle_, IS_DEL));
+ EXPECT_FALSE(Get(metahandle_, IS_UNSYNCED));
+ EXPECT_FALSE(Get(metahandle_, IS_UNAPPLIED_UPDATE));
// Now, encounter a GetUpdates corresponding to the deletion from
// the server. The undeletion should prevail again and be committed.
// None of this should trigger any conflict detection -- it is perfectly
// normal to receive updates from our own commits.
mock_server_->SetMidCommitCallback(base::Closure());
- mock_server_->AddUpdateTombstone(Get(metahandle_, ID));
+ mock_server_->AddUpdateFromLastCommit();
SyncShareNudge();
EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
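
The three CommitManyItemsInOneGo_* tests above pin down the new commit loop: items are posted in batches of at most kDefaultMaxCommitBatchSize until nothing unsynced remains, and the loop exits on the first post failure or conflict. Below is a minimal, self-contained sketch of that control flow for illustration only; the types and the batch size constant are simplified stand-ins, not the actual sync/engine/commit.cc implementation.

// Illustrative sketch of the batched commit loop exercised by the tests above.
// All types here are simplified stand-ins for the real syncer classes.
#include <algorithm>
#include <cstddef>
#include <iostream>

enum SyncerError { SYNCER_OK, SYNC_SERVER_ERROR };

static const size_t kMaxBatchSize = 25;  // stand-in for kDefaultMaxCommitBatchSize

// Fake "server" that can be told to fail on the Nth commit post.
struct FakeCommitServer {
  explicit FakeCommitServer(int fail_on_call)
      : fail_on_call_(fail_on_call), calls_(0) {}
  SyncerError PostCommit(size_t /*batch_size*/) {
    ++calls_;
    return (calls_ == fail_on_call_) ? SYNC_SERVER_ERROR : SYNCER_OK;
  }
  int fail_on_call_;
  int calls_;
};

// Keep committing batches until nothing is left or the first error, which is
// the behavior CommitManyItemsInOneGo_Success/_PostBufferFail expect.
SyncerError CommitAllItems(size_t unsynced, FakeCommitServer* server,
                           int* batches_posted) {
  *batches_posted = 0;
  while (unsynced > 0) {
    const size_t batch = std::min(unsynced, kMaxBatchSize);
    const SyncerError result = server->PostCommit(batch);
    if (result != SYNCER_OK)
      return result;  // bail out at the first sign of trouble
    unsynced -= batch;
    ++(*batches_posted);
  }
  return SYNCER_OK;
}

int main() {
  int batches = 0;
  FakeCommitServer reliable(-1);
  CommitAllItems(3 * kMaxBatchSize, &reliable, &batches);
  std::cout << "success case posted " << batches << " batches\n";  // 3

  FakeCommitServer flaky(2);  // second commit post fails
  SyncerError result = CommitAllItems(3 * kMaxBatchSize, &flaky, &batches);
  std::cout << "failure case posted " << batches
            << " batch(es), result=" << result << "\n";
  return 0;
}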
diff --git a/sync/internal_api/all_status.cc b/sync/internal_api/all_status.cc
index 1f15f1e..17c885d 100644
--- a/sync/internal_api/all_status.cc
+++ b/sync/internal_api/all_status.cc
@@ -29,7 +29,6 @@ sync_api::SyncManager::Status AllStatus::CreateBlankStatus() const {
// whose values accumulate (e.g. lifetime counters like updates_received)
// are not to be cleared here.
sync_api::SyncManager::Status status = status_;
- status.unsynced_count = 0;
status.encryption_conflicts = 0;
status.hierarchy_conflicts = 0;
status.simple_conflicts = 0;
@@ -44,7 +43,6 @@ sync_api::SyncManager::Status AllStatus::CalcSyncing(
const SyncEngineEvent &event) const {
sync_api::SyncManager::Status status = CreateBlankStatus();
const sessions::SyncSessionSnapshot& snapshot = event.snapshot;
- status.unsynced_count = static_cast<int>(snapshot.unsynced_count());
status.encryption_conflicts = snapshot.num_encryption_conflicts();
status.hierarchy_conflicts = snapshot.num_hierarchy_conflicts();
status.simple_conflicts = snapshot.num_simple_conflicts();
diff --git a/sync/internal_api/js_sync_manager_observer_unittest.cc b/sync/internal_api/js_sync_manager_observer_unittest.cc
index a0e021a..31b889b 100644
--- a/sync/internal_api/js_sync_manager_observer_unittest.cc
+++ b/sync/internal_api/js_sync_manager_observer_unittest.cc
@@ -81,12 +81,10 @@ TEST_F(JsSyncManagerObserverTest, OnSyncCycleCompleted) {
download_progress_markers,
false,
true,
- 100,
8,
5,
2,
7,
- false,
sessions::SyncSourceInfo(),
false,
0,
diff --git a/sync/internal_api/sync_manager.cc b/sync/internal_api/sync_manager.cc
index f33fefa..1906185 100644
--- a/sync/internal_api/sync_manager.cc
+++ b/sync/internal_api/sync_manager.cc
@@ -721,7 +721,6 @@ SyncManager::SyncManager(const std::string& name)
SyncManager::Status::Status()
: notifications_enabled(false),
notifications_received(0),
- unsynced_count(0),
encryption_conflicts(0),
hierarchy_conflicts(0),
simple_conflicts(0),
diff --git a/sync/internal_api/sync_manager.h b/sync/internal_api/sync_manager.h
index cfc05a0..4d23d19 100644
--- a/sync/internal_api/sync_manager.h
+++ b/sync/internal_api/sync_manager.h
@@ -102,9 +102,6 @@ class SyncManager {
browser_sync::SyncProtocolError sync_protocol_error;
- // Number of unsynced items counted at the start of most recent sync cycle.
- int unsynced_count;
-
// Number of encryption conflicts counted during most recent sync cycle.
int encryption_conflicts;
diff --git a/sync/sessions/ordered_commit_set.cc b/sync/sessions/ordered_commit_set.cc
index 51a354e..c3fc5d0 100644
--- a/sync/sessions/ordered_commit_set.cc
+++ b/sync/sessions/ordered_commit_set.cc
@@ -31,8 +31,15 @@ void OrderedCommitSet::AddCommitItem(const int64 metahandle,
}
}
+const OrderedCommitSet::Projection& OrderedCommitSet::GetCommitIdProjection(
+ browser_sync::ModelSafeGroup group) const {
+ Projections::const_iterator i = projections_.find(group);
+ DCHECK(i != projections_.end());
+ return i->second;
+}
+
void OrderedCommitSet::Append(const OrderedCommitSet& other) {
- for (int i = 0; i < other.Size(); ++i) {
+ for (size_t i = 0; i < other.Size(); ++i) {
CommitItem item = other.GetCommitItemAt(i);
AddCommitItem(item.meta, item.id, item.group);
}
@@ -71,8 +78,19 @@ void OrderedCommitSet::Truncate(size_t max_size) {
}
}
+void OrderedCommitSet::Clear() {
+ inserted_metahandles_.clear();
+ commit_ids_.clear();
+ metahandle_order_.clear();
+ for (Projections::iterator it = projections_.begin();
+ it != projections_.end(); ++it) {
+ it->second.clear();
+ }
+ types_.clear();
+}
+
OrderedCommitSet::CommitItem OrderedCommitSet::GetCommitItemAt(
- const int position) const {
+ const size_t position) const {
DCHECK(position < Size());
CommitItem return_item = {metahandle_order_[position],
commit_ids_[position],
diff --git a/sync/sessions/ordered_commit_set.h b/sync/sessions/ordered_commit_set.h
index 8551c07..f8e6938 100644
--- a/sync/sessions/ordered_commit_set.h
+++ b/sync/sessions/ordered_commit_set.h
@@ -64,14 +64,17 @@ class OrderedCommitSet {
// belonging to |group|. This is useful when you need to process a commit
// response one ModelSafeGroup at a time. See GetCommitIdAt for how the
// indices contained in the returned Projection can be used.
- const Projection& GetCommitIdProjection(browser_sync::ModelSafeGroup group) {
- return projections_[group];
- }
+ const Projection& GetCommitIdProjection(
+ browser_sync::ModelSafeGroup group) const;
- int Size() const {
+ size_t Size() const {
return commit_ids_.size();
}
+ bool Empty() const {
+ return Size() == 0;
+ }
+
// Returns true iff any of the commit ids added to this set have model type
// BOOKMARKS.
bool HasBookmarkCommitId() const;
@@ -80,6 +83,9 @@ class OrderedCommitSet {
void AppendReverse(const OrderedCommitSet& other);
void Truncate(size_t max_size);
+ // Removes all entries from this set.
+ void Clear();
+
void operator=(const OrderedCommitSet& other);
private:
// A set of CommitIdProjections associated with particular ModelSafeGroups.
@@ -92,7 +98,7 @@ class OrderedCommitSet {
syncable::ModelType group;
};
- CommitItem GetCommitItemAt(const int position) const;
+ CommitItem GetCommitItemAt(const size_t position) const;
// These lists are different views of the same items; e.g they are
// isomorphic.
diff --git a/sync/sessions/ordered_commit_set_unittest.cc b/sync/sessions/ordered_commit_set_unittest.cc
index fee37bf..6310c9d 100644
--- a/sync/sessions/ordered_commit_set_unittest.cc
+++ b/sync/sessions/ordered_commit_set_unittest.cc
@@ -115,6 +115,18 @@ TEST_F(OrderedCommitSetTest, HasBookmarkCommitId) {
EXPECT_FALSE(commit_set.HasBookmarkCommitId());
}
+TEST_F(OrderedCommitSetTest, AddAndRemoveEntries) {
+ OrderedCommitSet commit_set(routes_);
+
+ ASSERT_TRUE(commit_set.Empty());
+
+ commit_set.AddCommitItem(0, ids_.NewLocalId(), syncable::AUTOFILL);
+ ASSERT_EQ(static_cast<size_t>(1), commit_set.Size());
+
+ commit_set.Clear();
+ ASSERT_TRUE(commit_set.Empty());
+}
+
} // namespace sessions
} // namespace browser_sync
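
For readers unfamiliar with OrderedCommitSet, here is a small standalone model of the surface touched by this patch: a size_t Size(), the new Empty() and Clear(), and a const GetCommitIdProjection(). The real class stores syncable::Ids and keys projections by ModelSafeGroup from the routing info; the sketch below uses plain ints and a two-value enum purely for illustration.

// Simplified stand-in for sessions::OrderedCommitSet, illustration only.
#include <cassert>
#include <cstddef>
#include <map>
#include <vector>

enum ModelSafeGroup { GROUP_UI, GROUP_PASSIVE };

class MiniCommitSet {
 public:
  typedef std::vector<size_t> Projection;

  void AddCommitItem(int commit_id, ModelSafeGroup group) {
    projections_[group].push_back(commit_ids_.size());
    commit_ids_.push_back(commit_id);
  }

  // Const lookup, mirroring the new GetCommitIdProjection() signature.
  const Projection& GetCommitIdProjection(ModelSafeGroup group) const {
    std::map<ModelSafeGroup, Projection>::const_iterator it =
        projections_.find(group);
    assert(it != projections_.end());
    return it->second;
  }

  size_t Size() const { return commit_ids_.size(); }
  bool Empty() const { return Size() == 0; }

  // Removes all entries, like the new OrderedCommitSet::Clear().
  void Clear() {
    commit_ids_.clear();
    for (std::map<ModelSafeGroup, Projection>::iterator it =
             projections_.begin(); it != projections_.end(); ++it) {
      it->second.clear();
    }
  }

 private:
  std::vector<int> commit_ids_;
  std::map<ModelSafeGroup, Projection> projections_;
};

int main() {
  MiniCommitSet commit_set;
  assert(commit_set.Empty());
  commit_set.AddCommitItem(42, GROUP_PASSIVE);
  assert(commit_set.Size() == 1u);
  assert(commit_set.GetCommitIdProjection(GROUP_PASSIVE).size() == 1u);
  commit_set.Clear();
  assert(commit_set.Empty());
  return 0;
}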
diff --git a/sync/sessions/session_state.cc b/sync/sessions/session_state.cc
index d0d23fa..ba9931f 100644
--- a/sync/sessions/session_state.cc
+++ b/sync/sessions/session_state.cc
@@ -86,12 +86,10 @@ SyncSessionSnapshot::SyncSessionSnapshot()
is_share_usable_(false),
has_more_to_sync_(false),
is_silenced_(false),
- unsynced_count_(0),
num_encryption_conflicts_(0),
num_hierarchy_conflicts_(0),
num_simple_conflicts_(0),
num_server_conflicts_(0),
- did_commit_items_(false),
notifications_enabled_(false),
num_entries_(0),
retry_scheduled_(false) {
@@ -106,12 +104,10 @@ SyncSessionSnapshot::SyncSessionSnapshot(
const syncable::ModelTypePayloadMap& download_progress_markers,
bool more_to_sync,
bool is_silenced,
- int64 unsynced_count,
int num_encryption_conflicts,
int num_hierarchy_conflicts,
int num_simple_conflicts,
int num_server_conflicts,
- bool did_commit_items,
const SyncSourceInfo& source,
bool notifications_enabled,
size_t num_entries,
@@ -125,12 +121,10 @@ SyncSessionSnapshot::SyncSessionSnapshot(
download_progress_markers_(download_progress_markers),
has_more_to_sync_(more_to_sync),
is_silenced_(is_silenced),
- unsynced_count_(unsynced_count),
num_encryption_conflicts_(num_encryption_conflicts),
num_hierarchy_conflicts_(num_hierarchy_conflicts),
num_simple_conflicts_(num_simple_conflicts),
num_server_conflicts_(num_server_conflicts),
- did_commit_items_(did_commit_items),
source_(source),
notifications_enabled_(notifications_enabled),
num_entries_(num_entries),
@@ -154,8 +148,6 @@ DictionaryValue* SyncSessionSnapshot::ToValue() const {
value->SetBoolean("hasMoreToSync", has_more_to_sync_);
value->SetBoolean("isSilenced", is_silenced_);
// We don't care too much if we lose precision here, also.
- value->SetInteger("unsyncedCount",
- static_cast<int>(unsynced_count_));
value->SetInteger("numEncryptionConflicts",
num_encryption_conflicts_);
value->SetInteger("numHierarchyConflicts",
@@ -164,7 +156,6 @@ DictionaryValue* SyncSessionSnapshot::ToValue() const {
num_simple_conflicts_);
value->SetInteger("numServerConflicts",
num_server_conflicts_);
- value->SetBoolean("didCommitItems", did_commit_items_);
value->SetInteger("numEntries", num_entries_);
value->Set("source", source_.ToValue());
value->SetBoolean("notificationsEnabled", notifications_enabled_);
@@ -213,10 +204,6 @@ bool SyncSessionSnapshot::is_silenced() const {
return is_silenced_;
}
-int64 SyncSessionSnapshot::unsynced_count() const {
- return unsynced_count_;
-}
-
int SyncSessionSnapshot::num_encryption_conflicts() const {
return num_encryption_conflicts_;
}
@@ -233,10 +220,6 @@ int SyncSessionSnapshot::num_server_conflicts() const {
return num_server_conflicts_;
}
-bool SyncSessionSnapshot::did_commit_items() const {
- return did_commit_items_;
-}
-
SyncSourceInfo SyncSessionSnapshot::source() const {
return source_;
}
@@ -381,11 +364,9 @@ bool UpdateProgress::HasConflictingUpdates() const {
}
AllModelTypeState::AllModelTypeState(bool* dirty_flag)
- : unsynced_handles(dirty_flag),
- syncer_status(dirty_flag),
+ : syncer_status(dirty_flag),
error(dirty_flag),
- num_server_changes_remaining(dirty_flag, 0),
- commit_set(ModelSafeRoutingInfo()) {
+ num_server_changes_remaining(dirty_flag, 0) {
}
AllModelTypeState::~AllModelTypeState() {}
diff --git a/sync/sessions/session_state.h b/sync/sessions/session_state.h
index 3f37cd0..c2a3edc 100644
--- a/sync/sessions/session_state.h
+++ b/sync/sessions/session_state.h
@@ -23,7 +23,6 @@
#include "sync/engine/syncer_types.h"
#include "sync/engine/syncproto.h"
#include "sync/protocol/sync_protocol_error.h"
-#include "sync/sessions/ordered_commit_set.h"
#include "sync/syncable/model_type.h"
#include "sync/syncable/model_type_payload_map.h"
#include "sync/syncable/syncable.h"
@@ -114,12 +113,10 @@ class SyncSessionSnapshot {
const syncable::ModelTypePayloadMap& download_progress_markers,
bool more_to_sync,
bool is_silenced,
- int64 unsynced_count,
int num_encryption_conflicts,
int num_hierarchy_conflicts,
int num_simple_conflicts,
int num_server_conflicts,
- bool did_commit_items,
const SyncSourceInfo& source,
bool notifications_enabled,
size_t num_entries,
@@ -140,7 +137,6 @@ class SyncSessionSnapshot {
syncable::ModelTypePayloadMap download_progress_markers() const;
bool has_more_to_sync() const;
bool is_silenced() const;
- int64 unsynced_count() const;
int num_encryption_conflicts() const;
int num_hierarchy_conflicts() const;
int num_simple_conflicts() const;
@@ -161,12 +157,10 @@ class SyncSessionSnapshot {
syncable::ModelTypePayloadMap download_progress_markers_;
bool has_more_to_sync_;
bool is_silenced_;
- int64 unsynced_count_;
int num_encryption_conflicts_;
int num_hierarchy_conflicts_;
int num_simple_conflicts_;
int num_server_conflicts_;
- bool did_commit_items_;
SyncSourceInfo source_;
bool notifications_enabled_;
size_t num_entries_;
@@ -321,20 +315,15 @@ struct AllModelTypeState {
explicit AllModelTypeState(bool* dirty_flag);
~AllModelTypeState();
- // Commits for all model types are bundled together into a single message.
- ClientToServerMessage commit_message;
- ClientToServerResponse commit_response;
// We GetUpdates for some combination of types at once.
// requested_update_types stores the set of types which were requested.
syncable::ModelTypeSet updates_request_types;
ClientToServerResponse updates_response;
// Used to build the shared commit message.
- DirtyOnWrite<std::vector<int64> > unsynced_handles;
DirtyOnWrite<SyncerStatus> syncer_status;
DirtyOnWrite<ErrorCounters> error;
SyncCycleControlParameters control_params;
DirtyOnWrite<int64> num_server_changes_remaining;
- OrderedCommitSet commit_set;
};
// Grouping of all state that applies to a single ModelSafeGroup.
diff --git a/sync/sessions/session_state_unittest.cc b/sync/sessions/session_state_unittest.cc
index 011f832..7881de0 100644
--- a/sync/sessions/session_state_unittest.cc
+++ b/sync/sessions/session_state_unittest.cc
@@ -96,12 +96,10 @@ TEST_F(SessionStateTest, SyncSessionSnapshotToValue) {
const bool kHasMoreToSync = false;
const bool kIsSilenced = true;
- const int kUnsyncedCount = 1053;
const int kNumEncryptionConflicts = 1054;
const int kNumHierarchyConflicts = 1055;
const int kNumSimpleConflicts = 1056;
const int kNumServerConflicts = 1057;
- const bool kDidCommitItems = true;
SyncSourceInfo source;
scoped_ptr<DictionaryValue> expected_source_value(source.ToValue());
@@ -114,19 +112,17 @@ TEST_F(SessionStateTest, SyncSessionSnapshotToValue) {
download_progress_markers,
kHasMoreToSync,
kIsSilenced,
- kUnsyncedCount,
kNumEncryptionConflicts,
kNumHierarchyConflicts,
kNumSimpleConflicts,
kNumServerConflicts,
- kDidCommitItems,
source,
false,
0,
base::Time::Now(),
false);
scoped_ptr<DictionaryValue> value(snapshot.ToValue());
- EXPECT_EQ(16u, value->size());
+ EXPECT_EQ(14u, value->size());
ExpectDictDictionaryValue(*expected_syncer_status_value, *value,
"syncerStatus");
ExpectDictIntegerValue(kNumServerChangesRemaining, *value,
@@ -138,7 +134,6 @@ TEST_F(SessionStateTest, SyncSessionSnapshotToValue) {
*value, "downloadProgressMarkers");
ExpectDictBooleanValue(kHasMoreToSync, *value, "hasMoreToSync");
ExpectDictBooleanValue(kIsSilenced, *value, "isSilenced");
- ExpectDictIntegerValue(kUnsyncedCount, *value, "unsyncedCount");
ExpectDictIntegerValue(kNumEncryptionConflicts, *value,
"numEncryptionConflicts");
ExpectDictIntegerValue(kNumHierarchyConflicts, *value,
@@ -147,8 +142,6 @@ TEST_F(SessionStateTest, SyncSessionSnapshotToValue) {
"numSimpleConflicts");
ExpectDictIntegerValue(kNumServerConflicts, *value,
"numServerConflicts");
- ExpectDictBooleanValue(kDidCommitItems, *value,
- "didCommitItems");
ExpectDictDictionaryValue(*expected_source_value, *value, "source");
ExpectDictBooleanValue(false, *value, "notificationsEnabled");
}
diff --git a/sync/sessions/status_controller.cc b/sync/sessions/status_controller.cc
index 19fdbaf..a88d93c 100644
--- a/sync/sessions/status_controller.cc
+++ b/sync/sessions/status_controller.cc
@@ -142,13 +142,6 @@ void StatusController::set_num_successful_bookmark_commits(int value) {
shared_.syncer_status.mutate()->num_successful_bookmark_commits = value;
}
-void StatusController::set_unsynced_handles(
- const std::vector<int64>& unsynced_handles) {
- if (!operator==(unsynced_handles, shared_.unsynced_handles.value())) {
- *(shared_.unsynced_handles.mutate()) = unsynced_handles;
- }
-}
-
void StatusController::increment_num_successful_bookmark_commits() {
set_num_successful_bookmark_commits(
shared_.syncer_status.value().num_successful_bookmark_commits + 1);
@@ -180,14 +173,17 @@ void StatusController::set_last_post_commit_result(const SyncerError result) {
shared_.error.mutate()->last_post_commit_result = result;
}
+SyncerError StatusController::last_post_commit_result() const {
+ return shared_.error.value().last_post_commit_result;
+}
+
void StatusController::set_last_process_commit_response_result(
const SyncerError result) {
shared_.error.mutate()->last_process_commit_response_result = result;
}
-void StatusController::set_commit_set(const OrderedCommitSet& commit_set) {
- DCHECK(!group_restriction_in_effect_);
- shared_.commit_set = commit_set;
+SyncerError StatusController::last_process_commit_response_result() const {
+ return shared_.error.value().last_process_commit_response_result;
}
void StatusController::update_conflicts_resolved(bool resolved) {
@@ -196,9 +192,6 @@ void StatusController::update_conflicts_resolved(bool resolved) {
void StatusController::reset_conflicts_resolved() {
shared_.control_params.conflicts_resolved = false;
}
-void StatusController::set_items_committed() {
- shared_.control_params.items_committed = true;
-}
// Returns the number of updates received from the sync server.
int64 StatusController::CountUpdates() const {
@@ -210,12 +203,6 @@ int64 StatusController::CountUpdates() const {
}
}
-bool StatusController::CurrentCommitIdProjectionHasIndex(size_t index) {
- OrderedCommitSet::Projection proj =
- shared_.commit_set.GetCommitIdProjection(group_restriction_);
- return std::binary_search(proj.begin(), proj.end(), index);
-}
-
bool StatusController::HasConflictingUpdates() const {
DCHECK(!group_restriction_in_effect_)
<< "HasConflictingUpdates applies to all ModelSafeGroups";
diff --git a/sync/sessions/status_controller.h b/sync/sessions/status_controller.h
index 7e2f258..d91efec 100644
--- a/sync/sessions/status_controller.h
+++ b/sync/sessions/status_controller.h
@@ -71,18 +71,6 @@ class StatusController {
ModelSafeGroup group);
// ClientToServer messages.
- const ClientToServerMessage& commit_message() {
- return shared_.commit_message;
- }
- ClientToServerMessage* mutable_commit_message() {
- return &shared_.commit_message;
- }
- const ClientToServerResponse& commit_response() const {
- return shared_.commit_response;
- }
- ClientToServerResponse* mutable_commit_response() {
- return &shared_.commit_response;
- }
const syncable::ModelTypeSet updates_request_types() const {
return shared_.updates_request_types;
}
@@ -109,41 +97,17 @@ class StatusController {
return shared_.num_server_changes_remaining.value();
}
- // Commit path data.
- const std::vector<syncable::Id>& commit_ids() const {
- DCHECK(!group_restriction_in_effect_) << "Group restriction in effect!";
- return shared_.commit_set.GetAllCommitIds();
- }
- const OrderedCommitSet::Projection& commit_id_projection() {
+ const OrderedCommitSet::Projection& commit_id_projection(
+ const sessions::OrderedCommitSet &commit_set) {
DCHECK(group_restriction_in_effect_)
<< "No group restriction for projection.";
- return shared_.commit_set.GetCommitIdProjection(group_restriction_);
- }
- const syncable::Id& GetCommitIdAt(size_t index) {
- DCHECK(CurrentCommitIdProjectionHasIndex(index));
- return shared_.commit_set.GetCommitIdAt(index);
- }
- syncable::ModelType GetCommitModelTypeAt(size_t index) {
- DCHECK(CurrentCommitIdProjectionHasIndex(index));
- return shared_.commit_set.GetModelTypeAt(index);
- }
- syncable::ModelType GetUnrestrictedCommitModelTypeAt(size_t index) const {
- DCHECK(!group_restriction_in_effect_) << "Group restriction in effect!";
- return shared_.commit_set.GetModelTypeAt(index);
- }
- const std::vector<int64>& unsynced_handles() const {
- DCHECK(!group_restriction_in_effect_)
- << "unsynced_handles is unrestricted.";
- return shared_.unsynced_handles.value();
+ return commit_set.GetCommitIdProjection(group_restriction_);
}
// Control parameters for sync cycles.
bool conflicts_resolved() const {
return shared_.control_params.conflicts_resolved;
}
- bool did_commit_items() const {
- return shared_.control_params.items_committed;
- }
// If a GetUpdates for any data type resulted in downloading an update that
// is in conflict, this method returns true.
@@ -165,13 +129,6 @@ class StatusController {
// Returns the number of updates received from the sync server.
int64 CountUpdates() const;
- // Returns true iff any of the commit ids added during this session are
- // bookmark related, and the bookmark group restriction is in effect.
- bool HasBookmarkCommitActivity() const {
- return ActiveGroupRestrictionIncludesModel(syncable::BOOKMARKS) &&
- shared_.commit_set.HasBookmarkCommitId();
- }
-
// Returns true if the last download_updates_command received a valid
// server response.
bool download_updates_succeeded() const {
@@ -196,17 +153,13 @@ class StatusController {
return sync_start_time_;
}
- // Check whether a particular model is included by the active group
- // restriction.
- bool ActiveGroupRestrictionIncludesModel(syncable::ModelType model) const {
- if (!group_restriction_in_effect_)
- return true;
- ModelSafeRoutingInfo::const_iterator it = routing_info_.find(model);
- if (it == routing_info_.end())
- return false;
- return group_restriction() == it->second;
+ bool HasBookmarkCommitActivity() const {
+ return ActiveGroupRestrictionIncludesModel(syncable::BOOKMARKS);
}
+ SyncerError last_post_commit_result() const;
+ SyncerError last_process_commit_response_result() const;
+
// A toolbelt full of methods for updating counters and flags.
void set_num_server_changes_remaining(int64 changes_remaining);
void set_invalid_store(bool invalid_store);
@@ -217,7 +170,6 @@ class StatusController {
void increment_num_tombstone_updates_downloaded_by(int value);
void increment_num_reflected_updates_downloaded_by(int value);
void set_types_needing_local_migration(syncable::ModelTypeSet types);
- void set_unsynced_handles(const std::vector<int64>& unsynced_handles);
void increment_num_local_overwrites();
void increment_num_server_overwrites();
void set_sync_protocol_error(const SyncProtocolError& error);
@@ -225,10 +177,8 @@ class StatusController {
void set_last_post_commit_result(const SyncerError result);
void set_last_process_commit_response_result(const SyncerError result);
- void set_commit_set(const OrderedCommitSet& commit_set);
void update_conflicts_resolved(bool resolved);
void reset_conflicts_resolved();
- void set_items_committed();
void UpdateStartTime();
@@ -239,9 +189,16 @@ class StatusController {
private:
friend class ScopedModelSafeGroupRestriction;
- // Returns true iff the commit id projection for |group_restriction_|
- // references position |index| into the full set of commit ids in play.
- bool CurrentCommitIdProjectionHasIndex(size_t index);
+ // Check whether a particular model is included by the active group
+ // restriction.
+ bool ActiveGroupRestrictionIncludesModel(syncable::ModelType model) const {
+ if (!group_restriction_in_effect_)
+ return true;
+ ModelSafeRoutingInfo::const_iterator it = routing_info_.find(model);
+ if (it == routing_info_.end())
+ return false;
+ return group_restriction() == it->second;
+ }
// Returns the state, if it exists, or NULL otherwise.
const PerModelSafeGroupState* GetModelSafeGroupState(
diff --git a/sync/sessions/status_controller_unittest.cc b/sync/sessions/status_controller_unittest.cc
index 59d3919..9f38e80 100644
--- a/sync/sessions/status_controller_unittest.cc
+++ b/sync/sessions/status_controller_unittest.cc
@@ -39,29 +39,12 @@ TEST_F(StatusControllerTest, GetsDirty) {
AddSimpleConflictingItemById(syncable::Id());
}
EXPECT_TRUE(status.TestAndClearIsDirty());
-
- std::vector<int64> v;
- v.push_back(1);
- status.set_unsynced_handles(v);
- EXPECT_TRUE(status.TestAndClearIsDirty());
- std::vector<int64> v2;
- v2.push_back(1);
- status.set_unsynced_handles(v2);
- EXPECT_FALSE(status.TestAndClearIsDirty()); // Test for deep comparison.
}
TEST_F(StatusControllerTest, StaysClean) {
StatusController status(routes_);
status.update_conflicts_resolved(true);
EXPECT_FALSE(status.TestAndClearIsDirty());
-
- status.set_items_committed();
- EXPECT_FALSE(status.TestAndClearIsDirty());
-
- OrderedCommitSet commits(routes_);
- commits.AddCommitItem(0, syncable::Id(), syncable::BOOKMARKS);
- status.set_commit_set(commits);
- EXPECT_FALSE(status.TestAndClearIsDirty());
}
// This test is useful, as simple as it sounds, due to the copy-paste prone
@@ -93,11 +76,6 @@ TEST_F(StatusControllerTest, ReadYourWrites) {
for (int i = 0; i < 14; i++)
status.increment_num_successful_commits();
EXPECT_EQ(14, status.syncer_status().num_successful_commits);
-
- std::vector<int64> v;
- v.push_back(16);
- status.set_unsynced_handles(v);
- EXPECT_EQ(16, v[0]);
}
TEST_F(StatusControllerTest, HasConflictingUpdates) {
@@ -179,16 +157,9 @@ TEST_F(StatusControllerTest, Unrestricted) {
const UpdateProgress* progress =
status.GetUnrestrictedUpdateProgress(GROUP_UI);
EXPECT_FALSE(progress);
- status.mutable_commit_message();
- status.commit_response();
- status.mutable_commit_response();
- status.updates_response();
- status.mutable_updates_response();
status.error();
status.syncer_status();
status.num_server_changes_remaining();
- status.commit_ids();
- status.HasBookmarkCommitActivity();
status.download_updates_succeeded();
status.ServerSaysNothingMoreToDownload();
status.group_restriction();
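
The GetsDirty/StaysClean deletions above remove the unsynced_handles coverage, but the underlying pattern those assertions exercised is unchanged: shared state members are wrapped in DirtyOnWrite, mutate() flips a shared dirty bit, and setters compare before mutating so that writing an equal value stays clean. A minimal standalone sketch of that pattern, with simplified types rather than the real sync/sessions code:

// Illustrative sketch of the dirty-flag bookkeeping, simplified types only.
#include <cassert>
#include <vector>

template <typename T>
class DirtyOnWrite {
 public:
  explicit DirtyOnWrite(bool* dirty) : dirty_(dirty) {}
  const T& value() const { return t_; }
  T* mutate() {          // any mutation marks the shared state dirty
    *dirty_ = true;
    return &t_;
  }
 private:
  T t_;
  bool* dirty_;
};

class MiniStatus {
 public:
  MiniStatus() : is_dirty_(false), handles_(&is_dirty_) {}

  void set_handles(const std::vector<long long>& v) {
    if (v != handles_.value())  // deep comparison before mutating
      *handles_.mutate() = v;
  }

  bool TestAndClearIsDirty() {
    bool was_dirty = is_dirty_;
    is_dirty_ = false;
    return was_dirty;
  }

 private:
  bool is_dirty_;
  DirtyOnWrite<std::vector<long long> > handles_;
};

int main() {
  MiniStatus status;
  std::vector<long long> v(1, 1);
  status.set_handles(v);
  assert(status.TestAndClearIsDirty());   // first write dirties the state
  std::vector<long long> v2(1, 1);
  status.set_handles(v2);
  assert(!status.TestAndClearIsDirty());  // writing an equal value stays clean
  return 0;
}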
diff --git a/sync/sessions/sync_session.cc b/sync/sessions/sync_session.cc
index 4c58215..85ac8cc 100644
--- a/sync/sessions/sync_session.cc
+++ b/sync/sessions/sync_session.cc
@@ -159,12 +159,10 @@ SyncSessionSnapshot SyncSession::TakeSnapshot() const {
download_progress_markers,
HasMoreToSync(),
delegate_->IsSyncingCurrentlySilenced(),
- status_controller_->unsynced_handles().size(),
status_controller_->TotalNumEncryptionConflictingItems(),
status_controller_->TotalNumHierarchyConflictingItems(),
status_controller_->TotalNumSimpleConflictingItems(),
status_controller_->TotalNumServerConflictingItems(),
- status_controller_->did_commit_items(),
source_,
context_->notifications_enabled(),
dir->GetEntriesCount(),
@@ -182,11 +180,7 @@ void SyncSession::SendEventNotification(SyncEngineEvent::EventCause cause) {
bool SyncSession::HasMoreToSync() const {
const StatusController* status = status_controller_.get();
- return ((status->commit_ids().size() < status->unsynced_handles().size()) &&
- status->syncer_status().num_successful_commits > 0) ||
- status->conflicts_resolved();
- // Or, we have conflicting updates, but we're making progress on
- // resolving them...
+ return status->conflicts_resolved();
}
const std::set<ModelSafeGroup>& SyncSession::GetEnabledGroups() const {
diff --git a/sync/sessions/sync_session_unittest.cc b/sync/sessions/sync_session_unittest.cc
index d5ce46a..cf6b293 100644
--- a/sync/sessions/sync_session_unittest.cc
+++ b/sync/sessions/sync_session_unittest.cc
@@ -192,26 +192,6 @@ TEST_F(SyncSessionTest, SetWriteTransaction) {
}
}
-TEST_F(SyncSessionTest, MoreToSyncIfUnsyncedGreaterThanCommitted) {
- // If any forward progress was made during the session, and the number of
- // unsynced handles still exceeds the number of commit ids we added, there is
- // more to sync. For example, this occurs if we had more commit ids
- // than could fit in a single commit batch.
- EXPECT_FALSE(session_->HasMoreToSync());
- OrderedCommitSet commit_set(routes_);
- commit_set.AddCommitItem(0, syncable::Id(), syncable::BOOKMARKS);
- status()->set_commit_set(commit_set);
- EXPECT_FALSE(session_->HasMoreToSync());
-
- std::vector<int64> unsynced_handles;
- unsynced_handles.push_back(1);
- unsynced_handles.push_back(2);
- status()->set_unsynced_handles(unsynced_handles);
- EXPECT_FALSE(session_->HasMoreToSync());
- status()->increment_num_successful_commits();
- EXPECT_TRUE(session_->HasMoreToSync());
-}
-
TEST_F(SyncSessionTest, MoreToDownloadIfDownloadFailed) {
status()->set_updates_request_types(ParamsMeaningAllEnabledTypes());
diff --git a/sync/sessions/test_util.cc b/sync/sessions/test_util.cc
index 387dbf6..0b1ad24 100644
--- a/sync/sessions/test_util.cc
+++ b/sync/sessions/test_util.cc
@@ -38,7 +38,6 @@ void SimulateSuccess(sessions::SyncSession* session,
ADD_FAILURE() << "Shouldn't have more to sync";
}
ASSERT_EQ(0U, session->status_controller().num_server_changes_remaining());
- ASSERT_EQ(0U, session->status_controller().unsynced_handles().size());
}
void SimulateThrottledImpl(sessions::SyncSession* session,
diff --git a/sync/sync.gyp b/sync/sync.gyp
index f10d249..8bb5c2c 100644
--- a/sync/sync.gyp
+++ b/sync/sync.gyp
@@ -41,6 +41,8 @@
'engine/cleanup_disabled_types_command.h',
'engine/clear_data_command.cc',
'engine/clear_data_command.h',
+ 'engine/commit.cc',
+ 'engine/commit.h',
'engine/conflict_resolver.cc',
'engine/conflict_resolver.h',
'engine/download_updates_command.cc',
@@ -63,8 +65,6 @@
'engine/nudge_source.h',
'engine/polling_constants.cc',
'engine/polling_constants.h',
- 'engine/post_commit_message_command.cc',
- 'engine/post_commit_message_command.h',
'engine/process_commit_response_command.cc',
'engine/process_commit_response_command.h',
'engine/process_updates_command.cc',
diff --git a/sync/syncable/syncable.h b/sync/syncable/syncable.h
index b3ed4a8..0d0699f 100644
--- a/sync/syncable/syncable.h
+++ b/sync/syncable/syncable.h
@@ -199,6 +199,8 @@ enum {
};
enum BitTemp {
+ // Not to be confused with IS_UNSYNCED, this bit is used to detect local
+ // changes to items that happen during the server Commit operation.
SYNCING = BIT_TEMPS_BEGIN,
BIT_TEMPS_END,
};
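
The new comment on SYNCING describes its purpose but not the mechanics. As a rough, hypothetical illustration (not the syncable implementation): the temp bit is set on items included in an in-flight commit, any local write clears it, and commit-response processing only clears IS_UNSYNCED for items whose bit survived.

// Hypothetical, simplified illustration of a SYNCING-style temp bit.
#include <cassert>

struct Entry {
  Entry() : is_unsynced(true), syncing(false) {}
  bool is_unsynced;  // durable "needs commit" flag
  bool syncing;      // temp bit: item is part of an in-flight commit
};

void StartCommit(Entry* e) {
  e->syncing = true;    // mark the item as in-flight
}

void LocalEdit(Entry* e) {
  e->is_unsynced = true;
  e->syncing = false;   // a local change during the commit clears the temp bit
}

void ProcessCommitSuccess(Entry* e) {
  // Only clear IS_UNSYNCED if nothing changed while the commit was in flight.
  if (e->syncing)
    e->is_unsynced = false;
  e->syncing = false;
}

int main() {
  Entry quiet;
  StartCommit(&quiet);
  ProcessCommitSuccess(&quiet);
  assert(!quiet.is_unsynced);  // committed cleanly

  Entry edited;
  StartCommit(&edited);
  LocalEdit(&edited);          // user edits the item mid-commit
  ProcessCommitSuccess(&edited);
  assert(edited.is_unsynced);  // will be committed again next cycle
  return 0;
}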
diff --git a/sync/test/engine/mock_connection_manager.cc b/sync/test/engine/mock_connection_manager.cc
index 166b27a..fbb2d39 100644
--- a/sync/test/engine/mock_connection_manager.cc
+++ b/sync/test/engine/mock_connection_manager.cc
@@ -169,6 +169,7 @@ bool MockConnectionManager::PostBufferToPath(PostBufferParams* params,
if (post.message_contents() == ClientToServerMessage::COMMIT &&
!mid_commit_callback_.is_null()) {
mid_commit_callback_.Run();
+ mid_commit_callback_.Reset();
}
if (mid_commit_observer_) {
mid_commit_observer_->Observe();
diff --git a/sync/test/engine/mock_connection_manager.h b/sync/test/engine/mock_connection_manager.h
index f603de1..66d9aa9 100644
--- a/sync/test/engine/mock_connection_manager.h
+++ b/sync/test/engine/mock_connection_manager.h
@@ -42,6 +42,7 @@ class MockConnectionManager : public browser_sync::ServerConnectionManager {
browser_sync::ScopedServerStatusWatcher* watcher) OVERRIDE;
// Control of commit response.
+ // NOTE: The commit callback is invoked only once, then reset.
void SetMidCommitCallback(const base::Closure& callback);
void SetMidCommitObserver(MidCommitObserver* observer);