diff options
author | rlarocque@chromium.org <rlarocque@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2012-05-26 00:06:47 +0000 |
---|---|---|
committer | rlarocque@chromium.org <rlarocque@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2012-05-26 00:06:47 +0000 |
commit | ce0f41696951562391ad3d1bf180c2ae08db66fb (patch) | |
tree | a502e5d771252e26df5f51477eb477696a2dc7f7 /sync/engine/get_commit_ids_command.cc | |
parent | 085e92da549d6e20df4e023687aad2c23e05aea2 (diff) | |
download | chromium_src-ce0f41696951562391ad3d1bf180c2ae08db66fb.zip chromium_src-ce0f41696951562391ad3d1bf180c2ae08db66fb.tar.gz chromium_src-ce0f41696951562391ad3d1bf180c2ae08db66fb.tar.bz2 |
sync: Loop committing items without downloading updates
Since its inception sync has required all commits to be preceded by a
GetUpdates request. This was done to try to ensure we detect and resolve
conflicts on the client, rather than having two conflicting commits
collide at the server. It could never work perfectly, but it was likely
to work in most cases and the server would bounce the commit if it
didn't.
Now we have a new server that doesn't always give us the latest version
of a node when we ask for it, especially when the request was not
solicited by the server (i.e., not triggered by notification). The update
before commit is now much less likely to detect conflicts. Even worse,
the useless update requests can be surprisingly expensive in certain
scenarios.
This change allows us to avoid fetching updates between 'batches' of
commits. This should improve performance in the case where we have lots
of items to commit (i.e., first-time sync) and reduce load on the server.
This CL has some far-reaching effects. This is in part because I decided
to refactor the commit code, but major changes would have been required
with or without the refactor. Highlights include:
- The commit-related SyncerCommands are now parameterized with local
variables, allowing us to remove many members from the SyncSession
classes.
- Several syncer states have been collapsed into one COMMIT state
which redirects to the new BuildAndPostCommits() function.
- The PostCommitMessageCommand had been reduced to one line, which
has now been inlined into the BuildAndPostCommits() function.
- The unsynced_handles counter has been removed for now. Following this
change, it would have always been zero unless an error was encountered
during the commit. The functions that reference it expect different
behaviour and would have been broken by this change.
- The code to put extensions activity data back into the
ExtensionsActivityMonitor in case of failure has been moved around to
avoid double-counting if we have to post many commit messages.
- A CONFLICT response from the server is now treated as a transient
error. If we had continued to treat it as a success we might risk
going into an infinite loop. See comment in code for details.
- The "purposeless anachronism" conflicting_new_folder_ids_ has been
removed.
Review URL: https://chromiumcodereview.appspot.com/10210009
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@139159 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'sync/engine/get_commit_ids_command.cc')
-rw-r--r-- | sync/engine/get_commit_ids_command.cc | 40 |
1 files changed, 19 insertions, 21 deletions
diff --git a/sync/engine/get_commit_ids_command.cc b/sync/engine/get_commit_ids_command.cc index ecc5cf9..a09d5d7 100644 --- a/sync/engine/get_commit_ids_command.cc +++ b/sync/engine/get_commit_ids_command.cc @@ -22,8 +22,12 @@ using sessions::OrderedCommitSet; using sessions::SyncSession; using sessions::StatusController; -GetCommitIdsCommand::GetCommitIdsCommand(int commit_batch_size) - : requested_commit_batch_size_(commit_batch_size) {} +GetCommitIdsCommand::GetCommitIdsCommand( + const size_t commit_batch_size, + sessions::OrderedCommitSet* commit_set) + : requested_commit_batch_size_(commit_batch_size), + commit_set_(commit_set) { +} GetCommitIdsCommand::~GetCommitIdsCommand() {} @@ -61,17 +65,12 @@ SyncerError GetCommitIdsCommand::ExecuteImpl(SyncSession* session) { session->routing_info(), ready_unsynced_set); - StatusController* status = session->mutable_status_controller(); - syncable::Directory::UnsyncedMetaHandles ready_unsynced_vector( - ready_unsynced_set.begin(), ready_unsynced_set.end()); - status->set_unsynced_handles(ready_unsynced_vector); const vector<syncable::Id>& verified_commit_ids = - ordered_commit_set_->GetAllCommitIds(); + commit_set_->GetAllCommitIds(); for (size_t i = 0; i < verified_commit_ids.size(); i++) DVLOG(1) << "Debug commit batch result:" << verified_commit_ids[i]; - status->set_commit_set(*ordered_commit_set_.get()); return SYNCER_OK; } @@ -180,7 +179,7 @@ bool GetCommitIdsCommand::AddUncommittedParentsAndTheirPredecessors( syncable::Entry parent(trans, syncable::GET_BY_ID, parent_id); CHECK(parent.good()) << "Bad user-only parent in item path."; int64 handle = parent.Get(syncable::META_HANDLE); - if (ordered_commit_set_->HaveCommitItem(handle)) { + if (commit_set_->HaveCommitItem(handle)) { // We've already added this parent (and therefore all of its parents). // We can return early. 
break; @@ -188,7 +187,7 @@ bool GetCommitIdsCommand::AddUncommittedParentsAndTheirPredecessors( if (!AddItemThenPredecessors(trans, ready_unsynced_set, parent, &item_dependencies)) { // There was a parent/predecessor in conflict. We return without adding - // anything to |ordered_commit_set_|. + // anything to |commit_set|. DVLOG(1) << "Parent or parent's predecessor was in conflict, omitting " << item; return false; @@ -226,7 +225,7 @@ bool GetCommitIdsCommand::AddItemThenPredecessors( const syncable::Entry& item, OrderedCommitSet* result) const { int64 item_handle = item.Get(syncable::META_HANDLE); - if (ordered_commit_set_->HaveCommitItem(item_handle)) { + if (commit_set_->HaveCommitItem(item_handle)) { // We've already added this item to the commit set, and so must have // already added the predecessors as well. return true; @@ -243,7 +242,7 @@ bool GetCommitIdsCommand::AddItemThenPredecessors( if (!prev.Get(syncable::IS_UNSYNCED)) break; int64 handle = prev.Get(syncable::META_HANDLE); - if (ordered_commit_set_->HaveCommitItem(handle)) { + if (commit_set_->HaveCommitItem(handle)) { // We've already added this item to the commit set, and so must have // already added the predecessors as well. 
return true; @@ -276,7 +275,7 @@ bool GetCommitIdsCommand::AddPredecessorsThenItem( } bool GetCommitIdsCommand::IsCommitBatchFull() const { - return ordered_commit_set_->Size() >= requested_commit_batch_size_; + return commit_set_->Size() >= requested_commit_batch_size_; } void GetCommitIdsCommand::AddCreatesAndMoves( @@ -287,7 +286,7 @@ void GetCommitIdsCommand::AddCreatesAndMoves( for (std::set<int64>::const_iterator iter = ready_unsynced_set.begin(); !IsCommitBatchFull() && iter != ready_unsynced_set.end(); ++iter) { int64 metahandle = *iter; - if (ordered_commit_set_->HaveCommitItem(metahandle)) + if (commit_set_->HaveCommitItem(metahandle)) continue; syncable::Entry entry(write_transaction, @@ -308,14 +307,14 @@ void GetCommitIdsCommand::AddCreatesAndMoves( ready_unsynced_set, entry, &item_dependencies)) { - ordered_commit_set_->Append(item_dependencies); + commit_set_->Append(item_dependencies); } } } // It's possible that we overcommitted while trying to expand dependent // items. If so, truncate the set down to the allowed size. - ordered_commit_set_->Truncate(requested_commit_batch_size_); + commit_set_->Truncate(requested_commit_batch_size_); } void GetCommitIdsCommand::AddDeletes( @@ -326,7 +325,7 @@ void GetCommitIdsCommand::AddDeletes( for (std::set<int64>::const_iterator iter = ready_unsynced_set.begin(); !IsCommitBatchFull() && iter != ready_unsynced_set.end(); ++iter) { int64 metahandle = *iter; - if (ordered_commit_set_->HaveCommitItem(metahandle)) + if (commit_set_->HaveCommitItem(metahandle)) continue; syncable::Entry entry(write_transaction, syncable::GET_BY_HANDLE, @@ -357,7 +356,7 @@ void GetCommitIdsCommand::AddDeletes( DVLOG(1) << "Inserting moved and deleted entry, will be missed by " << "delete roll." 
<< entry.Get(syncable::ID); - ordered_commit_set_->AddCommitItem(metahandle, + commit_set_->AddCommitItem(metahandle, entry.Get(syncable::ID), entry.GetModelType()); } @@ -388,14 +387,14 @@ void GetCommitIdsCommand::AddDeletes( for (std::set<int64>::const_iterator iter = ready_unsynced_set.begin(); !IsCommitBatchFull() && iter != ready_unsynced_set.end(); ++iter) { int64 metahandle = *iter; - if (ordered_commit_set_->HaveCommitItem(metahandle)) + if (commit_set_->HaveCommitItem(metahandle)) continue; syncable::MutableEntry entry(write_transaction, syncable::GET_BY_HANDLE, metahandle); if (entry.Get(syncable::IS_DEL)) { syncable::Id parent_id = entry.Get(syncable::PARENT_ID); if (legal_delete_parents.count(parent_id)) { - ordered_commit_set_->AddCommitItem(metahandle, entry.Get(syncable::ID), + commit_set_->AddCommitItem(metahandle, entry.Get(syncable::ID), entry.GetModelType()); } } @@ -406,7 +405,6 @@ void GetCommitIdsCommand::BuildCommitIds( syncable::WriteTransaction* write_transaction, const ModelSafeRoutingInfo& routes, const std::set<int64>& ready_unsynced_set) { - ordered_commit_set_.reset(new OrderedCommitSet(routes)); // Commits follow these rules: // 1. Moves or creates are preceded by needed folder creates, from // root to leaf. For folders whose contents are ordered, moves |