author    nick@chromium.org <nick@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2010-07-13 20:27:11 +0000
committer nick@chromium.org <nick@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2010-07-13 20:27:11 +0000
commit    55779e6a7c90f306405003383aabab7dc1d1abc6 (patch)
tree      2c9f50468b57329c06645ccc78e00d5b7cf5ced8
parent    a65aa987739a241f5581b94227e49245543080d0 (diff)
Fix handling of undeletion within the syncer.
Accomplished by:
(a) Plumbing the const CommitMessage into process_commit_response_command.cc, so that we can know the server state better (IS_DEL is no longer authoritative), which spilled over into a couple of test expectations.
(b) Relaxing the enforced invariant that version==0 implies !ServerKnows, for items with client tags.
(c) Using ID renaming instead of delete/recreate to handle collisions on the unique tag.
(d) Upon deletion, the version number goes to 0 so that the server knows to process the item as a recreate if it gets undeleted. Previously, the version number would get synthesized on the client by incrementing the last known version number.
(e) Adding unit tests for pretty much every ordering of delete/undelete I could come up with.

BUG=46769
TEST=unit tests; repeatedly uninstalled/installed extensions

Review URL: http://codereview.chromium.org/2844037

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@52209 0039d316-1c4b-4281-b951-d872f2087c98
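The core of (d) is a small protocol convention: a commit carrying version 0 asks the server to create the entry, so if a client-tagged item's version is reset to 0 once its deletion commits, a later undeletion naturally goes up as a clean re-create instead of a conflicting edit. The snippet below is a minimal, self-contained sketch of that bookkeeping under assumed names (EntrySnapshot, ApplyCommitResponse, and VersionForCommit are illustrative, not the syncer's real types or functions); the actual change threads this logic through build_commit_command.cc and process_commit_response_command.cc as shown in the diff.

```cpp
#include <cstdint>
#include <iostream>
#include <string>

// Hypothetical, simplified stand-in for a syncable entry's commit-relevant
// state; the field names are illustrative, not the real syncable fields.
struct EntrySnapshot {
  int64_t base_version;           // Last version acknowledged by the server.
  bool is_del;                    // Local deleted state.
  std::string unique_client_tag;  // Empty if the item is not client-tagged.
};

// After the server acknowledges a commit, record the new version. For a
// deletion of a client-tagged ("undeletable") item, store version 0 so that
// a later commit of the undeleted item reads as a create/re-create request.
void ApplyCommitResponse(bool committed_as_deleted, int64_t server_version,
                         EntrySnapshot* entry) {
  const bool undeletable = !entry->unique_client_tag.empty();
  entry->base_version =
      (committed_as_deleted && undeletable) ? 0 : server_version;
}

// Version to place in the next commit request: 0 means "create or re-create".
int64_t VersionForCommit(const EntrySnapshot& entry) {
  return entry.base_version <= 0 ? 0 : entry.base_version;
}

int main() {
  EntrySnapshot item{0, false, "extension-abc"};
  std::cout << VersionForCommit(item) << "\n";   // 0: initial creation.

  ApplyCommitResponse(false, 7, &item);          // Server assigns version 7.
  item.is_del = true;                            // User deletes locally.
  std::cout << VersionForCommit(item) << "\n";   // 7: commit the deletion.

  ApplyCommitResponse(true, 8, &item);           // Deletion committed; reset.
  item.is_del = false;                           // User quickly undeletes.
  std::cout << VersionForCommit(item) << "\n";   // 0: server will re-create.
  return 0;
}
```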
-rw-r--r--  chrome/browser/sync/engine/build_commit_command.cc               |  29
-rw-r--r--  chrome/browser/sync/engine/conflict_resolver.cc                  |  45
-rw-r--r--  chrome/browser/sync/engine/download_updates_command.cc           |   4
-rw-r--r--  chrome/browser/sync/engine/download_updates_command_unittest.cc  |   2
-rw-r--r--  chrome/browser/sync/engine/post_commit_message_command.cc        |   6
-rw-r--r--  chrome/browser/sync/engine/process_commit_response_command.cc    | 252
-rw-r--r--  chrome/browser/sync/engine/process_commit_response_command.h     |  51
-rw-r--r--  chrome/browser/sync/engine/syncer_proto_util.cc                  |  20
-rw-r--r--  chrome/browser/sync/engine/syncer_proto_util.h                   |  17
-rw-r--r--  chrome/browser/sync/engine/syncer_proto_util_unittest.cc         |  22
-rw-r--r--  chrome/browser/sync/engine/syncer_unittest.cc                    | 632
-rw-r--r--  chrome/browser/sync/engine/syncer_util.cc                        |  86
-rw-r--r--  chrome/browser/sync/engine/syncproto.h                           |   8
-rw-r--r--  chrome/browser/sync/engine/verify_updates_command.cc             |   4
-rw-r--r--  chrome/browser/sync/sessions/status_controller.h                 |   3
-rw-r--r--  chrome/browser/sync/syncable/syncable.cc                         |  28
-rw-r--r--  chrome/browser/sync/syncable/syncable.h                          |   1
-rw-r--r--  chrome/browser/sync/syncable/syncable_id.cc                      |   6
-rw-r--r--  chrome/browser/sync/syncable/syncable_id.h                       |   5
-rw-r--r--  chrome/test/sync/engine/mock_connection_manager.cc               |  66
-rw-r--r--  chrome/test/sync/engine/mock_connection_manager.h                |  36
21 files changed, 1061 insertions, 262 deletions
diff --git a/chrome/browser/sync/engine/build_commit_command.cc b/chrome/browser/sync/engine/build_commit_command.cc
index 49f5f25..b3767e4 100644
--- a/chrome/browser/sync/engine/build_commit_command.cc
+++ b/chrome/browser/sync/engine/build_commit_command.cc
@@ -92,6 +92,8 @@ void BuildCommitCommand::ExecuteImpl(SyncSession* session) {
commit_message->set_cache_guid(
session->write_transaction()->directory()->cache_guid());
AddExtensionsActivityToMessage(session, commit_message);
+ SyncerProtoUtil::AddRequestBirthday(
+ session->write_transaction()->directory(), &message);
const vector<Id>& commit_ids = session->status_controller()->commit_ids();
for (size_t i = 0; i < commit_ids.size(); i++) {
@@ -125,8 +127,8 @@ void BuildCommitCommand::ExecuteImpl(SyncSession* session) {
meta_entry.Get(syncable::UNIQUE_CLIENT_TAG));
}
- // Deleted items with negative parent ids can be a problem so we set the
- // parent to 0. (TODO(sync): Still true in protocol?).
+ // Deleted items with server-unknown parent ids can be a problem so we set
+ // the parent to 0. (TODO(sync): Still true in protocol?).
Id new_parent_id;
if (meta_entry.Get(syncable::IS_DEL) &&
!meta_entry.Get(syncable::PARENT_ID).ServerKnows()) {
@@ -135,12 +137,12 @@ void BuildCommitCommand::ExecuteImpl(SyncSession* session) {
new_parent_id = meta_entry.Get(syncable::PARENT_ID);
}
sync_entry->set_parent_id(new_parent_id);
- // TODO(sync): Investigate all places that think transactional commits
- // actually exist.
- //
- // This is the only logic we'll need when transactional commits are moved
- // to the server. If our parent has changes, send up the old one so the
- // server can correctly deal with multiple parents.
+
+ // If our parent has changed, send up the old one so the server
+ // can correctly deal with multiple parents.
+ // TODO(nick): With the server keeping track of the primary sync parent,
+ // it should not be necessary to provide the old_parent_id: the version
+ // number should suffice.
if (new_parent_id != meta_entry.Get(syncable::SERVER_PARENT_ID) &&
0 != meta_entry.Get(syncable::BASE_VERSION) &&
syncable::CHANGES_VERSION != meta_entry.Get(syncable::BASE_VERSION)) {
@@ -149,12 +151,15 @@ void BuildCommitCommand::ExecuteImpl(SyncSession* session) {
int64 version = meta_entry.Get(syncable::BASE_VERSION);
if (syncable::CHANGES_VERSION == version || 0 == version) {
- // If this CHECK triggers during unit testing, check that we haven't
- // altered an item that's an unapplied update.
- CHECK(!id.ServerKnows()) << meta_entry;
+ // Undeletions are only supported for items that have a client tag.
+ DCHECK(!id.ServerKnows() ||
+ !meta_entry.Get(syncable::UNIQUE_CLIENT_TAG).empty())
+ << meta_entry;
+
+ // Version 0 means to create or undelete an object.
sync_entry->set_version(0);
} else {
- CHECK(id.ServerKnows()) << meta_entry;
+ DCHECK(id.ServerKnows()) << meta_entry;
sync_entry->set_version(meta_entry.Get(syncable::BASE_VERSION));
}
sync_entry->set_ctime(ClientTimeToServerTime(
diff --git a/chrome/browser/sync/engine/conflict_resolver.cc b/chrome/browser/sync/engine/conflict_resolver.cc
index cb6f1f6..e24c9f9 100644
--- a/chrome/browser/sync/engine/conflict_resolver.cc
+++ b/chrome/browser/sync/engine/conflict_resolver.cc
@@ -95,11 +95,12 @@ ConflictResolver::ProcessSimpleConflict(WriteTransaction* trans,
}
if (!entry.Get(syncable::SERVER_IS_DEL)) {
- // TODO(chron): Should we check more fields? Since IS_UNSYNCED is
- // turned on, this is really probably enough as fields will be overwritten.
- // Check if there's no changes.
-
- // Verbose but easier to debug.
+ // This logic determines "client wins" vs. "server wins" strategy picking.
+ // TODO(nick): The current logic is arbitrary; instead, it ought to be made
+ // consistent with the ModelAssociator behavior for a datatype. It would
+ // be nice if we could route this back to ModelAssociator code to pick one
+ // of three options: CLIENT, SERVER, or MERGE. Some datatypes (autofill)
+ // are easily mergeable.
bool name_matches = entry.Get(syncable::NON_UNIQUE_NAME) ==
entry.Get(syncable::SERVER_NON_UNIQUE_NAME);
bool parent_matches = entry.Get(syncable::PARENT_ID) ==
@@ -130,17 +131,28 @@ ConflictResolver::ProcessSimpleConflict(WriteTransaction* trans,
}
}
- // If the entry's deleted on the server, we can have a directory here.
- entry.Put(syncable::IS_UNSYNCED, true);
-
- SyncerUtil::SplitServerInformationIntoNewEntry(trans, &entry);
-
- MutableEntry server_update(trans, syncable::GET_BY_ID, id);
- CHECK(server_update.good());
- CHECK(server_update.Get(syncable::META_HANDLE) !=
- entry.Get(syncable::META_HANDLE))
- << server_update << entry;
-
+ // The entry is deleted on the server but still exists locally.
+ if (!entry.Get(syncable::UNIQUE_CLIENT_TAG).empty()) {
+ // If we've got a client-unique tag, we can undelete while retaining
+ // our present ID.
+ DCHECK_EQ(entry.Get(syncable::SERVER_VERSION), 0)
+ << "For the server to know to re-create, client-tagged items should "
+ << "revert to version 0 when server-deleted.";
+ OverwriteServerChanges(trans, &entry);
+ // Clobber the versions, just in case the above DCHECK is violated.
+ entry.Put(syncable::SERVER_VERSION, 0);
+ entry.Put(syncable::BASE_VERSION, 0);
+ } else {
+ // Otherwise, we've got to undelete by creating a new locally
+ // uncommitted entry.
+ SyncerUtil::SplitServerInformationIntoNewEntry(trans, &entry);
+
+ MutableEntry server_update(trans, syncable::GET_BY_ID, id);
+ CHECK(server_update.good());
+ CHECK(server_update.Get(syncable::META_HANDLE) !=
+ entry.Get(syncable::META_HANDLE))
+ << server_update << entry;
+ }
return SYNC_PROGRESS;
}
}
@@ -245,7 +257,6 @@ bool AttemptToFixUnsyncedEntryInDeletedServerTree(WriteTransaction* trans,
return true;
}
-
// TODO(chron): needs unit test badly
bool AttemptToFixUpdateEntryInDeletedLocalTree(WriteTransaction* trans,
ConflictSet* conflict_set,
diff --git a/chrome/browser/sync/engine/download_updates_command.cc b/chrome/browser/sync/engine/download_updates_command.cc
index 929506c..94ee7f1 100644
--- a/chrome/browser/sync/engine/download_updates_command.cc
+++ b/chrome/browser/sync/engine/download_updates_command.cc
@@ -72,8 +72,10 @@ void DownloadUpdatesCommand::ExecuteImpl(SyncSession* session) {
get_updates->mutable_caller_info()->set_notifications_enabled(
session->context()->notifications_enabled());
+ SyncerProtoUtil::AddRequestBirthday(dir, &client_to_server_message);
+
bool ok = SyncerProtoUtil::PostClientToServerMessage(
- &client_to_server_message,
+ client_to_server_message,
&update_response,
session);
diff --git a/chrome/browser/sync/engine/download_updates_command_unittest.cc b/chrome/browser/sync/engine/download_updates_command_unittest.cc
index 1b87ad2..8df602c 100644
--- a/chrome/browser/sync/engine/download_updates_command_unittest.cc
+++ b/chrome/browser/sync/engine/download_updates_command_unittest.cc
@@ -111,6 +111,8 @@ TEST_F(DownloadUpdatesCommandTest, OldestTimestampPicked) {
dir->set_last_download_timestamp(type_k, 1000 + i);
ConfigureMockServerConnection();
+ dir->set_store_birthday(mock_server()->store_birthday());
+
syncable::ModelTypeBitSet expected_request_types;
expected_request_types[j] = true;
expected_request_types[k] = true;
diff --git a/chrome/browser/sync/engine/post_commit_message_command.cc b/chrome/browser/sync/engine/post_commit_message_command.cc
index eea5d0e..92a4cc4 100644
--- a/chrome/browser/sync/engine/post_commit_message_command.cc
+++ b/chrome/browser/sync/engine/post_commit_message_command.cc
@@ -28,11 +28,11 @@ void PostCommitMessageCommand::ExecuteImpl(sessions::SyncSession* session) {
if (!dir.good())
return;
sessions::StatusController* status = session->status_controller();
- if (!SyncerProtoUtil::PostClientToServerMessage(
- status->mutable_commit_message(), &response, session)) {
+ if (!SyncerProtoUtil::PostClientToServerMessage(status->commit_message(),
+ &response, session)) {
// None of our changes got through. Clear the SYNCING bit which was
// set to true during BuildCommitCommand, and which may still be true.
- // Not to be confused with IS_UNSYNCED. This bit is used to detect local
+ // Not to be confused with IS_UNSYNCED, this bit is used to detect local
// changes to items that happen during the server Commit operation.
status->increment_num_consecutive_errors();
syncable::WriteTransaction trans(dir, syncable::SYNCER, __FILE__, __LINE__);
diff --git a/chrome/browser/sync/engine/process_commit_response_command.cc b/chrome/browser/sync/engine/process_commit_response_command.cc
index 3abe92f..e02efc9 100644
--- a/chrome/browser/sync/engine/process_commit_response_command.cc
+++ b/chrome/browser/sync/engine/process_commit_response_command.cc
@@ -33,8 +33,10 @@ using syncable::IS_DIR;
using syncable::IS_UNAPPLIED_UPDATE;
using syncable::IS_UNSYNCED;
using syncable::PARENT_ID;
+using syncable::SERVER_IS_DEL;
using syncable::SERVER_PARENT_ID;
using syncable::SERVER_POSITION_IN_PARENT;
+using syncable::SERVER_VERSION;
using syncable::SYNCER;
using syncable::SYNCING;
@@ -118,6 +120,8 @@ void ProcessCommitResponseCommand::ProcessCommitResponse(
StatusController* status = session->status_controller();
const ClientToServerResponse& response(status->commit_response());
const CommitResponse& cr = response.commit();
+ const sync_pb::CommitMessage& commit_message =
+ status->commit_message().commit();
// If we try to commit a parent and child together and the parent conflicts
// the child will have a bad parent causing an error. As this is not a
@@ -137,6 +141,7 @@ void ProcessCommitResponseCommand::ProcessCommitResponse(
for (size_t i = 0; i < proj.size(); i++) {
CommitResponse::ResponseType response_type =
ProcessSingleCommitResponse(&trans, cr.entryresponse(proj[i]),
+ commit_message.entries(proj[i]),
status->GetCommitIdAt(proj[i]),
&conflicting_new_folder_ids,
&deleted_folders);
@@ -206,6 +211,7 @@ CommitResponse::ResponseType
ProcessCommitResponseCommand::ProcessSingleCommitResponse(
syncable::WriteTransaction* trans,
const sync_pb::CommitResponse_EntryResponse& pb_server_entry,
+ const sync_pb::SyncEntity& commit_request_entry,
const syncable::Id& pre_commit_id,
std::set<syncable::Id>* conflicting_new_folder_ids,
set<syncable::Id>* deleted_folders) {
@@ -236,6 +242,7 @@ ProcessCommitResponseCommand::ProcessSingleCommitResponse(
}
if (CommitResponse::CONFLICT == response) {
LOG(INFO) << "Conflict Committing: " << local_entry;
+ // TODO(nick): conflicting_new_folder_ids is a purposeless anachronism.
if (!pre_commit_id.ServerKnows() && local_entry.Get(IS_DIR)) {
conflicting_new_folder_ids->insert(pre_commit_id);
}
@@ -271,113 +278,218 @@ ProcessCommitResponseCommand::ProcessSingleCommitResponse(
LOG(WARNING) << "Server returned a zero version on a commit response.";
}
- ProcessSuccessfulCommitResponse(trans, server_entry, pre_commit_id,
- &local_entry, syncing_was_set,
- deleted_folders);
+ ProcessSuccessfulCommitResponse(commit_request_entry, server_entry,
+ pre_commit_id, &local_entry, syncing_was_set, deleted_folders);
return response;
}
-void ProcessCommitResponseCommand::ProcessSuccessfulCommitResponse(
- syncable::WriteTransaction* trans,
- const CommitResponse_EntryResponse& server_entry,
- const syncable::Id& pre_commit_id, syncable::MutableEntry* local_entry,
- bool syncing_was_set, set<syncable::Id>* deleted_folders) {
+const string& ProcessCommitResponseCommand::GetResultingPostCommitName(
+ const sync_pb::SyncEntity& committed_entry,
+ const CommitResponse_EntryResponse& entry_response) {
+ const string& response_name =
+ SyncerProtoUtil::NameFromCommitEntryResponse(entry_response);
+ if (!response_name.empty())
+ return response_name;
+ return SyncerProtoUtil::NameFromSyncEntity(committed_entry);
+}
+
+bool ProcessCommitResponseCommand::UpdateVersionAfterCommit(
+ const sync_pb::SyncEntity& committed_entry,
+ const CommitResponse_EntryResponse& entry_response,
+ const syncable::Id& pre_commit_id,
+ syncable::MutableEntry* local_entry) {
int64 old_version = local_entry->Get(BASE_VERSION);
- int64 new_version = server_entry.version();
+ int64 new_version = entry_response.version();
bool bad_commit_version = false;
- // TODO(sync): The !server_entry.has_id_string() clauses below were
- // introduced when working with the new protocol.
- if (!pre_commit_id.ServerKnows())
+ if (committed_entry.deleted() &&
+ !local_entry->Get(syncable::UNIQUE_CLIENT_TAG).empty()) {
+ // If the item was deleted, and it's undeletable (uses the client tag),
+ // change the version back to zero. We must set the version to zero so
+ // that the server knows to re-create the item if it gets committed
+ // later for undeletion.
+ new_version = 0;
+ } else if (!pre_commit_id.ServerKnows()) {
bad_commit_version = 0 == new_version;
- else
+ } else {
bad_commit_version = old_version > new_version;
+ }
if (bad_commit_version) {
- LOG(ERROR) << "Bad version in commit return for " << *local_entry <<
- " new_id:" << server_entry.id() << " new_version:" <<
- server_entry.version();
- return;
+ LOG(ERROR) << "Bad version in commit return for " << *local_entry
+ << " new_id:" << entry_response.id() << " new_version:"
+ << entry_response.version();
+ return false;
}
- if (server_entry.id() != pre_commit_id) {
+
+ // Update the base version and server version. The base version must change
+ // here, even if syncing_was_set is false; that's because local changes were
+ // on top of the successfully committed version.
+ local_entry->Put(BASE_VERSION, new_version);
+ LOG(INFO) << "Commit is changing base version of "
+ << local_entry->Get(ID) << " to: " << new_version;
+ local_entry->Put(SERVER_VERSION, new_version);
+ return true;
+}
+
+bool ProcessCommitResponseCommand::ChangeIdAfterCommit(
+ const CommitResponse_EntryResponse& entry_response,
+ const syncable::Id& pre_commit_id,
+ syncable::MutableEntry* local_entry) {
+ syncable::WriteTransaction* trans = local_entry->write_transaction();
+ if (entry_response.id() != pre_commit_id) {
if (pre_commit_id.ServerKnows()) {
- // TODO(sync): In future it's possible that we'll want the opportunity
- // to do a server triggered move aside here.
- LOG(ERROR) << " ID change but not committing a new entry. " <<
- pre_commit_id << " became " << server_entry.id() << ".";
- return;
- }
- if (!server_entry.id().ServerKnows()) {
- LOG(ERROR) << " New entries id < 0." << pre_commit_id << " became " <<
- server_entry.id() << ".";
- return;
+ // The server can sometimes generate a new ID on commit; for example,
+ // when committing an undeletion.
+ LOG(INFO) << " ID changed while committing an old entry. "
+ << pre_commit_id << " became " << entry_response.id() << ".";
}
- MutableEntry same_id(trans, GET_BY_ID, server_entry.id());
+ MutableEntry same_id(trans, GET_BY_ID, entry_response.id());
// We should trap this before this function.
- CHECK(!same_id.good()) << "ID clash with id " << server_entry.id() <<
- " during commit " << same_id;
+ if (same_id.good()) {
+ LOG(ERROR) << "ID clash with id " << entry_response.id()
+ << " during commit " << same_id;
+ return false;
+ }
SyncerUtil::ChangeEntryIDAndUpdateChildren(
- trans, local_entry, server_entry.id());
- LOG(INFO) << "Changing ID to " << server_entry.id();
+ trans, local_entry, entry_response.id());
+ LOG(INFO) << "Changing ID to " << entry_response.id();
}
+ return true;
+}
- local_entry->Put(BASE_VERSION, new_version);
- LOG(INFO) << "Commit is changing base version of " <<
- local_entry->Get(ID) << " to: " << new_version;
+void ProcessCommitResponseCommand::UpdateServerFieldsAfterCommit(
+ const sync_pb::SyncEntity& committed_entry,
+ const CommitResponse_EntryResponse& entry_response,
+ syncable::MutableEntry* local_entry) {
+
+ // We just committed an entry successfully, and now we want to make our view
+ // of the server state consistent with the server state. We must be careful;
+ // |entry_response| and |committed_entry| have some identically named
+ // fields. We only want to consider fields from |committed_entry| when there
+ // is not an overriding field in the |entry_response|. We do not want to
+ // update the server data from the local data in the entry -- it's possible
+ // that the local data changed during the commit, and even if not, the server
+ // has the last word on the values of several properties.
+
+ local_entry->Put(SERVER_IS_DEL, committed_entry.deleted());
+ if (committed_entry.deleted()) {
+ // Don't clobber any other fields of deleted objects.
+ return;
+ }
+
+ local_entry->Put(syncable::SERVER_IS_DIR,
+ (committed_entry.folder() ||
+ committed_entry.bookmarkdata().bookmark_folder()));
+ local_entry->Put(syncable::SERVER_SPECIFICS,
+ committed_entry.specifics());
+ local_entry->Put(syncable::SERVER_MTIME,
+ committed_entry.mtime());
+ local_entry->Put(syncable::SERVER_CTIME,
+ committed_entry.ctime());
+ local_entry->Put(syncable::SERVER_POSITION_IN_PARENT,
+ entry_response.position_in_parent());
+ // TODO(nick): The server doesn't set entry_response.server_parent_id in
+ // practice; to update SERVER_PARENT_ID appropriately here we'd need to
+ // get the post-commit ID of the parent indicated by
+ // committed_entry.parent_id_string(). That should be inferrable from the
+ // information we have, but it's a bit convoluted to pull it out directly.
+ // Getting this right is important: SERVER_PARENT_ID gets fed back into
+ // old_parent_id during the next commit.
+ local_entry->Put(syncable::SERVER_PARENT_ID,
+ local_entry->Get(syncable::PARENT_ID));
+ local_entry->Put(syncable::SERVER_NON_UNIQUE_NAME,
+ GetResultingPostCommitName(committed_entry, entry_response));
if (local_entry->Get(IS_UNAPPLIED_UPDATE)) {
- // This is possible, but very unlikely.
+ // This shouldn't happen; an unapplied update shouldn't be committed, and
+ // if it were, the commit should have failed. But if it does happen: we've
+ // just overwritten the update info, so clear the flag.
local_entry->Put(IS_UNAPPLIED_UPDATE, false);
}
+}
- if (server_entry.has_name()) {
- if (syncing_was_set) {
- PerformCommitTimeNameAside(trans, server_entry, local_entry);
- } else {
- // IS_UNSYNCED will ensure that this entry gets committed again, even if
- // we skip this name aside. IS_UNSYNCED was probably previously set, but
- // let's just set it anyway.
- local_entry->Put(IS_UNSYNCED, true);
- LOG(INFO) << "Skipping commit time name aside because" <<
- " entry was changed during commit.";
- }
+void ProcessCommitResponseCommand::OverrideClientFieldsAfterCommit(
+ const sync_pb::SyncEntity& committed_entry,
+ const CommitResponse_EntryResponse& entry_response,
+ syncable::MutableEntry* local_entry) {
+ if (committed_entry.deleted()) {
+ // If an entry's been deleted, nothing else matters.
+ DCHECK(local_entry->Get(IS_DEL));
+ return;
}
- if (syncing_was_set && server_entry.has_position_in_parent()) {
- // The server has the final say on positioning, so apply the absolute
- // position that it returns.
- local_entry->Put(SERVER_POSITION_IN_PARENT,
- server_entry.position_in_parent());
+ // Update the name.
+ const string& server_name =
+ GetResultingPostCommitName(committed_entry, entry_response);
+ const string& old_name =
+ local_entry->Get(syncable::NON_UNIQUE_NAME);
+
+ if (!server_name.empty() && old_name != server_name) {
+ LOG(INFO) << "During commit, server changed name: " << old_name
+ << " to new name: " << server_name;
+ local_entry->Put(syncable::NON_UNIQUE_NAME, server_name);
+ }
+
+ // The server has the final say on positioning, so apply the absolute
+ // position that it returns.
+ if (entry_response.has_position_in_parent()) {
+ // The SERVER_ field should already have been written.
+ DCHECK_EQ(entry_response.position_in_parent(),
+ local_entry->Get(SERVER_POSITION_IN_PARENT));
// We just committed successfully, so we assume that the position
// value we got applies to the PARENT_ID we submitted.
syncable::Id new_prev = SyncerUtil::ComputePrevIdFromServerPosition(
- trans, local_entry, local_entry->Get(PARENT_ID));
+ local_entry->write_transaction(), local_entry,
+ local_entry->Get(PARENT_ID));
if (!local_entry->PutPredecessor(new_prev)) {
LOG(WARNING) << "PutPredecessor failed after successful commit";
}
}
+}
+void ProcessCommitResponseCommand::ProcessSuccessfulCommitResponse(
+ const sync_pb::SyncEntity& committed_entry,
+ const CommitResponse_EntryResponse& entry_response,
+ const syncable::Id& pre_commit_id, syncable::MutableEntry* local_entry,
+ bool syncing_was_set, set<syncable::Id>* deleted_folders) {
+ DCHECK(local_entry->Get(IS_UNSYNCED));
+
+ // Update SERVER_VERSION and BASE_VERSION.
+ if (!UpdateVersionAfterCommit(committed_entry, entry_response, pre_commit_id,
+ local_entry)) {
+ LOG(ERROR) << "Bad version in commit return for " << *local_entry
+ << " new_id:" << entry_response.id() << " new_version:"
+ << entry_response.version();
+ return;
+ }
+
+ // If the server gave us a new ID, apply it.
+ if (!ChangeIdAfterCommit(entry_response, pre_commit_id, local_entry)) {
+ return;
+ }
+
+ // Update our stored copy of the server state.
+ UpdateServerFieldsAfterCommit(committed_entry, entry_response, local_entry);
+
+ // If the item doesn't need to be committed again (a situation that
+ // happens if it changed locally during the commit), we can remove
+ // it from the unsynced list. Also, we should change the locally-
+ // visible properties to apply any canonicalizations or fixups
+ // that the server introduced during the commit.
if (syncing_was_set) {
+ OverrideClientFieldsAfterCommit(committed_entry, entry_response,
+ local_entry);
local_entry->Put(IS_UNSYNCED, false);
}
+
+ // Make a note of any deleted folders, whose children would have
+ // been recursively deleted.
+ // TODO(nick): Here, commit_message.deleted() would be more correct than
+ // local_entry->Get(IS_DEL). For example, an item could be renamed, and then
+ // deleted during the commit of the rename. Unit test & fix.
if (local_entry->Get(IS_DIR) && local_entry->Get(IS_DEL)) {
deleted_folders->insert(local_entry->Get(ID));
}
}
-void ProcessCommitResponseCommand::PerformCommitTimeNameAside(
- syncable::WriteTransaction* trans,
- const CommitResponse_EntryResponse& server_entry,
- syncable::MutableEntry* local_entry) {
- string old_name = local_entry->Get(syncable::NON_UNIQUE_NAME);
- const string server_name =
- SyncerProtoUtil::NameFromCommitEntryResponse(server_entry);
-
- if (!server_name.empty() && old_name != server_name) {
- LOG(INFO) << "Server commit moved aside entry: " << old_name
- << " to new name " << server_name;
- // Should be safe since we're in a "commit lock."
- local_entry->Put(syncable::NON_UNIQUE_NAME, server_name);
- }
-}
-
} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/process_commit_response_command.h b/chrome/browser/sync/engine/process_commit_response_command.h
index 351db1c..96253d8 100644
--- a/chrome/browser/sync/engine/process_commit_response_command.h
+++ b/chrome/browser/sync/engine/process_commit_response_command.h
@@ -32,24 +32,59 @@ class ProcessCommitResponseCommand : public ModelChangingSyncerCommand {
private:
CommitResponse::ResponseType ProcessSingleCommitResponse(
syncable::WriteTransaction* trans,
- const sync_pb::CommitResponse_EntryResponse& pb_server_entry,
- const syncable::Id& pre_commit_id, std::set<syncable::Id>*
- conflicting_new_directory_ids,
+ const sync_pb::CommitResponse_EntryResponse& pb_commit_response,
+ const sync_pb::SyncEntity& pb_committed_entry,
+ const syncable::Id& pre_commit_id,
+ std::set<syncable::Id>* conflicting_new_directory_ids,
std::set<syncable::Id>* deleted_folders);
// Actually does the work of execute.
void ProcessCommitResponse(sessions::SyncSession* session);
- void ProcessSuccessfulCommitResponse(syncable::WriteTransaction* trans,
- const CommitResponse_EntryResponse& server_entry,
+ void ProcessSuccessfulCommitResponse(
+ const sync_pb::SyncEntity& committed_entry,
+ const CommitResponse_EntryResponse& entry_response,
const syncable::Id& pre_commit_id, syncable::MutableEntry* local_entry,
bool syncing_was_set, std::set<syncable::Id>* deleted_folders);
- void PerformCommitTimeNameAside(
- syncable::WriteTransaction* trans,
- const CommitResponse_EntryResponse& server_entry,
+ // Update the BASE_VERSION and SERVER_VERSION, post-commit.
+ // Helper for ProcessSuccessfulCommitResponse.
+ bool UpdateVersionAfterCommit(
+ const sync_pb::SyncEntity& committed_entry,
+ const CommitResponse_EntryResponse& entry_response,
+ const syncable::Id& pre_commit_id,
+ syncable::MutableEntry* local_entry);
+
+ // If the server generated an ID for us during a commit, apply the new ID.
+ // Helper for ProcessSuccessfulCommitResponse.
+ bool ChangeIdAfterCommit(
+ const CommitResponse_EntryResponse& entry_response,
+ const syncable::Id& pre_commit_id,
+ syncable::MutableEntry* local_entry);
+
+ // Update the SERVER_ fields to reflect the server state after committing.
+ // Helper for ProcessSuccessfulCommitResponse.
+ void UpdateServerFieldsAfterCommit(
+ const sync_pb::SyncEntity& committed_entry,
+ const CommitResponse_EntryResponse& entry_response,
+ syncable::MutableEntry* local_entry);
+
+ // The server can override some values during a commit; the overridden values
+ // are returned as fields in the CommitResponse_EntryResponse. This method
+ // stores the fields back in the client-visible (i.e. not the SERVER_* fields)
+ // fields of the entry. This should only be done if the item did not change
+ // locally while the commit was in flight.
+ // Helper for ProcessSuccessfulCommitResponse.
+ void OverrideClientFieldsAfterCommit(
+ const sync_pb::SyncEntity& committed_entry,
+ const CommitResponse_EntryResponse& entry_response,
syncable::MutableEntry* local_entry);
+ // Helper to extract the final name from the protobufs.
+ const string& GetResultingPostCommitName(
+ const sync_pb::SyncEntity& committed_entry,
+ const CommitResponse_EntryResponse& entry_response);
+
DISALLOW_COPY_AND_ASSIGN(ProcessCommitResponseCommand);
};
diff --git a/chrome/browser/sync/engine/syncer_proto_util.cc b/chrome/browser/sync/engine/syncer_proto_util.cc
index b54362c..76e163a 100644
--- a/chrome/browser/sync/engine/syncer_proto_util.cc
+++ b/chrome/browser/sync/engine/syncer_proto_util.cc
@@ -119,11 +119,11 @@ void SyncerProtoUtil::AddRequestBirthday(syncable::Directory* dir,
// static
bool SyncerProtoUtil::PostAndProcessHeaders(ServerConnectionManager* scm,
AuthWatcher* auth_watcher,
- ClientToServerMessage* msg,
+ const ClientToServerMessage& msg,
ClientToServerResponse* response) {
std::string tx, rx;
- msg->SerializeToString(&tx);
+ msg.SerializeToString(&tx);
HttpResponse http_response;
ServerConnectionManager::PostBufferParams params = {
@@ -165,10 +165,16 @@ bool SyncerProtoUtil::PostAndProcessHeaders(ServerConnectionManager* scm,
}
// static
-bool SyncerProtoUtil::PostClientToServerMessage(ClientToServerMessage* msg,
- ClientToServerResponse* response, SyncSession* session) {
+bool SyncerProtoUtil::PostClientToServerMessage(
+ const ClientToServerMessage& msg,
+ ClientToServerResponse* response,
+ SyncSession* session) {
CHECK(response);
+ DCHECK(msg.has_store_birthday() || (msg.has_get_updates() &&
+ msg.get_updates().has_from_timestamp() &&
+ msg.get_updates().from_timestamp() == 0))
+ << "Must call AddRequestBirthday to set birthday.";
ScopedDirLookup dir(session->context()->directory_manager(),
session->context()->account_name());
@@ -176,8 +182,6 @@ bool SyncerProtoUtil::PostClientToServerMessage(ClientToServerMessage* msg,
return false;
}
- AddRequestBirthday(dir, msg);
-
if (!PostAndProcessHeaders(session->context()->connection_manager(),
session->context()->auth_watcher(),
msg,
@@ -287,10 +291,10 @@ void SyncerProtoUtil::CopyBlobIntoProtoBytes(const syncable::Blob& blob,
// static
const std::string& SyncerProtoUtil::NameFromSyncEntity(
- const SyncEntity& entry) {
+ const sync_pb::SyncEntity& entry) {
if (entry.has_non_unique_name()) {
- return entry.non_unique_name();
+ return entry.non_unique_name();
}
return entry.name();
diff --git a/chrome/browser/sync/engine/syncer_proto_util.h b/chrome/browser/sync/engine/syncer_proto_util.h
index fd3089f..ae668ed 100644
--- a/chrome/browser/sync/engine/syncer_proto_util.h
+++ b/chrome/browser/sync/engine/syncer_proto_util.h
@@ -42,7 +42,8 @@ class SyncerProtoUtil {
// Returns true on success. Also handles store birthday verification:
// session->status()->syncer_stuck_ is set true if the birthday is
// incorrect. A false value will always be returned if birthday is bad.
- static bool PostClientToServerMessage(ClientToServerMessage* msg,
+ static bool PostClientToServerMessage(
+ const ClientToServerMessage& msg,
sync_pb::ClientToServerResponse* response,
sessions::SyncSession* session);
@@ -66,8 +67,8 @@ class SyncerProtoUtil {
std::string* proto_bytes);
// Extract the name field from a sync entity.
- static const std::string& NameFromSyncEntity(const SyncEntity& entry);
-
+ static const std::string& NameFromSyncEntity(
+ const sync_pb::SyncEntity& entry);
// Extract the name field from a commit entry response.
static const std::string& NameFromCommitEntryResponse(
@@ -87,6 +88,10 @@ class SyncerProtoUtil {
// to have a smaller footprint than the protobuf's built-in pretty printer.
static std::string SyncEntityDebugString(const sync_pb::SyncEntity& entry);
+ // Pull the birthday from the dir and put it into the msg.
+ static void AddRequestBirthday(syncable::Directory* dir,
+ ClientToServerMessage* msg);
+
private:
SyncerProtoUtil() {}
@@ -97,15 +102,11 @@ class SyncerProtoUtil {
static bool VerifyResponseBirthday(syncable::Directory* dir,
const sync_pb::ClientToServerResponse* response);
- // Pull the birthday from the dir and put it into the msg.
- static void AddRequestBirthday(syncable::Directory* dir,
- ClientToServerMessage* msg);
-
// Post the message using the scm, and do some processing on the returned
// headers. Decode the server response.
static bool PostAndProcessHeaders(browser_sync::ServerConnectionManager* scm,
browser_sync::AuthWatcher* authwatcher,
- ClientToServerMessage* msg,
+ const ClientToServerMessage& msg,
sync_pb::ClientToServerResponse* response);
friend class SyncerProtoUtilTest;
diff --git a/chrome/browser/sync/engine/syncer_proto_util_unittest.cc b/chrome/browser/sync/engine/syncer_proto_util_unittest.cc
index cfdcca6..b33cb48 100644
--- a/chrome/browser/sync/engine/syncer_proto_util_unittest.cc
+++ b/chrome/browser/sync/engine/syncer_proto_util_unittest.cc
@@ -87,11 +87,6 @@ TEST(SyncerProtoUtil, NameExtractionOneName) {
const std::string name_a =
SyncerProtoUtil::NameFromSyncEntity(one_name_entity);
EXPECT_EQ(one_name_string, name_a);
-
- const std::string name_b =
- SyncerProtoUtil::NameFromCommitEntryResponse(one_name_response);
- EXPECT_EQ(one_name_string, name_b);
- EXPECT_TRUE(name_a == name_b);
}
TEST(SyncerProtoUtil, NameExtractionOneUniqueName) {
@@ -106,11 +101,6 @@ TEST(SyncerProtoUtil, NameExtractionOneUniqueName) {
const std::string name_a =
SyncerProtoUtil::NameFromSyncEntity(one_name_entity);
EXPECT_EQ(one_name_string, name_a);
-
- const std::string name_b =
- SyncerProtoUtil::NameFromCommitEntryResponse(one_name_response);
- EXPECT_EQ(one_name_string, name_b);
- EXPECT_TRUE(name_a == name_b);
}
// Tests NameFromSyncEntity and NameFromCommitEntryResponse when both the name
@@ -132,12 +122,6 @@ TEST(SyncerProtoUtil, NameExtractionTwoNames) {
const std::string name_a =
SyncerProtoUtil::NameFromSyncEntity(two_name_entity);
EXPECT_EQ(neuro, name_a);
-
- const std::string name_b =
- SyncerProtoUtil::NameFromCommitEntryResponse(two_name_response);
- EXPECT_EQ(neuro, name_b);
-
- EXPECT_TRUE(name_a == name_b);
}
class SyncerProtoUtilTest : public testing::Test {
@@ -237,15 +221,15 @@ TEST_F(SyncerProtoUtilTest, PostAndProcessHeaders) {
dcm.set_send_error(true);
EXPECT_FALSE(SyncerProtoUtil::PostAndProcessHeaders(&dcm, NULL,
- &msg, &response));
+ msg, &response));
dcm.set_send_error(false);
EXPECT_TRUE(SyncerProtoUtil::PostAndProcessHeaders(&dcm, NULL,
- &msg, &response));
+ msg, &response));
dcm.set_access_denied(true);
EXPECT_FALSE(SyncerProtoUtil::PostAndProcessHeaders(&dcm, NULL,
- &msg, &response));
+ msg, &response));
}
} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/syncer_unittest.cc b/chrome/browser/sync/engine/syncer_unittest.cc
index 685a7ba..f0a8cb0 100644
--- a/chrome/browser/sync/engine/syncer_unittest.cc
+++ b/chrome/browser/sync/engine/syncer_unittest.cc
@@ -84,6 +84,7 @@ using syncable::SERVER_VERSION;
using syncable::UNIQUE_CLIENT_TAG;
using syncable::UNIQUE_SERVER_TAG;
using syncable::SPECIFICS;
+using syncable::SYNCING;
using syncable::UNITTEST;
using sessions::ConflictProgress;
@@ -391,6 +392,51 @@ class SyncerTest : public testing::Test,
mock_server_->ExpectGetUpdatesRequestTypes(enabled_datatypes_);
}
+ template<typename FieldType, typename ValueType>
+ ValueType GetField(int64 metahandle, FieldType field,
+ ValueType default_value) const {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ EXPECT_TRUE(dir.good());
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry entry(&trans, GET_BY_HANDLE, metahandle);
+ EXPECT_TRUE(entry.good());
+ if (!entry.good()) {
+ return default_value;
+ }
+ EXPECT_EQ(metahandle, entry.Get(META_HANDLE));
+ return entry.Get(field);
+ }
+
+ // Helper getters that work without a transaction, to reduce boilerplate.
+ Id Get(int64 metahandle, syncable::IdField field) const {
+ return GetField(metahandle, field, syncable::kNullId);
+ }
+
+ string Get(int64 metahandle, syncable::StringField field) const {
+ return GetField(metahandle, field, string());
+ }
+
+ int64 Get(int64 metahandle, syncable::Int64Field field) const {
+ return GetField(metahandle, field, syncable::kInvalidMetaHandle);
+ }
+
+ int64 Get(int64 metahandle, syncable::BaseVersion field) const {
+ const int64 kDefaultValue = -100;
+ return GetField(metahandle, field, kDefaultValue);
+ }
+
+ bool Get(int64 metahandle, syncable::IndexedBitField field) const {
+ return GetField(metahandle, field, false);
+ }
+
+ bool Get(int64 metahandle, syncable::IsDelField field) const {
+ return GetField(metahandle, field, false);
+ }
+
+ bool Get(int64 metahandle, syncable::BitField field) const {
+ return GetField(metahandle, field, false);
+ }
+
// Some ids to aid tests. Only the root one's value is specific. The rest
// are named for test clarity.
// TODO(chron): Get rid of these inbuilt IDs. They only make it
@@ -1640,7 +1686,8 @@ class EntryCreatedInNewFolderTest : public SyncerTest {
TEST_F(EntryCreatedInNewFolderTest, EntryCreatedInNewFolderMidSync) {
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
- CHECK(dir.good());
+ ASSERT_TRUE(dir.good());
+ dir->set_store_birthday(mock_server_->store_birthday());
{
WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
MutableEntry entry(&trans, syncable::CREATE, trans.root_id(),
@@ -1709,7 +1756,7 @@ TEST_F(SyncerTest, UnappliedUpdateOnCreatedItemItemDoesNotCrash) {
}
// Run the syncer.
for (int i = 0 ; i < 30 ; ++i) {
- syncer_->SyncShare(this);
+ syncer_->SyncShare(this);
}
}
@@ -1789,8 +1836,12 @@ TEST_F(SyncerTest, CommitsUpdateDoesntAlterEntry) {
version = entry.Get(BASE_VERSION);
server_position_in_parent = entry.Get(SERVER_POSITION_IN_PARENT);
}
- mock_server_->AddUpdateDirectory(id, root_id_, "Pete", version, 10);
- mock_server_->SetLastUpdatePosition(server_position_in_parent);
+ sync_pb::SyncEntity* update = mock_server_->AddUpdateFromLastCommit();
+ EXPECT_EQ("Pete", update->name());
+ EXPECT_EQ(id.GetServerId(), update->id_string());
+ EXPECT_EQ(root_id_.GetServerId(), update->parent_id_string());
+ EXPECT_EQ(version, update->version());
+ EXPECT_EQ(server_position_in_parent, update->position_in_parent());
syncer_->SyncShare(this);
{
ReadTransaction trans(dir, __FILE__, __LINE__);
@@ -2547,7 +2598,6 @@ TEST_F(SyncerTest, DISABLED_ServerDeletingFolderWeHaveAnOpenEntryIn) {
syncer_events_.clear();
}
-
TEST_F(SyncerTest, WeMovedSomethingIntoAFolderServerHasDeleted) {
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
CHECK(dir.good());
@@ -2604,44 +2654,39 @@ TEST_F(SyncerTest, WeMovedSomethingIntoAFolderServerHasDeleted) {
class FolderMoveDeleteRenameTest : public SyncerTest {
public:
- FolderMoveDeleteRenameTest() : move_bob_count_(0), done_(false) {}
+ FolderMoveDeleteRenameTest() : done_(false) {}
static const int64 bob_id_number = 1;
static const int64 fred_id_number = 2;
void MoveBobIntoID2Runner() {
if (!done_) {
- done_ = MoveBobIntoID2();
+ MoveBobIntoID2();
+ done_ = true;
}
}
protected:
- int move_bob_count_;
- bool done_;
-
- bool MoveBobIntoID2() {
+ void MoveBobIntoID2() {
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
CHECK(dir.good());
- if (--move_bob_count_ > 0) {
- return false;
- }
-
- if (move_bob_count_ == 0) {
WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
- Entry alice(&trans, GET_BY_ID,
- TestIdFactory::FromNumber(fred_id_number));
+ Entry alice(&trans, GET_BY_ID,
+ TestIdFactory::FromNumber(fred_id_number));
CHECK(alice.good());
- CHECK(!alice.Get(IS_DEL));
- MutableEntry bob(&trans, GET_BY_ID,
- TestIdFactory::FromNumber(bob_id_number));
+ EXPECT_TRUE(!alice.Get(IS_DEL));
+ EXPECT_TRUE(alice.Get(SYNCING)) << "Expected to be called mid-commit.";
+ MutableEntry bob(&trans, GET_BY_ID,
+ TestIdFactory::FromNumber(bob_id_number));
CHECK(bob.good());
bob.Put(IS_UNSYNCED, true);
+
+ bob.Put(SYNCING, false);
bob.Put(PARENT_ID, alice.Get(ID));
- return true;
}
- return false;
-}
+
+ bool done_;
};
TEST_F(FolderMoveDeleteRenameTest,
@@ -2664,6 +2709,7 @@ TEST_F(FolderMoveDeleteRenameTest,
MutableEntry fred(&trans, GET_BY_ID, fred_id);
ASSERT_TRUE(fred.good());
fred.Put(IS_UNSYNCED, true);
+ fred.Put(SYNCING, false);
fred.Put(NON_UNIQUE_NAME, "Alice");
}
mock_server_->AddUpdateDirectory(fred_id, TestIdFactory::root(),
@@ -2673,7 +2719,6 @@ TEST_F(FolderMoveDeleteRenameTest,
// This test is a little brittle. We want to move the item into the folder
// such that we think we're dealing with a simple conflict, but in reality
// it's actually a conflict set.
- move_bob_count_ = 2;
mock_server_->SetMidCommitCallback(
NewCallback<FolderMoveDeleteRenameTest>(this,
&FolderMoveDeleteRenameTest::MoveBobIntoID2Runner));
@@ -3981,29 +4026,38 @@ TEST_F(SyncerTest, ClientTagIllegalUpdateIgnored) {
}
}
-TEST_F(SyncerTest, ClientTagClientCreatedConflictUpdate) {
+TEST_F(SyncerTest, ClientTagUncommittedTagMatchesUpdate) {
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
EXPECT_TRUE(dir.good());
- int64 original_metahandle;
+ int64 original_metahandle = 0;
+
+ sync_pb::EntitySpecifics local_bookmark(DefaultBookmarkSpecifics());
+ local_bookmark.MutableExtension(sync_pb::bookmark)->
+ set_url("http://foo/localsite");
+ sync_pb::EntitySpecifics server_bookmark(DefaultBookmarkSpecifics());
+ server_bookmark.MutableExtension(sync_pb::bookmark)->
+ set_url("http://bar/serversite");
{
WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
MutableEntry perm_folder(&trans, CREATE, ids_.root(), "clientname");
ASSERT_TRUE(perm_folder.good());
perm_folder.Put(UNIQUE_CLIENT_TAG, "clientperm");
- perm_folder.Put(SPECIFICS, DefaultBookmarkSpecifics());
+ perm_folder.Put(SPECIFICS, local_bookmark);
perm_folder.Put(IS_UNSYNCED, true);
EXPECT_FALSE(perm_folder.Get(IS_UNAPPLIED_UPDATE));
EXPECT_FALSE(perm_folder.Get(ID).ServerKnows());
original_metahandle = perm_folder.Get(META_HANDLE);
}
- mock_server_->AddUpdateDirectory(1, 0, "permitem_renamed", 10, 100);
+ mock_server_->AddUpdateBookmark(1, 0, "permitem_renamed", 10, 100);
mock_server_->SetLastUpdateClientTag("clientperm");
+ mock_server_->GetMutableLastUpdate()->mutable_specifics()->
+ CopyFrom(server_bookmark);
mock_server_->set_conflict_all_commits(true);
syncer_->SyncShare(this);
- // This should cause client tag overwrite.
+ // This should cause client tag reunion, preserving the metahandle.
{
ReadTransaction trans(dir, __FILE__, __LINE__);
@@ -4011,20 +4065,43 @@ TEST_F(SyncerTest, ClientTagClientCreatedConflictUpdate) {
ASSERT_TRUE(perm_folder.good());
EXPECT_FALSE(perm_folder.Get(IS_DEL));
EXPECT_FALSE(perm_folder.Get(IS_UNAPPLIED_UPDATE));
- EXPECT_FALSE(perm_folder.Get(IS_UNSYNCED));
- EXPECT_EQ(perm_folder.Get(BASE_VERSION), 10);
- // Entry should have been moved aside.
- EXPECT_NE(perm_folder.Get(META_HANDLE), original_metahandle);
- EXPECT_EQ(perm_folder.Get(UNIQUE_CLIENT_TAG), "clientperm");
- EXPECT_TRUE(perm_folder.Get(NON_UNIQUE_NAME) == "permitem_renamed");
+ EXPECT_TRUE(perm_folder.Get(IS_UNSYNCED));
+ EXPECT_EQ(10, perm_folder.Get(BASE_VERSION));
+ // Entry should have been given the new ID while preserving the
+ // metahandle; client should have won the conflict resolution.
+ EXPECT_EQ(original_metahandle, perm_folder.Get(META_HANDLE));
+ EXPECT_EQ("clientperm", perm_folder.Get(UNIQUE_CLIENT_TAG));
+ EXPECT_EQ("clientname", perm_folder.Get(NON_UNIQUE_NAME));
+ EXPECT_EQ(local_bookmark.SerializeAsString(),
+ perm_folder.Get(SPECIFICS).SerializeAsString());
+ EXPECT_TRUE(perm_folder.Get(ID).ServerKnows());
+ }
+
+ mock_server_->set_conflict_all_commits(false);
+ syncer_->SyncShare(this);
- Entry moved_aside(&trans, GET_BY_HANDLE, original_metahandle);
- EXPECT_TRUE(moved_aside.good());
- EXPECT_TRUE(moved_aside.Get(IS_DEL));
+ // The resolved entry ought to commit cleanly.
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+
+ Entry perm_folder(&trans, GET_BY_CLIENT_TAG, "clientperm");
+ ASSERT_TRUE(perm_folder.good());
+ EXPECT_FALSE(perm_folder.Get(IS_DEL));
+ EXPECT_FALSE(perm_folder.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_FALSE(perm_folder.Get(IS_UNSYNCED));
+ EXPECT_TRUE(10 < perm_folder.Get(BASE_VERSION));
+ // Entry should have been given the new ID while preserving the
+ // metahandle; client should have won the conflict resolution.
+ EXPECT_EQ(original_metahandle, perm_folder.Get(META_HANDLE));
+ EXPECT_EQ("clientperm", perm_folder.Get(UNIQUE_CLIENT_TAG));
+ EXPECT_EQ("clientname", perm_folder.Get(NON_UNIQUE_NAME));
+ EXPECT_EQ(local_bookmark.SerializeAsString(),
+ perm_folder.Get(SPECIFICS).SerializeAsString());
+ EXPECT_TRUE(perm_folder.Get(ID).ServerKnows());
}
}
-TEST_F(SyncerTest, ClientTagOverwitesDeletedClientEntry) {
+TEST_F(SyncerTest, ClientTagConflictWithDeletedLocalEntry) {
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
EXPECT_TRUE(dir.good());
@@ -4032,7 +4109,9 @@ TEST_F(SyncerTest, ClientTagOverwitesDeletedClientEntry) {
WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
MutableEntry perm_folder(&trans, CREATE, ids_.root(), "clientname");
ASSERT_TRUE(perm_folder.good());
+ ASSERT_FALSE(perm_folder.Get(ID).ServerKnows());
perm_folder.Put(UNIQUE_CLIENT_TAG, "clientperm");
+ perm_folder.Put(SPECIFICS, DefaultBookmarkSpecifics());
perm_folder.Put(IS_UNSYNCED, true);
perm_folder.Put(IS_DEL, true);
}
@@ -4048,12 +4127,12 @@ TEST_F(SyncerTest, ClientTagOverwitesDeletedClientEntry) {
Entry perm_folder(&trans, GET_BY_CLIENT_TAG, "clientperm");
ASSERT_TRUE(perm_folder.good());
- EXPECT_FALSE(perm_folder.Get(IS_DEL));
+ ASSERT_TRUE(perm_folder.Get(ID).ServerKnows());
+ EXPECT_TRUE(perm_folder.Get(IS_DEL));
EXPECT_FALSE(perm_folder.Get(IS_UNAPPLIED_UPDATE));
- EXPECT_FALSE(perm_folder.Get(IS_UNSYNCED));
+ EXPECT_TRUE(perm_folder.Get(IS_UNSYNCED));
EXPECT_EQ(perm_folder.Get(BASE_VERSION), 10);
EXPECT_EQ(perm_folder.Get(UNIQUE_CLIENT_TAG), "clientperm");
- EXPECT_TRUE(perm_folder.Get(NON_UNIQUE_NAME) == "permitem_renamed");
}
}
@@ -4139,6 +4218,473 @@ TEST_F(SyncerTest, GetUpdatesSetsRequestedTypes) {
EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
}
+// Test what happens if a client deletes, then recreates, an object very
+// quickly. It is possible that the deletion gets sent as a commit, and
+// the undelete happens during the commit request. The principle here
+// is that with a single committing client, conflicts should never
+// be encountered, and a client encountering its past actions during
+// getupdates should never feed back to override later actions.
+//
+// In cases of ordering A-F below, the outcome should be the same.
+// Exercised by UndeleteDuringCommit:
+// A. Delete - commit - undelete - commitresponse.
+// B. Delete - commit - undelete - commitresponse - getupdates.
+// Exercised by UndeleteBeforeCommit:
+// C. Delete - undelete - commit - commitresponse.
+// D. Delete - undelete - commit - commitresponse - getupdates.
+// Exercised by UndeleteAfterCommit:
+// E. Delete - commit - commitresponse - undelete - commit
+// - commitresponse.
+// F. Delete - commit - commitresponse - undelete - commit -
+// - commitresponse - getupdates.
+class SyncerUndeletionTest : public SyncerTest {
+ public:
+ SyncerUndeletionTest()
+ : client_tag_("foobar"),
+ metahandle_(syncable::kInvalidMetaHandle) {
+ }
+
+ void Create() {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ EXPECT_TRUE(dir.good());
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry perm_folder(&trans, CREATE, ids_.root(), "clientname");
+ ASSERT_TRUE(perm_folder.good());
+ perm_folder.Put(UNIQUE_CLIENT_TAG, client_tag_);
+ perm_folder.Put(IS_UNSYNCED, true);
+ perm_folder.Put(SYNCING, false);
+ perm_folder.Put(SPECIFICS, DefaultBookmarkSpecifics());
+ EXPECT_FALSE(perm_folder.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_FALSE(perm_folder.Get(ID).ServerKnows());
+ metahandle_ = perm_folder.Get(META_HANDLE);
+ }
+
+ void Delete() {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ EXPECT_TRUE(dir.good());
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry entry(&trans, GET_BY_CLIENT_TAG, client_tag_);
+ ASSERT_TRUE(entry.good());
+ EXPECT_EQ(metahandle_, entry.Get(META_HANDLE));
+ entry.Put(IS_DEL, true);
+ entry.Put(IS_UNSYNCED, true);
+ entry.Put(SYNCING, false);
+ }
+
+ void Undelete() {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ EXPECT_TRUE(dir.good());
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry entry(&trans, GET_BY_CLIENT_TAG, client_tag_);
+ ASSERT_TRUE(entry.good());
+ EXPECT_EQ(metahandle_, entry.Get(META_HANDLE));
+ EXPECT_TRUE(entry.Get(IS_DEL));
+ entry.Put(IS_DEL, false);
+ entry.Put(IS_UNSYNCED, true);
+ entry.Put(SYNCING, false);
+ }
+
+ int64 GetMetahandleOfTag() {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ EXPECT_TRUE(dir.good());
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry entry(&trans, GET_BY_CLIENT_TAG, client_tag_);
+ EXPECT_TRUE(entry.good());
+ if (!entry.good()) {
+ return syncable::kInvalidMetaHandle;
+ }
+ return entry.Get(META_HANDLE);
+ }
+
+ void ExpectUnsyncedCreation() {
+ EXPECT_EQ(metahandle_, GetMetahandleOfTag());
+ EXPECT_FALSE(Get(metahandle_, IS_DEL));
+ EXPECT_FALSE(Get(metahandle_, SERVER_IS_DEL)); // Never been committed.
+ EXPECT_GE(0, Get(metahandle_, BASE_VERSION));
+ EXPECT_TRUE(Get(metahandle_, IS_UNSYNCED));
+ EXPECT_FALSE(Get(metahandle_, IS_UNAPPLIED_UPDATE));
+ }
+
+ void ExpectUnsyncedUndeletion() {
+ EXPECT_EQ(metahandle_, GetMetahandleOfTag());
+ EXPECT_FALSE(Get(metahandle_, IS_DEL));
+ EXPECT_TRUE(Get(metahandle_, SERVER_IS_DEL));
+ EXPECT_EQ(0, Get(metahandle_, BASE_VERSION));
+ EXPECT_TRUE(Get(metahandle_, IS_UNSYNCED));
+ EXPECT_FALSE(Get(metahandle_, IS_UNAPPLIED_UPDATE));
+ EXPECT_TRUE(Get(metahandle_, ID).ServerKnows());
+ }
+
+ void ExpectUnsyncedEdit() {
+ EXPECT_EQ(metahandle_, GetMetahandleOfTag());
+ EXPECT_FALSE(Get(metahandle_, IS_DEL));
+ EXPECT_FALSE(Get(metahandle_, SERVER_IS_DEL));
+ EXPECT_LT(0, Get(metahandle_, BASE_VERSION));
+ EXPECT_TRUE(Get(metahandle_, IS_UNSYNCED));
+ EXPECT_FALSE(Get(metahandle_, IS_UNAPPLIED_UPDATE));
+ EXPECT_TRUE(Get(metahandle_, ID).ServerKnows());
+ }
+
+ void ExpectUnsyncedDeletion() {
+ EXPECT_EQ(metahandle_, GetMetahandleOfTag());
+ EXPECT_TRUE(Get(metahandle_, IS_DEL));
+ EXPECT_FALSE(Get(metahandle_, SERVER_IS_DEL));
+ EXPECT_TRUE(Get(metahandle_, IS_UNSYNCED));
+ EXPECT_FALSE(Get(metahandle_, IS_UNAPPLIED_UPDATE));
+ EXPECT_LT(0, Get(metahandle_, BASE_VERSION));
+ EXPECT_LT(0, Get(metahandle_, SERVER_VERSION));
+ }
+
+ void ExpectSyncedAndCreated() {
+ EXPECT_EQ(metahandle_, GetMetahandleOfTag());
+ EXPECT_FALSE(Get(metahandle_, IS_DEL));
+ EXPECT_FALSE(Get(metahandle_, SERVER_IS_DEL));
+ EXPECT_LT(0, Get(metahandle_, BASE_VERSION));
+ EXPECT_EQ(Get(metahandle_, BASE_VERSION), Get(metahandle_, SERVER_VERSION));
+ EXPECT_FALSE(Get(metahandle_, IS_UNSYNCED));
+ EXPECT_FALSE(Get(metahandle_, IS_UNAPPLIED_UPDATE));
+ }
+
+ void ExpectSyncedAndDeleted() {
+ EXPECT_EQ(metahandle_, GetMetahandleOfTag());
+ EXPECT_TRUE(Get(metahandle_, IS_DEL));
+ EXPECT_TRUE(Get(metahandle_, SERVER_IS_DEL));
+ EXPECT_FALSE(Get(metahandle_, IS_UNSYNCED));
+ EXPECT_FALSE(Get(metahandle_, IS_UNAPPLIED_UPDATE));
+ EXPECT_GE(0, Get(metahandle_, BASE_VERSION));
+ EXPECT_GE(0, Get(metahandle_, SERVER_VERSION));
+ }
+
+ protected:
+ const std::string client_tag_;
+ int64 metahandle_;
+};
+
+TEST_F(SyncerUndeletionTest, UndeleteDuringCommit) {
+ StatusController* status = session_->status_controller();
+
+ Create();
+ ExpectUnsyncedCreation();
+ syncer_->SyncShare(this);
+
+ EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+ EXPECT_EQ(0, status->TotalNumConflictingItems());
+ ExpectSyncedAndCreated();
+
+ // Delete, begin committing the delete, then undelete while committing.
+ Delete();
+ ExpectUnsyncedDeletion();
+ mock_server_->SetMidCommitCallback(
+ NewCallback<SyncerUndeletionTest>(this,
+ &SyncerUndeletionTest::Undelete));
+ syncer_->SyncShare(this);
+
+ // The item ought to exist as an unsynced undeletion (meaning,
+ // we think that the next commit ought to be a recreation commit).
+ EXPECT_EQ(0, status->TotalNumConflictingItems());
+ EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+ ExpectUnsyncedUndeletion();
+
+ // Now, encounter a GetUpdates corresponding to the deletion from
+ // the server. The undeletion should prevail again and be committed.
+ // None of this should trigger any conflict detection -- it is perfectly
+ // normal to receive updates from our own commits.
+ mock_server_->SetMidCommitCallback(NULL);
+ mock_server_->AddUpdateTombstone(Get(metahandle_, ID));
+ syncer_->SyncShare(this);
+ EXPECT_EQ(0, status->TotalNumConflictingItems());
+ EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+ ExpectSyncedAndCreated();
+}
+
+TEST_F(SyncerUndeletionTest, UndeleteBeforeCommit) {
+ StatusController* status = session_->status_controller();
+
+ Create();
+ ExpectUnsyncedCreation();
+ syncer_->SyncShare(this);
+
+ EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+ EXPECT_EQ(0, status->TotalNumConflictingItems());
+ ExpectSyncedAndCreated();
+
+ // Delete and undelete, then sync to pick up the result.
+ Delete();
+ ExpectUnsyncedDeletion();
+ Undelete();
+ ExpectUnsyncedEdit(); // Edit, not undelete: server thinks it exists.
+ syncer_->SyncShare(this);
+
+ // The item ought to have committed successfully.
+ EXPECT_EQ(0, status->TotalNumConflictingItems());
+ EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+ ExpectSyncedAndCreated();
+ EXPECT_EQ(2, Get(metahandle_, BASE_VERSION));
+
+ // Now, encounter a GetUpdates corresponding to the just-committed
+ // update.
+ mock_server_->AddUpdateFromLastCommit();
+ syncer_->SyncShare(this);
+ EXPECT_EQ(0, status->TotalNumConflictingItems());
+ EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+ ExpectSyncedAndCreated();
+}
+
+TEST_F(SyncerUndeletionTest, UndeleteAfterCommitButBeforeGetUpdates) {
+ StatusController* status = session_->status_controller();
+
+ Create();
+ ExpectUnsyncedCreation();
+ syncer_->SyncShare(this);
+
+ EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+ EXPECT_EQ(0, status->TotalNumConflictingItems());
+ ExpectSyncedAndCreated();
+
+ // Delete and commit.
+ Delete();
+ ExpectUnsyncedDeletion();
+ syncer_->SyncShare(this);
+
+ // The item ought to have committed successfully.
+ EXPECT_EQ(0, status->TotalNumConflictingItems());
+ EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+ ExpectSyncedAndDeleted();
+
+ // Before the GetUpdates, the item is locally undeleted.
+ Undelete();
+ ExpectUnsyncedUndeletion();
+
+ // Now, encounter a GetUpdates corresponding to the just-committed
+ // deletion update. The undeletion should prevail.
+ mock_server_->AddUpdateFromLastCommit();
+ syncer_->SyncShare(this);
+ EXPECT_EQ(0, status->TotalNumConflictingItems());
+ EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+ ExpectSyncedAndCreated();
+}
+
+TEST_F(SyncerUndeletionTest, UndeleteAfterDeleteAndGetUpdates) {
+ StatusController* status = session_->status_controller();
+
+ Create();
+ ExpectUnsyncedCreation();
+ syncer_->SyncShare(this);
+
+ EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+ EXPECT_EQ(0, status->TotalNumConflictingItems());
+ ExpectSyncedAndCreated();
+
+ mock_server_->AddUpdateFromLastCommit();
+ syncer_->SyncShare(this);
+ EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+ EXPECT_EQ(0, status->TotalNumConflictingItems());
+ ExpectSyncedAndCreated();
+
+ // Delete and commit.
+ Delete();
+ ExpectUnsyncedDeletion();
+ syncer_->SyncShare(this);
+
+ // The item ought to have committed successfully.
+ EXPECT_EQ(0, status->TotalNumConflictingItems());
+ EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+ ExpectSyncedAndDeleted();
+
+ // Now, encounter a GetUpdates corresponding to the just-committed
+ // deletion update. Should be consistent.
+ mock_server_->AddUpdateFromLastCommit();
+ syncer_->SyncShare(this);
+ EXPECT_EQ(0, status->TotalNumConflictingItems());
+ EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+ ExpectSyncedAndDeleted();
+
+ // After the GetUpdates, the item is locally undeleted.
+ Undelete();
+ ExpectUnsyncedUndeletion();
+
+ // Now, encounter a GetUpdates corresponding to the just-committed
+ // deletion update. The undeletion should prevail.
+ syncer_->SyncShare(this);
+ EXPECT_EQ(0, status->TotalNumConflictingItems());
+ EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+ ExpectSyncedAndCreated();
+}
+
+// Test processing of undeletion GetUpdates responses.
+TEST_F(SyncerUndeletionTest, UndeleteAfterOtherClientDeletes) {
+ StatusController* status = session_->status_controller();
+
+ Create();
+ ExpectUnsyncedCreation();
+ syncer_->SyncShare(this);
+
+ EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+ EXPECT_EQ(0, status->TotalNumConflictingItems());
+ ExpectSyncedAndCreated();
+
+ // Get the updates of our just-committed entry.
+ mock_server_->AddUpdateFromLastCommit();
+ syncer_->SyncShare(this);
+ EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+ EXPECT_EQ(0, status->TotalNumConflictingItems());
+ ExpectSyncedAndCreated();
+
+ // Some other client deletes the item.
+ mock_server_->AddUpdateTombstone(Get(metahandle_, ID));
+ syncer_->SyncShare(this);
+
+ // The update ought to have applied successfully.
+ EXPECT_EQ(0, status->TotalNumConflictingItems());
+ EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+ ExpectSyncedAndDeleted();
+
+ // Undelete it locally.
+ Undelete();
+ ExpectUnsyncedUndeletion();
+ syncer_->SyncShare(this);
+ EXPECT_EQ(0, status->TotalNumConflictingItems());
+ EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+ ExpectSyncedAndCreated();
+
+ // Now, encounter a GetUpdates corresponding to the just-committed
+ // undeletion update. The item should remain undeleted.
+ mock_server_->AddUpdateFromLastCommit();
+ syncer_->SyncShare(this);
+ EXPECT_EQ(0, status->TotalNumConflictingItems());
+ EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+ ExpectSyncedAndCreated();
+}
+
+TEST_F(SyncerUndeletionTest, UndeleteAfterOtherClientDeletesImmediately) {
+ StatusController* status = session_->status_controller();
+
+ Create();
+ ExpectUnsyncedCreation();
+ syncer_->SyncShare(this);
+
+ EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+ EXPECT_EQ(0, status->TotalNumConflictingItems());
+ ExpectSyncedAndCreated();
+
+ // Some other client deletes the item before we see the update
+ // from our original commit.
+ mock_server_->AddUpdateTombstone(Get(metahandle_, ID));
+ syncer_->SyncShare(this);
+
+ // The update ought to have applied successfully.
+ EXPECT_EQ(0, status->TotalNumConflictingItems());
+ EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+ ExpectSyncedAndDeleted();
+
+ // Undelete it locally.
+ Undelete();
+ ExpectUnsyncedUndeletion();
+ syncer_->SyncShare(this);
+ EXPECT_EQ(0, status->TotalNumConflictingItems());
+ EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+ ExpectSyncedAndCreated();
+
+ // Now, encounter a GetUpdates corresponding to the just-committed
+ // undeletion update. The item should remain undeleted.
+ mock_server_->AddUpdateFromLastCommit();
+ syncer_->SyncShare(this);
+ EXPECT_EQ(0, status->TotalNumConflictingItems());
+ EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+ ExpectSyncedAndCreated();
+}
+
+TEST_F(SyncerUndeletionTest, OtherClientUndeletes) {
+ StatusController* status = session_->status_controller();
+
+ Create();
+ ExpectUnsyncedCreation();
+ syncer_->SyncShare(this);
+
+ EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+ EXPECT_EQ(0, status->TotalNumConflictingItems());
+ ExpectSyncedAndCreated();
+
+ // Get the updates of our just-committed entry.
+ mock_server_->AddUpdateFromLastCommit();
+ syncer_->SyncShare(this);
+ EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+ EXPECT_EQ(0, status->TotalNumConflictingItems());
+ ExpectSyncedAndCreated();
+
+ // We delete the item.
+ Delete();
+ ExpectUnsyncedDeletion();
+ syncer_->SyncShare(this);
+
+ // The update ought to have applied successfully.
+ EXPECT_EQ(0, status->TotalNumConflictingItems());
+ EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+ ExpectSyncedAndDeleted();
+
+ // Now, encounter a GetUpdates corresponding to the just-committed
+ // deletion update.
+ mock_server_->AddUpdateFromLastCommit();
+ syncer_->SyncShare(this);
+ EXPECT_EQ(0, status->TotalNumConflictingItems());
+ EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+ ExpectSyncedAndDeleted();
+
+ // Some other client undeletes the item.
+ mock_server_->AddUpdateBookmark(Get(metahandle_, ID),
+ Get(metahandle_, PARENT_ID),
+ "Thadeusz", 100, 1000);
+ mock_server_->SetLastUpdateClientTag(client_tag_);
+ syncer_->SyncShare(this);
+ EXPECT_EQ(0, status->TotalNumConflictingItems());
+ EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+ ExpectSyncedAndCreated();
+ EXPECT_EQ("Thadeusz", Get(metahandle_, NON_UNIQUE_NAME));
+}
+
+TEST_F(SyncerUndeletionTest, OtherClientUndeletesImmediately) {
+ StatusController* status = session_->status_controller();
+
+ Create();
+ ExpectUnsyncedCreation();
+ syncer_->SyncShare(this);
+
+ EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+ EXPECT_EQ(0, status->TotalNumConflictingItems());
+ ExpectSyncedAndCreated();
+
+ // Get the updates of our just-committed entry.
+ mock_server_->AddUpdateFromLastCommit();
+ syncer_->SyncShare(this);
+ EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+ EXPECT_EQ(0, status->TotalNumConflictingItems());
+ ExpectSyncedAndCreated();
+
+ // We delete the item.
+ Delete();
+ ExpectUnsyncedDeletion();
+ syncer_->SyncShare(this);
+
+ // The update ought to have applied successfully.
+ EXPECT_EQ(0, status->TotalNumConflictingItems());
+ EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+ ExpectSyncedAndDeleted();
+
+ // Some other client undeletes before we see the update from our
+ // commit.
+ mock_server_->AddUpdateBookmark(Get(metahandle_, ID),
+ Get(metahandle_, PARENT_ID),
+ "Thadeusz", 100, 1000);
+ mock_server_->SetLastUpdateClientTag(client_tag_);
+ syncer_->SyncShare(this);
+ EXPECT_EQ(0, status->TotalNumConflictingItems());
+ EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
+ ExpectSyncedAndCreated();
+ EXPECT_EQ("Thadeusz", Get(metahandle_, NON_UNIQUE_NAME));
+}
+
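
Taken together, the scenarios above hinge on one classification: an undelete counts as a plain edit while the server still thinks the item exists, and as a true undeletion once the delete has been committed. The following is a minimal standalone model of that rule, using an invented FakeEntry in place of the real syncable bits; it only restates what the test comments above describe.

  #include <cassert>

  // Invented stand-in for the handful of syncable bits the tests care about.
  struct FakeEntry {
    bool is_unsynced;    // local change awaiting commit
    bool is_del;         // locally deleted
    bool server_is_del;  // last state the server acknowledged
    FakeEntry() : is_unsynced(false), is_del(false), server_is_del(false) {}
  };

  // Deleting always leaves an unsynced local tombstone.
  void Delete(FakeEntry* e) {
    e->is_del = true;
    e->is_unsynced = true;
  }

  enum UndeleteKind { UNDELETE_IS_EDIT, UNDELETE_IS_UNDELETION };

  // An undelete is just an edit while the server still thinks the item
  // exists; it only becomes a real undeletion once the delete was committed.
  UndeleteKind Undelete(FakeEntry* e) {
    e->is_del = false;
    e->is_unsynced = true;
    return e->server_is_del ? UNDELETE_IS_UNDELETION : UNDELETE_IS_EDIT;
  }

  int main() {
    FakeEntry quick;             // delete + undelete before any commit
    Delete(&quick);
    assert(Undelete(&quick) == UNDELETE_IS_EDIT);

    FakeEntry committed;         // the deletion reached the server first
    Delete(&committed);
    committed.server_is_del = true;  // as if the commit of the delete succeeded
    committed.is_unsynced = false;
    assert(Undelete(&committed) == UNDELETE_IS_UNDELETION);
    return 0;
  }
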
+// A group of tests exercising the syncer's handling of sibling ordering, as
+// represented in the sync protocol.
class SyncerPositionUpdateTest : public SyncerTest {
public:
SyncerPositionUpdateTest() : next_update_id_(1), next_revision_(1) {}
diff --git a/chrome/browser/sync/engine/syncer_util.cc b/chrome/browser/sync/engine/syncer_util.cc
index 8b5f7b9..3133a337 100644
--- a/chrome/browser/sync/engine/syncer_util.cc
+++ b/chrome/browser/sync/engine/syncer_util.cc
@@ -132,6 +132,10 @@ void SyncerUtil::ChangeEntryIDAndUpdateChildren(
// static
void SyncerUtil::AttemptReuniteClientTag(syncable::WriteTransaction* trans,
const SyncEntity& server_entry) {
+ if (!server_entry.has_client_defined_unique_tag() ||
+ server_entry.client_defined_unique_tag().empty()) {
+ return;
+ }
// Expected entry points of this function:
// SyncEntity has NOT been applied to SERVER fields.
@@ -148,9 +152,9 @@ void SyncerUtil::AttemptReuniteClientTag(syncable::WriteTransaction* trans,
// work just fine. Update will end up in the proper entry, via ID lookup.
// Case 2 - Should never happen. We'd need to change the
// ID of the local entry, we refuse. We'll skip this in VERIFY.
- // Case 3 - We need to replace the local ID with the server ID. Conflict
- // resolution must occur, but this is prior to update application! This case
- // should be rare. For now, clobber client changes entirely.
+ // Case 3 - We need to replace the local ID with the server ID so that
+ // this update gets targeted at the correct local entry; we expect conflict
+ // resolution to occur.
// Case 4 - Perfect. Same as case 1.
syncable::MutableEntry local_entry(trans, syncable::GET_BY_CLIENT_TAG,
@@ -167,18 +171,13 @@ void SyncerUtil::AttemptReuniteClientTag(syncable::WriteTransaction* trans,
DCHECK(local_entry.Get(ID) == server_entry.id());
} else {
// Case 3: We have a local entry with the same client tag.
- // We can't have two updates with the same client tag though.
- // One of these has to go. Let's delete the client entry and move it
- // aside. This will cause a delete + create. The client API user must
- // handle this correctly. In this situation the client must have created
- // this entry but not yet committed it for the first time. Usually the
- // client probably wants the server data for this instead.
- // Other strategies to handle this are a bit flaky.
- DCHECK(local_entry.Get(IS_UNAPPLIED_UPDATE) == false);
- local_entry.Put(IS_UNSYNCED, false);
- local_entry.Put(IS_DEL, true);
- // Needs to get out of the index before our update can be put in.
- local_entry.Put(UNIQUE_CLIENT_TAG, "");
+ // We should change the ID of the local entry to the server entry.
+ // This will result in a server ID with base version == 0, but that's
+ // a legal state for an item with a client tag. By changing the ID,
+ // server_entry will now be applied to local_entry.
+ ChangeEntryIDAndUpdateChildren(trans, &local_entry, server_entry.id());
+ DCHECK(0 == local_entry.Get(BASE_VERSION) ||
+ CHANGES_VERSION == local_entry.Get(BASE_VERSION));
}
}
// Case 4: Client has no entry for tag, all green.
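
The renaming behavior for case 3 can be stated compactly. The sketch below uses toy FakeId and FakeLocalEntry types rather than the real syncable API, and only restates the decision visible in the hunk above: same client tag, different ID, and a not-server-known local ID means we adopt the server ID rather than delete and recreate.

  #include <cassert>
  #include <string>

  // Toy ID type: mirrors the convention that server-assigned IDs carry an
  // 's' prefix while locally generated ones use 'c'.
  struct FakeId {
    std::string value;
    bool ServerKnows() const { return !value.empty() && value[0] == 's'; }
    bool operator==(const FakeId& other) const { return value == other.value; }
  };

  struct FakeLocalEntry {
    FakeId id;
    std::string client_tag;
  };

  // Case 3 of AttemptReuniteClientTag, reduced to the decision itself: when a
  // server update carries our client tag under a different ID, adopt the
  // server ID so the update lands on this entry; conflict resolution then
  // reconciles the contents.
  void ReuniteByClientTag(FakeLocalEntry* local, const FakeId& server_id) {
    if (local->id == server_id)
      return;                // cases 1 and 4: nothing to do
    if (local->id.ServerKnows())
      return;                // case 2: never rename a server-known ID
    local->id = server_id;   // case 3: rename instead of delete + recreate
  }

  int main() {
    FakeLocalEntry entry;
    entry.id.value = "c42";              // locally generated ID
    entry.client_tag = "some-client-tag";
    FakeId server_id;
    server_id.value = "s1001";
    ReuniteByClientTag(&entry, server_id);
    assert(entry.id.value == "s1001");
    return 0;
  }
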
@@ -201,7 +200,7 @@ void SyncerUtil::AttemptReuniteLostCommitResponses(
// a reasonable span, our commit batches have to be small enough
// to process within our HTTP response "assumed alive" time.
- // We need to check if we have a that didn't get its server
+ // We need to check if we have an entry that didn't get its server
// id updated correctly. The server sends down a client ID
// and a local (negative) id. If we have a entry by that
// description, we should update the ID and version to the
@@ -210,21 +209,21 @@ void SyncerUtil::AttemptReuniteLostCommitResponses(
server_entry.originator_cache_guid() == client_id) {
syncable::Id server_id = syncable::Id::CreateFromClientString(
server_entry.originator_client_item_id());
- CHECK(!server_id.ServerKnows());
+ DCHECK(!server_id.ServerKnows());
syncable::MutableEntry local_entry(trans, GET_BY_ID, server_id);
// If it exists, then our local client lost a commit response.
if (local_entry.good() && !local_entry.Get(IS_DEL)) {
int64 old_version = local_entry.Get(BASE_VERSION);
int64 new_version = server_entry.version();
- CHECK(old_version <= 0);
- CHECK(new_version > 0);
+ DCHECK(old_version <= 0);
+ DCHECK(new_version > 0);
// Otherwise setting the base version could cause a consistency failure.
// An entry should never be version 0 and SYNCED.
- CHECK(local_entry.Get(IS_UNSYNCED));
+ DCHECK(local_entry.Get(IS_UNSYNCED));
// Just a quick sanity check.
- CHECK(!local_entry.Get(ID).ServerKnows());
+ DCHECK(!local_entry.Get(ID).ServerKnows());
LOG(INFO) << "Reuniting lost commit response IDs" <<
" server id: " << server_entry.id() << " local id: " <<
@@ -238,8 +237,8 @@ void SyncerUtil::AttemptReuniteLostCommitResponses(
// reunited its ID.
}
// !local_entry.Good() means we don't have a left behind entry for this
- // ID. We successfully committed before. In the future we should get rid
- // of this system and just have client side generated IDs as a whole.
+ // ID in the sync database. This could happen if we crashed after
+ // successfully committing an item that was never flushed to disk.
}
}
@@ -352,17 +351,31 @@ void SyncerUtil::UpdateServerFieldsFromUpdate(
const SyncEntity& server_entry,
const string& name) {
if (server_entry.deleted()) {
+ if (local_entry->Get(SERVER_IS_DEL)) {
+ // If we already think the item is server-deleted, we're done.
+ // Skipping these cases prevents our committed deletions from coming
+ // back and overriding subsequent undeletions. For non-deleted items,
+ // the version number check has a similar effect.
+ return;
+ }
// The server returns very lightweight replies for deletions, so we don't
// clobber a bunch of fields on delete.
local_entry->Put(SERVER_IS_DEL, true);
- local_entry->Put(SERVER_VERSION,
- std::max(local_entry->Get(SERVER_VERSION),
- local_entry->Get(BASE_VERSION)) + 1L);
+ if (!local_entry->Get(UNIQUE_CLIENT_TAG).empty()) {
+ // Items identified by the client unique tag can be undeleted; when
+ // they're deleted, their version goes back to 0.
+ local_entry->Put(SERVER_VERSION, 0);
+ } else {
+ // Otherwise, fake a server version by bumping the local number.
+ local_entry->Put(SERVER_VERSION,
+ std::max(local_entry->Get(SERVER_VERSION),
+ local_entry->Get(BASE_VERSION)) + 1);
+ }
local_entry->Put(IS_UNAPPLIED_UPDATE, true);
return;
}
- CHECK(local_entry->Get(ID) == server_entry.id())
+ DCHECK(local_entry->Get(ID) == server_entry.id())
<< "ID Changing not supported here";
local_entry->Put(SERVER_PARENT_ID, server_entry.parent_id());
local_entry->Put(SERVER_NON_UNIQUE_NAME, name);
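
The deletion branch above carries most of change (d). As a rough standalone restatement, with a hypothetical FakeServerFields struct in place of the real MutableEntry interface:

  #include <algorithm>
  #include <cassert>
  #include <string>

  // Hypothetical flattened view of the SERVER_* fields touched above.
  struct FakeServerFields {
    std::string unique_client_tag;
    long server_version;
    long base_version;
    bool server_is_del;
    FakeServerFields()
        : server_version(0), base_version(0), server_is_del(false) {}
  };

  // Restates the deletion branch of UpdateServerFieldsFromUpdate.
  void ApplyServerDeletion(FakeServerFields* e) {
    if (e->server_is_del)
      return;  // already known deleted; don't let a stale tombstone clobber
               // a later undeletion
    e->server_is_del = true;
    if (!e->unique_client_tag.empty()) {
      // Client-tagged items can be undeleted: deletion resets the server
      // version to 0 so a later undelete is processed as a re-create.
      e->server_version = 0;
    } else {
      // Legacy items keep the old behavior of synthesizing a higher version.
      e->server_version = std::max(e->server_version, e->base_version) + 1;
    }
  }

  int main() {
    FakeServerFields tagged;
    tagged.unique_client_tag = "some-client-tag";
    tagged.base_version = 7;
    tagged.server_version = 7;
    ApplyServerDeletion(&tagged);
    assert(tagged.server_version == 0);

    FakeServerFields untagged;
    untagged.base_version = 7;
    untagged.server_version = 7;
    ApplyServerDeletion(&untagged);
    assert(untagged.server_version == 8);
    return 0;
  }
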
@@ -505,8 +518,8 @@ void SyncerUtil::SplitServerInformationIntoNewEntry(
void SyncerUtil::UpdateLocalDataFromServerData(
syncable::WriteTransaction* trans,
syncable::MutableEntry* entry) {
- CHECK(!entry->Get(IS_UNSYNCED));
- CHECK(entry->Get(IS_UNAPPLIED_UPDATE));
+ DCHECK(!entry->Get(IS_UNSYNCED));
+ DCHECK(entry->Get(IS_UNAPPLIED_UPDATE));
LOG(INFO) << "Updating entry : " << *entry;
// Start by setting the properties that determine the model_type.
@@ -723,9 +736,8 @@ VerifyResult SyncerUtil::VerifyUpdateConsistency(
same_id->Get(BASE_VERSION) > 0))) {
// An undelete. The latter case in the above condition is for
// when the server does not give us an update following the
- // commit of a delete, before undeleting. Undeletion is possible
- // in the server's storage backend, so it's possible on the client,
- // though not expected to be something that is commonly possible.
+ // commit of a delete, before undeleting.
+ // Undeletion is common for items that reuse the client-unique tag.
VerifyResult result =
SyncerUtil::VerifyUndelete(trans, entry, same_id);
if (VERIFY_UNDECIDED != result)
@@ -769,12 +781,22 @@ VerifyResult SyncerUtil::VerifyUpdateConsistency(
VerifyResult SyncerUtil::VerifyUndelete(syncable::WriteTransaction* trans,
const SyncEntity& entry,
syncable::MutableEntry* same_id) {
+ // TODO(nick): We hit this path for deleted items that the server
+ // tells us to re-create; only deleted items with positive base versions
+ // will hit this path. However, it's not clear how such an undeletion
+ // would actually succeed on the server; in the protocol, a base
+ // version of 0 is required to undelete an object. This codepath
+ // should be deprecated in favor of client-tag style undeletion
+ // (where items go to version 0 when they're deleted), or else
+ // removed entirely (if this type of undeletion is indeed impossible).
CHECK(same_id->good());
LOG(INFO) << "Server update is attempting undelete. " << *same_id
<< "Update:" << SyncerProtoUtil::SyncEntityDebugString(entry);
// Move the old one aside and start over. It's too tricky to get the old one
// back into a state that would pass CheckTreeInvariants().
if (same_id->Get(IS_DEL)) {
+ DCHECK(same_id->Get(UNIQUE_CLIENT_TAG).empty())
+ << "Doing move-aside undeletion on client-tagged item.";
same_id->Put(ID, trans->directory()->NextId());
same_id->Put(UNIQUE_CLIENT_TAG, "");
same_id->Put(BASE_VERSION, CHANGES_VERSION);
diff --git a/chrome/browser/sync/engine/syncproto.h b/chrome/browser/sync/engine/syncproto.h
index 3b9acb0..b8f0a75 100644
--- a/chrome/browser/sync/engine/syncproto.h
+++ b/chrome/browser/sync/engine/syncproto.h
@@ -19,6 +19,9 @@ namespace browser_sync {
template<class Base>
class IdWrapper : public Base {
public:
+ IdWrapper() {}
+ explicit IdWrapper(const Base& other) : Base(other) {
+ }
syncable::Id id() const {
return syncable::Id::CreateFromServerId(Base::id_string());
}
@@ -31,6 +34,11 @@ class IdWrapper : public Base {
// them directly.
class SyncEntity : public IdWrapper<sync_pb::SyncEntity> {
public:
+ SyncEntity() {}
+ explicit SyncEntity(const sync_pb::SyncEntity& other)
+ : IdWrapper<sync_pb::SyncEntity>(other) {
+ }
+
void set_parent_id(const syncable::Id& id) {
set_parent_id_string(id.GetServerId());
}
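
These constructors let code that only holds plain sync_pb messages, such as the commit-response processing that now receives the const CommitMessage, view individual entries through the typed wrapper. A hypothetical fragment, not part of the patch and assuming the usual sync headers are available:

  // Illustrative only: iterate over a commit message built elsewhere and use
  // the typed accessors the wrapper adds on top of the plain protobuf entity.
  void LogCommittedIds(const sync_pb::CommitMessage& commit_message) {
    for (int i = 0; i < commit_message.entries_size(); ++i) {
      browser_sync::SyncEntity entity(commit_message.entries(i));
      LOG(INFO) << "committed entry " << i << " has id " << entity.id();
    }
  }
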
diff --git a/chrome/browser/sync/engine/verify_updates_command.cc b/chrome/browser/sync/engine/verify_updates_command.cc
index 1cf168e..7d6fde2 100644
--- a/chrome/browser/sync/engine/verify_updates_command.cc
+++ b/chrome/browser/sync/engine/verify_updates_command.cc
@@ -94,10 +94,6 @@ VerifyUpdatesCommand::VerifyUpdateResult VerifyUpdatesCommand::VerifyUpdate(
LOG(ERROR) << "Illegal negative id in received updates";
return result;
}
- if (!entry.parent_id().ServerKnows()) {
- LOG(ERROR) << "Illegal parent id in received updates";
- return result;
- }
{
const std::string name = SyncerProtoUtil::NameFromSyncEntity(entry);
if (name.empty() && !deleted) {
diff --git a/chrome/browser/sync/sessions/status_controller.h b/chrome/browser/sync/sessions/status_controller.h
index c7c89dd..f1e4a28 100644
--- a/chrome/browser/sync/sessions/status_controller.h
+++ b/chrome/browser/sync/sessions/status_controller.h
@@ -73,6 +73,9 @@ class StatusController {
}
// ClientToServer messages.
+ const ClientToServerMessage& commit_message() {
+ return shared_.commit_message;
+ }
ClientToServerMessage* mutable_commit_message() {
return &shared_.commit_message;
}
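
The new read-only accessor is what lets a later stage consult the commit message that was actually sent, rather than inferring server state from local flags. A hedged sketch of the call pattern, assuming a SyncSession* named session is in scope; the fragment itself is not part of the patch:

  // Illustrative only: read back the commit request that was just posted.
  const ClientToServerMessage& request =
      session->status_controller()->commit_message();
  if (request.has_commit()) {
    LOG(INFO) << "last commit carried "
              << request.commit().entries_size() << " entries";
  }
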
diff --git a/chrome/browser/sync/syncable/syncable.cc b/chrome/browser/sync/syncable/syncable.cc
index 2f11806..7c6cc71 100644
--- a/chrome/browser/sync/syncable/syncable.cc
+++ b/chrome/browser/sync/syncable/syncable.cc
@@ -863,23 +863,38 @@ void Directory::CheckTreeInvariants(syncable::BaseTransaction* trans,
}
int64 base_version = e.Get(BASE_VERSION);
int64 server_version = e.Get(SERVER_VERSION);
+ bool using_unique_client_tag = !e.Get(UNIQUE_CLIENT_TAG).empty();
if (CHANGES_VERSION == base_version || 0 == base_version) {
if (e.Get(IS_UNAPPLIED_UPDATE)) {
- // Unapplied new item.
- CHECK(e.Get(IS_DEL)) << e;
+ // Must be a new item, or a de-duplicated unique client tag
+ // that was created both locally and remotely.
+ if (!using_unique_client_tag) {
+ CHECK(e.Get(IS_DEL)) << e;
+ }
+ // It came from the server, so it must have a server ID.
CHECK(id.ServerKnows()) << e;
} else {
if (e.Get(IS_DIR)) {
// TODO(chron): Implement this mode if clients ever need it.
// For now, you can't combine a client tag and a directory.
- CHECK(e.Get(UNIQUE_CLIENT_TAG).empty()) << e;
+ CHECK(!using_unique_client_tag) << e;
}
- // Uncommitted item.
+ // Should be an uncommitted item, or a successfully deleted one.
if (!e.Get(IS_DEL)) {
CHECK(e.Get(IS_UNSYNCED)) << e;
}
+ // If the next check failed, it would imply that an item exists
+ // on the server, isn't waiting for application locally, but either
+ // is an unsynced create or a successful delete in the local copy.
+ // Either way, that's a mismatch.
CHECK(0 == server_version) << e;
- CHECK(!id.ServerKnows()) << e;
+ // Items that aren't using the unique client tag should have a zero
+ // base version only if they have a local ID. Items with unique client
+ // tags are allowed to use the zero base version for undeletion and
+ // de-duplication; the unique client tag trumps the server ID.
+ if (!using_unique_client_tag) {
+ CHECK(!id.ServerKnows()) << e;
+ }
}
} else {
CHECK(id.ServerKnows());
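
The relaxed invariant can be read as a single predicate: a never-synced base version combined with a server-known ID is only legal for client-tagged items. A standalone restatement with simplified FakeEntryState fields (the CHANGES_VERSION placeholder value is arbitrary):

  #include <cassert>
  #include <string>

  // Arbitrary placeholder for the real CHANGES_VERSION constant.
  const long kFakeChangesVersion = -777;

  // Simplified view of the fields CheckTreeInvariants inspects.
  struct FakeEntryState {
    long base_version;
    bool id_server_knows;
    std::string unique_client_tag;
    FakeEntryState() : base_version(0), id_server_knows(false) {}
  };

  // True when a never-synced base version plus a server-known ID is
  // acceptable: previously never, now allowed for client-tagged items
  // (undeletion and de-duplication put them in exactly this state).
  bool ZeroVersionWithServerIdIsLegal(const FakeEntryState& e) {
    bool never_synced =
        e.base_version == 0 || e.base_version == kFakeChangesVersion;
    if (!never_synced || !e.id_server_knows)
      return true;  // other invariants apply, but not this one
    return !e.unique_client_tag.empty();
  }

  int main() {
    FakeEntryState tagged;
    tagged.id_server_knows = true;
    tagged.unique_client_tag = "some-client-tag";
    assert(ZeroVersionWithServerIdIsLegal(tagged));

    FakeEntryState untagged;
    untagged.id_server_knows = true;
    assert(!ZeroVersionWithServerIdIsLegal(untagged));
    return 0;
  }
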
@@ -892,9 +907,6 @@ void Directory::CheckTreeInvariants(syncable::BaseTransaction* trans,
return;
}
}
- // I did intend to add a check here to ensure no entries had been pulled into
- // memory by this function, but we can't guard against another ReadTransaction
- // pulling entries into RAM
}
browser_sync::ChannelHookup<DirectoryChangeEvent>* Directory::AddChangeObserver(
diff --git a/chrome/browser/sync/syncable/syncable.h b/chrome/browser/sync/syncable/syncable.h
index 8a651a9..8114f00 100644
--- a/chrome/browser/sync/syncable/syncable.h
+++ b/chrome/browser/sync/syncable/syncable.h
@@ -422,7 +422,6 @@ class Entry {
return *kernel_;
}
-
protected: // Don't allow creation on heap, except by sync API wrappers.
friend class sync_api::ReadNode;
void* operator new(size_t size) { return (::operator new)(size); }
diff --git a/chrome/browser/sync/syncable/syncable_id.cc b/chrome/browser/sync/syncable/syncable_id.cc
index 0db300a..1aa2193 100644
--- a/chrome/browser/sync/syncable/syncable_id.cc
+++ b/chrome/browser/sync/syncable/syncable_id.cc
@@ -28,12 +28,6 @@ FastDump& operator << (FastDump& dump, const syncable::Id& id) {
namespace syncable {
-string Id::AsQueryParam() const {
- if ('s' == s_[0])
- return s_.c_str() + 1;
- return "";
-}
-
string Id::GetServerId() const {
// Currently root is the string "0". We need to decide on a true value.
// "" would be convenient here, as the IsRoot call would not be needed.
diff --git a/chrome/browser/sync/syncable/syncable_id.h b/chrome/browser/sync/syncable/syncable_id.h
index 73e26c2..ed8a2e5 100644
--- a/chrome/browser/sync/syncable/syncable_id.h
+++ b/chrome/browser/sync/syncable/syncable_id.h
@@ -84,7 +84,6 @@ class Id {
inline void Clear() {
s_ = "r";
}
- std::string AsQueryParam() const;
// Must never allow id == 0 or id < 0 to compile.
inline bool operator == (const Id& that) const {
return s_ == that.s_;
@@ -99,13 +98,13 @@ class Id {
return s_ > that.s_;
}
- public:
- // Three functions used to work with our proto buffers.
+ // Three functions are used to work with our proto buffers.
std::string GetServerId() const;
static Id CreateFromServerId(const std::string& server_id);
// This should only be used if you get back a reference to a local
// id from the server. Returns a client only opaque id.
static Id CreateFromClientString(const std::string& local_id);
+
protected:
std::string s_;
};
diff --git a/chrome/test/sync/engine/mock_connection_manager.cc b/chrome/test/sync/engine/mock_connection_manager.cc
index ac1e0f1..5aec1ba 100644
--- a/chrome/test/sync/engine/mock_connection_manager.cc
+++ b/chrome/test/sync/engine/mock_connection_manager.cc
@@ -44,7 +44,6 @@ MockConnectionManager::MockConnectionManager(DirectoryManager* dirmgr,
fail_next_postbuffer_(false),
directory_manager_(dirmgr),
directory_name_(name),
- mid_commit_callback_(NULL),
mid_commit_observer_(NULL),
throttling_(false),
fail_with_auth_invalid_(false),
@@ -57,9 +56,6 @@ MockConnectionManager::MockConnectionManager(DirectoryManager* dirmgr,
};
MockConnectionManager::~MockConnectionManager() {
- for (size_t i = 0; i < commit_messages_.size(); i++)
- delete commit_messages_[i];
- delete mid_commit_callback_;
}
void MockConnectionManager::SetCommitTimeRename(string prepend) {
@@ -67,7 +63,7 @@ void MockConnectionManager::SetCommitTimeRename(string prepend) {
}
void MockConnectionManager::SetMidCommitCallback(Closure* callback) {
- mid_commit_callback_ = callback;
+ mid_commit_callback_.reset(callback);
}
void MockConnectionManager::SetMidCommitObserver(
@@ -148,7 +144,8 @@ bool MockConnectionManager::PostBufferToPath(const PostBufferParams* params,
}
response.SerializeToString(params->buffer_out);
- if (mid_commit_callback_) {
+ if (post.message_contents() == ClientToServerMessage::COMMIT &&
+ mid_commit_callback_.get()) {
mid_commit_callback_->Run();
}
if (mid_commit_observer_) {
@@ -250,8 +247,47 @@ SyncEntity* MockConnectionManager::AddUpdateBookmark(string id,
return AddUpdateFull(id, parent_id, name, version, sync_ts, false);
}
+SyncEntity* MockConnectionManager::AddUpdateFromLastCommit() {
+ EXPECT_EQ(1, last_sent_commit().entries_size());
+ EXPECT_EQ(1, last_commit_response().entryresponse_size());
+ EXPECT_EQ(CommitResponse::SUCCESS,
+ last_commit_response().entryresponse(0).response_type());
+
+ if (last_sent_commit().entries(0).deleted()) {
+ AddUpdateTombstone(syncable::Id::CreateFromServerId(
+ last_sent_commit().entries(0).id_string()));
+ } else {
+ SyncEntity* ent = updates_.add_entries();
+ ent->CopyFrom(last_sent_commit().entries(0));
+ ent->clear_insert_after_item_id();
+ ent->clear_old_parent_id();
+ ent->set_position_in_parent(
+ last_commit_response().entryresponse(0).position_in_parent());
+ ent->set_version(
+ last_commit_response().entryresponse(0).version());
+ ent->set_id_string(
+ last_commit_response().entryresponse(0).id_string());
+ // Tests don't currently care about the following:
+ // originator_cache_guid, originator_client_item_id, parent_id_string,
+ // name, non_unique_name.
+ }
+ return GetMutableLastUpdate();
+}
+
+void MockConnectionManager::AddUpdateTombstone(const syncable::Id& id) {
+ // Tombstones have only the ID set and dummy values for the required fields.
+ SyncEntity* ent = updates_.add_entries();
+ ent->set_id_string(id.GetServerId());
+ ent->set_version(0);
+ ent->set_name("");
+ ent->set_deleted(true);
+}
+
void MockConnectionManager::SetLastUpdateDeleted() {
- GetMutableLastUpdate()->set_deleted(true);
+ // Tombstones have only the ID set. Wipe anything else.
+ string id_string = GetMutableLastUpdate()->id_string();
+ updates_.mutable_entries()->RemoveLast();
+ AddUpdateTombstone(syncable::Id::CreateFromServerId(id_string));
}
void MockConnectionManager::SetLastUpdateOriginatorFields(
@@ -368,8 +404,8 @@ void MockConnectionManager::ProcessCommit(ClientToServerMessage* csm,
map <string, string> changed_ids;
const CommitMessage& commit_message = csm->commit();
CommitResponse* commit_response = response_buffer->mutable_commit();
- commit_messages_.push_back(new CommitMessage);
- commit_messages_.back()->CopyFrom(commit_message);
+ commit_messages_->push_back(new CommitMessage);
+ commit_messages_->back()->CopyFrom(commit_message);
map<string, CommitResponse_EntryResponse*> response_map;
for (int i = 0; i < commit_message.entries_size() ; i++) {
const sync_pb::SyncEntity& entry = commit_message.entries(i);
@@ -411,6 +447,7 @@ void MockConnectionManager::ProcessCommit(ClientToServerMessage* csm,
er->set_id_string(new_id);
}
}
+ commit_responses_->push_back(new CommitResponse(*commit_response));
}
SyncEntity* MockConnectionManager::AddUpdateDirectory(
@@ -428,13 +465,18 @@ SyncEntity* MockConnectionManager::AddUpdateBookmark(
}
SyncEntity* MockConnectionManager::GetMutableLastUpdate() {
- DCHECK(updates_.entries_size() > 0);
+ EXPECT_TRUE(updates_.entries_size() > 0);
return updates_.mutable_entries()->Mutable(updates_.entries_size() - 1);
}
const CommitMessage& MockConnectionManager::last_sent_commit() const {
- DCHECK(!commit_messages_.empty());
- return *commit_messages_.back();
+ EXPECT_TRUE(!commit_messages_.empty());
+ return *commit_messages_->back();
+}
+
+const CommitResponse& MockConnectionManager::last_commit_response() const {
+ EXPECT_TRUE(!commit_responses_.empty());
+ return *commit_responses_->back();
}
void MockConnectionManager::ThrottleNextRequest(
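
AddUpdateFromLastCommit gives tests a one-line way to make the mock server echo whatever was just committed, with deletions turned into bare tombstones. Stripped of the protobuf plumbing, its behavior is approximately the following standalone sketch; FakeCommittedEntry and FakeUpdate are invented stand-ins, not the real message types.

  #include <cassert>
  #include <string>
  #include <vector>

  // Invented stand-ins for the committed record and the queued update.
  struct FakeCommittedEntry {
    std::string id;
    long committed_version;  // version assigned by the commit response
    bool deleted;
  };

  struct FakeUpdate {
    std::string id;
    long version;
    bool deleted;
  };

  // Replays the most recent commit as the next update, the way the mock's
  // AddUpdateFromLastCommit does: deletions become bare version-0
  // tombstones, anything else is echoed with the server-assigned version.
  void ReplayLastCommit(const std::vector<FakeCommittedEntry>& commits,
                        std::vector<FakeUpdate>* updates) {
    assert(!commits.empty());
    const FakeCommittedEntry& last = commits.back();
    FakeUpdate update;
    update.id = last.id;
    update.deleted = last.deleted;
    update.version = last.deleted ? 0 : last.committed_version;
    updates->push_back(update);
  }

  int main() {
    std::vector<FakeCommittedEntry> commits;
    std::vector<FakeUpdate> updates;

    FakeCommittedEntry created = { "s1001", 2, false };
    commits.push_back(created);
    ReplayLastCommit(commits, &updates);
    assert(updates.back().version == 2);

    FakeCommittedEntry tombstone = { "s1001", 0, true };
    commits.push_back(tombstone);
    ReplayLastCommit(commits, &updates);
    assert(updates.back().deleted && updates.back().version == 0);
    return 0;
  }
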
diff --git a/chrome/test/sync/engine/mock_connection_manager.h b/chrome/test/sync/engine/mock_connection_manager.h
index 4eff4d4..be2144f 100644
--- a/chrome/test/sync/engine/mock_connection_manager.h
+++ b/chrome/test/sync/engine/mock_connection_manager.h
@@ -11,6 +11,7 @@
#include <string>
#include <vector>
+#include "base/scoped_vector.h"
#include "chrome/browser/sync/engine/net/server_connection_manager.h"
#include "chrome/browser/sync/protocol/sync.pb.h"
#include "chrome/browser/sync/syncable/directory_manager.h"
@@ -57,6 +58,7 @@ class MockConnectionManager : public browser_sync::ServerConnectionManager {
// once. This mock object doesn't simulate a changelist, it simulates server
// responses.
void ResetUpdates();
+
// Generic versions of AddUpdate functions. Tests using these function should
// compile for both the int64 and string id based versions of the server.
// The SyncEntity returned is only valid until the Sync is completed
@@ -94,6 +96,17 @@ class MockConnectionManager : public browser_sync::ServerConnectionManager {
string name,
int64 version,
int64 sync_ts);
+
+ // Find the last commit sent by the client, and replay it for the next get
+ // updates command. This can be used to simulate the GetUpdates that happens
+ // immediately after a successful commit.
+ sync_pb::SyncEntity* AddUpdateFromLastCommit();
+
+ // Add a deleted item. Deletion records typically contain no
+ // additional information beyond the deletion, and no specifics.
+ // The server may send the originator fields.
+ void AddUpdateTombstone(const syncable::Id& id);
+
void SetLastUpdateDeleted();
void SetLastUpdateServerTag(const string& tag);
void SetLastUpdateClientTag(const string& tag);
@@ -134,11 +147,17 @@ class MockConnectionManager : public browser_sync::ServerConnectionManager {
return committed_ids_;
}
const std::vector<sync_pb::CommitMessage*>& commit_messages() const {
- return commit_messages_;
+ return commit_messages_.get();
+ }
+ const std::vector<sync_pb::CommitResponse*>& commit_responses() const {
+ return commit_responses_.get();
}
// Retrieve the last sent commit message.
const sync_pb::CommitMessage& last_sent_commit() const;
+ // Retrieve the last returned commit response.
+ const sync_pb::CommitResponse& last_commit_response() const;
+
// Retrieve the last request submitted to the server (regardless of type).
const sync_pb::ClientToServerMessage& last_request() const {
return last_request_;
@@ -179,6 +198,11 @@ class MockConnectionManager : public browser_sync::ServerConnectionManager {
void SetServerNotReachable();
+ std::string store_birthday() { return store_birthday_; }
+
+ // Locate the most recent update message for purpose of alteration.
+ sync_pb::SyncEntity* GetMutableLastUpdate();
+
private:
sync_pb::SyncEntity* AddUpdateFull(syncable::Id id, syncable::Id parentid,
string name, int64 version,
@@ -198,9 +222,6 @@ class MockConnectionManager : public browser_sync::ServerConnectionManager {
void AddDefaultBookmarkData(sync_pb::SyncEntity* entity, bool is_folder);
- // Locate the most recent update message for purpose of alteration.
- sync_pb::SyncEntity* GetMutableLastUpdate();
-
// Determine if one entry in a commit should be rejected with a conflict.
bool ShouldConflictThisCommit();
@@ -224,8 +245,9 @@ class MockConnectionManager : public browser_sync::ServerConnectionManager {
bool conflict_all_commits_;
int conflict_n_commits_;
- // Commit messages we've sent
- std::vector<sync_pb::CommitMessage*> commit_messages_;
+ // Commit messages we've sent, and responses we've returned.
+ ScopedVector<sync_pb::CommitMessage> commit_messages_;
+ ScopedVector<sync_pb::CommitResponse> commit_responses_;
// The next id the mock will return to a commit.
int next_new_id_;
@@ -245,7 +267,7 @@ class MockConnectionManager : public browser_sync::ServerConnectionManager {
// The updates we'll return to the next request.
sync_pb::GetUpdatesResponse updates_;
- Closure* mid_commit_callback_;
+ scoped_ptr<Closure> mid_commit_callback_;
MidCommitObserver* mid_commit_observer_;
// The AUTHENTICATE response we'll return for auth requests.