author     rlarocque@chromium.org <rlarocque@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>   2012-10-16 18:14:43 +0000
committer  rlarocque@chromium.org <rlarocque@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>   2012-10-16 18:14:43 +0000
commit     092ba99e28fdec0b84384e0fd59eaa3cd7b31a3b (patch)
tree       a3b1992d31f2a291388b7e009afd235d144d0c8f /sync/engine/process_updates_command.cc
parent     ec06f819c6d2cbbe67821877b4b2c46f7ceb00c3 (diff)
sync: Merge {Verify,Process}UpdatesCommand
The VerifyUpdatesCommand was used to ensure the validity of each update that had been delivered from the server and to prepare for the ProcessUpdatesCommand. The ProcessUpdatesCommand would then take all updates that had been successfully verified and move them into the SERVER_ fields of the sync directory. It turns out that the logic can be greatly simplified by performing both tasks within the same command.

This patch does not take full advantage of the merge. This patch is intended simply to merge the two files, with the goal of performing more significant refactorings later.

BUG=154654
Review URL: https://chromiumcodereview.appspot.com/11091009

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@162176 0039d316-1c4b-4281-b951-d872f2087c98
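The merged command now does per entity what the two commands previously did in separate passes: verify each server-delivered update and, if it is accepted, copy it into the directory's SERVER_ fields. A minimal C++ sketch of that loop shape follows; the Update struct, VerifyOneUpdate(), and StoreServerFields() are hypothetical stand-ins for the protobuf entities and the VerifyUpdate()/ProcessUpdate() calls shown in the diff below, not the real chromium sync API.

// Sketch only: the verify-then-process loop shape the merged command performs.
#include <string>
#include <vector>

enum VerifyResult { VERIFY_SUCCESS, VERIFY_UNDELETE, VERIFY_SKIP, VERIFY_FAIL };

// Hypothetical, simplified stand-in for a sync_pb::SyncEntity.
struct Update {
  std::string id;    // Empty models an ID the server could not have assigned.
  std::string name;
  bool deleted = false;
};

// Stand-in for VerifyUpdate(): reject obviously malformed updates.
VerifyResult VerifyOneUpdate(const Update& update) {
  if (update.id.empty())
    return VERIFY_FAIL;  // Illegal ID in received update.
  if (update.name.empty() && !update.deleted)
    return VERIFY_FAIL;  // Zero-length name in a non-deleted update.
  return VERIFY_SUCCESS;
}

// Stand-in for ProcessUpdate(): stash the accepted data in the "server side"
// snapshot (the SERVER_ fields of the sync directory in the real code).
void StoreServerFields(const Update& update, std::vector<Update>* server_side) {
  server_side->push_back(update);
}

// One pass that both verifies and processes, as the merged command does.
void ProcessUpdates(const std::vector<Update>& updates,
                    std::vector<Update>* server_side) {
  for (const Update& update : updates) {
    VerifyResult result = VerifyOneUpdate(update);
    if (result != VERIFY_SUCCESS && result != VERIFY_UNDELETE)
      continue;  // Skip updates that failed verification.
    StoreServerFields(update, server_side);
  }
}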
Diffstat (limited to 'sync/engine/process_updates_command.cc')
-rw-r--r--  sync/engine/process_updates_command.cc  |  214
1 file changed, 189 insertions(+), 25 deletions(-)
diff --git a/sync/engine/process_updates_command.cc b/sync/engine/process_updates_command.cc
index 0bf03f1..22d465c 100644
--- a/sync/engine/process_updates_command.cc
+++ b/sync/engine/process_updates_command.cc
@@ -32,50 +32,214 @@ using sessions::SyncSession;
using sessions::StatusController;
using sessions::UpdateProgress;
+using syncable::GET_BY_ID;
+
ProcessUpdatesCommand::ProcessUpdatesCommand() {}
ProcessUpdatesCommand::~ProcessUpdatesCommand() {}
std::set<ModelSafeGroup> ProcessUpdatesCommand::GetGroupsToChange(
const sessions::SyncSession& session) const {
- return session.GetEnabledGroupsWithVerifiedUpdates();
+ std::set<ModelSafeGroup> groups_with_updates;
+
+ const sync_pb::GetUpdatesResponse& updates =
+ session.status_controller().updates_response().get_updates();
+ for (int i = 0; i < updates.entries().size(); i++) {
+ groups_with_updates.insert(
+ GetGroupForModelType(GetModelType(updates.entries(i)),
+ session.routing_info()));
+ }
+
+ return groups_with_updates;
+}
+
+namespace {
+
+// This function attempts to determine whether or not this update is genuinely
+// new, or if it is a reflection of one of our own commits.
+//
+// There is a known inaccuracy in its implementation. If this update ends up
+// being applied to a local item with a different ID, we will count the change
+// as being a non-reflection update. Fortunately, the server usually updates
+// our IDs correctly in its commit response, so a new ID during GetUpdate should
+// be rare.
+//
+// The only scenarios I can think of where this might happen are:
+// - We commit a new item to the server, but we don't persist the
+// server-returned new ID to the database before we shut down. On the GetUpdate
+// following the next restart, we will receive an update from the server that
+// updates its local ID.
+// - When two attempts to create an item with identical UNIQUE_CLIENT_TAG values
+// collide at the server. I have seen this in testing. When it happens, the
+// test server will send one of the clients a response to update its local ID so
+// that both clients will refer to the item using the same ID going forward. In
+// this case, we're right to assume that the update is not a reflection.
+//
+// For more information, see FindLocalIdToUpdate().
+bool UpdateContainsNewVersion(syncable::BaseTransaction *trans,
+ const sync_pb::SyncEntity &update) {
+ int64 existing_version = -1; // The server always sends positive versions.
+ syncable::Entry existing_entry(trans, GET_BY_ID,
+ SyncableIdFromProto(update.id_string()));
+ if (existing_entry.good())
+ existing_version = existing_entry.Get(syncable::BASE_VERSION);
+
+ if (!existing_entry.good() && update.deleted()) {
+ // There are several possible explanations for this. The most common cases
+ // will be first time sync and the redelivery of deletions we've already
+ // synced, accepted, and purged from our database. In either case, the
+ // update is useless to us. Let's count them all as "not new", even though
+ // that may not always be entirely accurate.
+ return false;
+ }
+
+ if (existing_entry.good() &&
+ !existing_entry.Get(syncable::UNIQUE_CLIENT_TAG).empty() &&
+ existing_entry.Get(syncable::IS_DEL) &&
+ update.deleted()) {
+ // Unique client tags will have their version set to zero when they're
+ // deleted. The usual version comparison logic won't be able to detect
+ // reflections of these items. Instead, we assume any received tombstones
+ // are reflections. That should be correct most of the time.
+ return false;
+ }
+
+ return existing_version < update.version();
}
+} // namespace
+
SyncerError ProcessUpdatesCommand::ModelChangingExecuteImpl(
SyncSession* session) {
syncable::Directory* dir = session->context()->directory();
- const sessions::UpdateProgress* progress =
- session->status_controller().update_progress();
- if (!progress)
- return SYNCER_OK; // Nothing to do.
-
syncable::WriteTransaction trans(FROM_HERE, syncable::SYNCER, dir);
- vector<sessions::VerifiedUpdate>::const_iterator it;
- for (it = progress->VerifiedUpdatesBegin();
- it != progress->VerifiedUpdatesEnd();
- ++it) {
- const sync_pb::SyncEntity& update = it->second;
- if (it->first != VERIFY_SUCCESS && it->first != VERIFY_UNDELETE)
+ sessions::StatusController* status = session->mutable_status_controller();
+ const sync_pb::GetUpdatesResponse& updates =
+ status->updates_response().get_updates();
+ int update_count = updates.entries().size();
+
+ ModelTypeSet requested_types = GetRoutingInfoTypes(
+ session->routing_info());
+
+ DVLOG(1) << update_count << " entries to verify";
+ for (int i = 0; i < update_count; i++) {
+ const sync_pb::SyncEntity& update = updates.entries(i);
+
+ // The current function gets executed on several different threads, but
+ // every call iterates over the same list of items that the server returned
+ // to us. We're not allowed to process items unless we're on the right
+ // thread for that type. This check will ensure we only touch the items
+ // that live on our current thread.
+ // TODO(tim): Don't allow access to objects in other ModelSafeGroups.
+ // See crbug.com/121521 .
+ ModelSafeGroup g = GetGroupForModelType(GetModelType(update),
+ session->routing_info());
+ if (g != status->group_restriction())
continue;
- switch (ProcessUpdate(update,
- dir->GetCryptographer(&trans),
- &trans)) {
- case SUCCESS_PROCESSED:
- case SUCCESS_STORED:
- break;
- default:
- NOTREACHED();
- break;
- }
+
+ VerifyResult verify_result = VerifyUpdate(&trans, update,
+ requested_types,
+ session->routing_info());
+ status->mutable_update_progress()->AddVerifyResult(verify_result, update);
+ status->increment_num_updates_downloaded_by(1);
+ if (!UpdateContainsNewVersion(&trans, update))
+ status->increment_num_reflected_updates_downloaded_by(1);
+ if (update.deleted())
+ status->increment_num_tombstone_updates_downloaded_by(1);
+
+ if (verify_result != VERIFY_SUCCESS && verify_result != VERIFY_UNDELETE)
+ continue;
+
+ ServerUpdateProcessingResult process_result =
+ ProcessUpdate(update, dir->GetCryptographer(&trans), &trans);
+
+ DCHECK(process_result == SUCCESS_PROCESSED ||
+ process_result == SUCCESS_STORED);
}
- StatusController* status = session->mutable_status_controller();
status->mutable_update_progress()->ClearVerifiedUpdates();
return SYNCER_OK;
}
namespace {
+
+// In the event that the IDs match but the tags differ, AttemptReuniteClientTag
+// will have refused to unify the update. We should not attempt to apply it at
+// all, since it violates consistency rules.
+VerifyResult VerifyTagConsistency(const sync_pb::SyncEntity& entry,
+ const syncable::MutableEntry& same_id) {
+ if (entry.has_client_defined_unique_tag() &&
+ entry.client_defined_unique_tag() !=
+ same_id.Get(syncable::UNIQUE_CLIENT_TAG)) {
+ return VERIFY_FAIL;
+ }
+ return VERIFY_UNDECIDED;
+}
+
+} // namespace
+
+VerifyResult ProcessUpdatesCommand::VerifyUpdate(
+ syncable::WriteTransaction* trans, const sync_pb::SyncEntity& entry,
+ ModelTypeSet requested_types,
+ const ModelSafeRoutingInfo& routes) {
+ syncable::Id id = SyncableIdFromProto(entry.id_string());
+ VerifyResult result = VERIFY_FAIL;
+
+ const bool deleted = entry.has_deleted() && entry.deleted();
+ const bool is_directory = IsFolder(entry);
+ const ModelType model_type = GetModelType(entry);
+
+ if (!id.ServerKnows()) {
+ LOG(ERROR) << "Illegal negative id in received updates";
+ return result;
+ }
+ {
+ const std::string name = SyncerProtoUtil::NameFromSyncEntity(entry);
+ if (name.empty() && !deleted) {
+ LOG(ERROR) << "Zero length name in non-deleted update";
+ return result;
+ }
+ }
+
+ syncable::MutableEntry same_id(trans, GET_BY_ID, id);
+ result = VerifyNewEntry(entry, &same_id, deleted);
+
+ ModelType placement_type = !deleted ? GetModelType(entry)
+ : same_id.good() ? same_id.GetModelType() : UNSPECIFIED;
+
+ if (VERIFY_UNDECIDED == result) {
+ result = VerifyTagConsistency(entry, same_id);
+ }
+
+ if (VERIFY_UNDECIDED == result) {
+ if (deleted) {
+      // For deletes, the server could send tombstones for items that
+      // the client did not request. If so, ignore those items.
+ if (IsRealDataType(placement_type) &&
+ !requested_types.Has(placement_type)) {
+ result = VERIFY_SKIP;
+ } else {
+ result = VERIFY_SUCCESS;
+ }
+ }
+ }
+
+ // If we have an existing entry, we check here for updates that break
+ // consistency rules.
+ if (VERIFY_UNDECIDED == result) {
+ result = VerifyUpdateConsistency(trans, entry, &same_id,
+ deleted, is_directory, model_type);
+ }
+
+ if (VERIFY_UNDECIDED == result)
+ result = VERIFY_SUCCESS; // No news is good news.
+
+ return result; // This might be VERIFY_SUCCESS as well
+}
+
+namespace {
// Returns true if the entry is still ok to process.
bool ReverifyEntry(syncable::WriteTransaction* trans,
const sync_pb::SyncEntity& entry,
@@ -115,10 +279,10 @@ ServerUpdateProcessingResult ProcessUpdatesCommand::ProcessUpdate(
 // We take a two-step approach. First we store the entry's data in the
 // server fields of a local entry and then move the data to the local fields.
- syncable::MutableEntry target_entry(trans, syncable::GET_BY_ID, local_id);
+ syncable::MutableEntry target_entry(trans, GET_BY_ID, local_id);
// We need to run the Verify checks again; the world could have changed
- // since VerifyUpdatesCommand.
+ // since we last verified.
if (!ReverifyEntry(trans, update, &target_entry)) {
return SUCCESS_PROCESSED; // The entry has become irrelevant.
}
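The reflection check added by UpdateContainsNewVersion() in the diff above boils down to a base-version comparison with two tombstone special cases. Below is a self-contained sketch of that decision rule, assuming the local entry is summarized by a small illustrative struct; LocalEntryView and LooksLikeNewVersion are hypothetical names for this sketch, not chromium code.

#include <cstdint>
#include <optional>

// Minimal view of the fields the check needs from a local syncable entry.
struct LocalEntryView {
  int64_t base_version = -1;
  bool is_deleted = false;
  bool has_client_tag = false;
};

// Returns true if the incoming version looks genuinely new rather than a
// reflection of something this client already committed or purged.
bool LooksLikeNewVersion(const std::optional<LocalEntryView>& local,
                         int64_t update_version,
                         bool update_is_tombstone) {
  if (!local.has_value() && update_is_tombstone) {
    // Deletion of an item we never had (first-time sync, or a deletion we
    // already synced and purged): count it as "not new", as the real code does.
    return false;
  }
  if (local.has_value() && local->has_client_tag && local->is_deleted &&
      update_is_tombstone) {
    // Deleted client-tagged items have their version reset to zero, so the
    // usual comparison cannot detect reflections; assume received tombstones
    // are reflections.
    return false;
  }
  const int64_t existing_version =
      local.has_value() ? local->base_version : -1;  // Server versions are positive.
  return existing_version < update_version;
}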