author     rlarocque@chromium.org <rlarocque@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2013-10-23 01:03:54 +0000
committer  rlarocque@chromium.org <rlarocque@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2013-10-23 01:03:54 +0000
commit     0c141190b349aadf75bf9f91668c3260f4d7b069 (patch)
tree       30655a7d367ba45f8e62004839a3a8b50850dfc1
parent     d2df291d2d2e21ace2462851e385eb6d5d144969 (diff)
sync: Refactor update verification and processing
Define a new function for partitioning an update response into per-type chunks and use it to refactor ProcessUpdatesCommand's functions. This is part of the preparation for maintaining a set of per-type objects that we can use to process updates in different ways for different types.

This commit also updates GetModelType() so it no longer reports the type of deleted items as "UNSPECIFIED". The old behavior dates from a time when the server would not set the type of tombstone items; the current server always populates that information.

BUG=278484

Review URL: https://codereview.chromium.org/27765002

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@230266 0039d316-1c4b-4281-b951-d872f2087c98
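For readers skimming the diff below, the core of the refactoring is a simple bucketing step: group the entities of a GetUpdates response by their ModelType before verifying and processing them, so that later work can hand each bucket to a per-type handler. The following is a minimal standalone sketch of that idea only; Entity, ModelType, PartitionByType and main() here are stand-ins invented for illustration, not the real sync_pb/syncer types used in the change.

// Standalone sketch of the partition-by-type idea (illustration only;
// Entity and ModelType are stand-ins, not the real sync_pb/syncer types).
#include <cstddef>
#include <iostream>
#include <map>
#include <string>
#include <vector>

enum ModelType { BOOKMARKS, PREFERENCES, AUTOFILL };

struct Entity {
  ModelType type;
  std::string id;
};

typedef std::vector<const Entity*> EntityList;
typedef std::map<ModelType, EntityList> TypeEntityMap;

// Buckets pointers to the received entities by type, mirroring what
// ProcessUpdatesCommand::PartitionUpdatesByType() does in the diff below.
void PartitionByType(const std::vector<Entity>& response, TypeEntityMap* out) {
  for (std::size_t i = 0; i < response.size(); ++i)
    (*out)[response[i].type].push_back(&response[i]);
}

int main() {
  std::vector<Entity> response;
  Entity a = { BOOKMARKS, "b1" };
  Entity b = { PREFERENCES, "p1" };
  Entity c = { BOOKMARKS, "b2" };
  response.push_back(a);
  response.push_back(b);
  response.push_back(c);

  TypeEntityMap by_type;
  PartitionByType(response, &by_type);

  // Each type can now be handled by its own per-type logic.
  for (TypeEntityMap::const_iterator it = by_type.begin();
       it != by_type.end(); ++it) {
    std::cout << "type " << it->first << ": "
              << it->second.size() << " entities\n";
  }
  return 0;
}

Note that the map stores pointers into the response rather than copies, which is also the choice made by the TypeSyncEntityMap typedef introduced in process_updates_command.h below.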
-rw-r--r--  sync/engine/process_updates_command.cc      | 61
-rw-r--r--  sync/engine/process_updates_command.h       | 14
-rw-r--r--  sync/engine/syncer_types.h                  | 20
-rw-r--r--  sync/syncable/model_type.cc                 |  3
-rw-r--r--  sync/test/engine/mock_connection_manager.cc |  3
-rw-r--r--  sync/tools/testserver/chromiumsync.py       | 12
6 files changed, 60 insertions(+), 53 deletions(-)
diff --git a/sync/engine/process_updates_command.cc b/sync/engine/process_updates_command.cc
index d761cfc..d69c109 100644
--- a/sync/engine/process_updates_command.cc
+++ b/sync/engine/process_updates_command.cc
@@ -96,36 +96,51 @@ SyncerError ProcessUpdatesCommand::ExecuteImpl(SyncSession* session) {
sessions::StatusController* status = session->mutable_status_controller();
const sync_pb::GetUpdatesResponse& updates =
status->updates_response().get_updates();
- int update_count = updates.entries().size();
-
ModelTypeSet requested_types = GetRoutingInfoTypes(
session->context()->routing_info());
- DVLOG(1) << update_count << " entries to verify";
- for (int i = 0; i < update_count; i++) {
- const sync_pb::SyncEntity& update = updates.entries(i);
+ TypeSyncEntityMap updates_by_type;
- VerifyResult verify_result = VerifyUpdate(
- &trans, update, requested_types, session->context()->routing_info());
- status->increment_num_updates_downloaded_by(1);
- if (!UpdateContainsNewVersion(&trans, update))
- status->increment_num_reflected_updates_downloaded_by(1);
- if (update.deleted())
- status->increment_num_tombstone_updates_downloaded_by(1);
+ PartitionUpdatesByType(updates, &updates_by_type);
- if (verify_result != VERIFY_SUCCESS && verify_result != VERIFY_UNDELETE)
- continue;
-
- ServerUpdateProcessingResult process_result =
- ProcessUpdate(update, dir->GetCryptographer(&trans), &trans);
+ int update_count = updates.entries().size();
+ status->increment_num_updates_downloaded_by(update_count);
- DCHECK(process_result == SUCCESS_PROCESSED ||
- process_result == SUCCESS_STORED);
+ DVLOG(1) << update_count << " entries to verify";
+ for (TypeSyncEntityMap::iterator it = updates_by_type.begin();
+ it != updates_by_type.end(); ++it) {
+ for (SyncEntityList::iterator update_it = it->second.begin();
+ update_it != it->second.end(); ++update_it) {
+ if (!UpdateContainsNewVersion(&trans, **update_it))
+ status->increment_num_reflected_updates_downloaded_by(1);
+ if ((*update_it)->deleted())
+ status->increment_num_tombstone_updates_downloaded_by(1);
+ VerifyResult verify_result = VerifyUpdate(
+ &trans,
+ **update_it,
+ requested_types,
+ session->context()->routing_info());
+ if (verify_result != VERIFY_SUCCESS && verify_result != VERIFY_UNDELETE)
+ continue;
+ ProcessUpdate(**update_it, dir->GetCryptographer(&trans), &trans);
+ }
}
return SYNCER_OK;
}
+void ProcessUpdatesCommand::PartitionUpdatesByType(
+ const sync_pb::GetUpdatesResponse& updates,
+ TypeSyncEntityMap* updates_by_type) {
+ int update_count = updates.entries().size();
+ for (int i = 0; i < update_count; ++i) {
+ const sync_pb::SyncEntity& update = updates.entries(i);
+ ModelType type = GetModelType(update);
+ DCHECK(IsRealDataType(type));
+ (*updates_by_type)[type].push_back(&update);
+ }
+}
+
namespace {
// In the event that IDs match, but tags differ AttemptReuniteClient tag
@@ -225,7 +240,7 @@ bool ReverifyEntry(syncable::ModelNeutralWriteTransaction* trans,
} // namespace
// Process a single update. Will avoid touching global state.
-ServerUpdateProcessingResult ProcessUpdatesCommand::ProcessUpdate(
+void ProcessUpdatesCommand::ProcessUpdate(
const sync_pb::SyncEntity& update,
const Cryptographer* cryptographer,
syncable::ModelNeutralWriteTransaction* const trans) {
@@ -238,7 +253,7 @@ ServerUpdateProcessingResult ProcessUpdatesCommand::ProcessUpdate(
// FindLocalEntryToUpdate has veto power.
if (local_id.IsNull()) {
- return SUCCESS_PROCESSED; // The entry has become irrelevant.
+ return; // The entry has become irrelevant.
}
CreateNewEntry(trans, local_id);
@@ -250,7 +265,7 @@ ServerUpdateProcessingResult ProcessUpdatesCommand::ProcessUpdate(
// We need to run the Verify checks again; the world could have changed
// since we last verified.
if (!ReverifyEntry(trans, update, &target_entry)) {
- return SUCCESS_PROCESSED; // The entry has become irrelevant.
+ return; // The entry has become irrelevant.
}
// If we're repurposing an existing local entry with a new server ID,
@@ -327,7 +342,7 @@ ServerUpdateProcessingResult ProcessUpdatesCommand::ProcessUpdate(
UpdateServerFieldsFromUpdate(&target_entry, update, name);
- return SUCCESS_PROCESSED;
+ return;
}
} // namespace syncer
diff --git a/sync/engine/process_updates_command.h b/sync/engine/process_updates_command.h
index 0868e98..4d7d287 100644
--- a/sync/engine/process_updates_command.h
+++ b/sync/engine/process_updates_command.h
@@ -11,6 +11,7 @@
#include "sync/engine/syncer_types.h"
namespace sync_pb {
+class GetUpdatesResponse;
class SyncEntity;
}
@@ -38,12 +39,23 @@ class SYNC_EXPORT_PRIVATE ProcessUpdatesCommand : public SyncerCommand {
virtual SyncerError ExecuteImpl(sessions::SyncSession* session) OVERRIDE;
private:
+ typedef std::vector<const sync_pb::SyncEntity*> SyncEntityList;
+ typedef std::map<ModelType, SyncEntityList> TypeSyncEntityMap;
+
+ // Given a GetUpdates response, iterates over all the returned items and
+ // divides them according to their type. Outputs a map from model types to
+ // received SyncEntities. ModelTypes that had no items in the update response
+ // will not be included in this map.
+ static void PartitionUpdatesByType(
+ const sync_pb::GetUpdatesResponse& updates,
+ TypeSyncEntityMap* updates_by_type);
+
VerifyResult VerifyUpdate(
syncable::ModelNeutralWriteTransaction* trans,
const sync_pb::SyncEntity& entry,
ModelTypeSet requested_types,
const ModelSafeRoutingInfo& routes);
- ServerUpdateProcessingResult ProcessUpdate(
+ void ProcessUpdate(
const sync_pb::SyncEntity& proto_update,
const Cryptographer* cryptographer,
syncable::ModelNeutralWriteTransaction* const trans);
diff --git a/sync/engine/syncer_types.h b/sync/engine/syncer_types.h
index ada4d82..36f3dbc 100644
--- a/sync/engine/syncer_types.h
+++ b/sync/engine/syncer_types.h
@@ -45,26 +45,6 @@ enum UpdateAttemptResponse {
CONFLICT_SIMPLE
};
-enum ServerUpdateProcessingResult {
- // Success. Update applied and stored in SERVER_* fields or dropped if
- // irrelevant.
- SUCCESS_PROCESSED,
-
- // Success. Update details stored in SERVER_* fields, but wasn't applied.
- SUCCESS_STORED,
-
- // Update is illegally inconsistent with earlier updates. e.g. A bookmark
- // becoming a folder.
- FAILED_INCONSISTENT,
-
- // Update is illegal when considered alone. e.g. broken UTF-8 in the name.
- FAILED_CORRUPT,
-
- // Only used by VerifyUpdate. Indicates that an update is valid. As
- // VerifyUpdate cannot return SUCCESS_STORED, we reuse the value.
- SUCCESS_VALID = SUCCESS_STORED
-};
-
// Different results from the verify phase will yield different methods of
// processing in the ProcessUpdates phase. The SKIP result means the entry
// doesn't go to the ProcessUpdates phase.
diff --git a/sync/syncable/model_type.cc b/sync/syncable/model_type.cc
index 3576413..aac057fd 100644
--- a/sync/syncable/model_type.cc
+++ b/sync/syncable/model_type.cc
@@ -223,9 +223,6 @@ FullModelTypeSet ToFullModelTypeSet(ModelTypeSet in) {
ModelType GetModelType(const sync_pb::SyncEntity& sync_entity) {
DCHECK(!IsRoot(sync_entity)); // Root shouldn't ever go over the wire.
- if (sync_entity.deleted())
- return UNSPECIFIED;
-
// Backwards compatibility with old (pre-specifics) protocol.
if (sync_entity.has_bookmarkdata())
return BOOKMARKS;
diff --git a/sync/test/engine/mock_connection_manager.cc b/sync/test/engine/mock_connection_manager.cc
index 009b5a1..e3b23dd 100644
--- a/sync/test/engine/mock_connection_manager.cc
+++ b/sync/test/engine/mock_connection_manager.cc
@@ -431,6 +431,9 @@ void MockConnectionManager::AddUpdateTombstone(const syncable::Id& id) {
ent->set_version(0);
ent->set_name("");
ent->set_deleted(true);
+
+ // Make sure we can still extract the ModelType from this tombstone.
+ ent->mutable_specifics()->mutable_bookmark();
}
void MockConnectionManager::SetLastUpdateDeleted() {
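The mock tombstone above populates bookmark specifics because, with the GetModelType() change in sync/syncable/model_type.cc earlier in this diff, a deleted entity's type is now derived from its specifics instead of being forced to UNSPECIFIED. The snippet below is a hedged sketch of that behavior written against the sync/ headers assumed to be in this tree; it is not a test from this CL, and TombstoneTypeSketch() is a name invented for illustration.

// Sketch only: assumes the base/ and sync/ headers available in this tree.
#include "base/logging.h"
#include "sync/internal_api/public/base/model_type.h"
#include "sync/protocol/sync.pb.h"

// With deleted() no longer short-circuiting GetModelType(), a tombstone that
// carries specifics (as the current server and the mock above both do) still
// reports its real type.
void TombstoneTypeSketch() {
  sync_pb::SyncEntity tombstone;
  tombstone.set_id_string("some_server_id");
  tombstone.set_deleted(true);
  tombstone.mutable_specifics()->mutable_bookmark();

  syncer::ModelType type = syncer::GetModelType(tombstone);
  DCHECK(syncer::IsRealDataType(type));  // Would have been UNSPECIFIED before.
  DCHECK_EQ(syncer::BOOKMARKS, type);
}

The chromiumsync.py change that follows makes the test server match this server behavior by copying default specifics of the original datatype onto each tombstone it creates.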
diff --git a/sync/tools/testserver/chromiumsync.py b/sync/tools/testserver/chromiumsync.py
index d807486..800917e 100644
--- a/sync/tools/testserver/chromiumsync.py
+++ b/sync/tools/testserver/chromiumsync.py
@@ -930,7 +930,7 @@ class SyncDataModel(object):
# tombstone. A sync server must track deleted IDs forever, since it does
# not keep track of client knowledge (there's no deletion ACK event).
if entry.deleted:
- def MakeTombstone(id_string):
+ def MakeTombstone(id_string, datatype):
"""Make a tombstone entry that will replace the entry being deleted.
Args:
@@ -939,13 +939,11 @@ class SyncDataModel(object):
A new SyncEntity reflecting the fact that the entry is deleted.
"""
# Only the ID, version and deletion state are preserved on a tombstone.
- # TODO(nick): Does the production server not preserve the type? Not
- # doing so means that tombstones cannot be filtered based on
- # requested_types at GetUpdates time.
tombstone = sync_pb2.SyncEntity()
tombstone.id_string = id_string
tombstone.deleted = True
tombstone.name = ''
+ tombstone.specifics.CopyFrom(GetDefaultEntitySpecifics(datatype))
return tombstone
def IsChild(child_id):
@@ -968,10 +966,12 @@ class SyncDataModel(object):
# Mark all children that were identified as deleted.
for child_id in child_ids:
- self._SaveEntry(MakeTombstone(child_id))
+ datatype = GetEntryType(self._entries[child_id])
+ self._SaveEntry(MakeTombstone(child_id, datatype))
# Delete entry itself.
- entry = MakeTombstone(entry.id_string)
+ datatype = GetEntryType(self._entries[entry.id_string])
+ entry = MakeTombstone(entry.id_string, datatype)
else:
# Comments in sync.proto detail how the representation of positional
# ordering works.