Diffstat (limited to 'chrome/browser/sync/engine/process_updates_command.cc')
-rw-r--r-- | chrome/browser/sync/engine/process_updates_command.cc | 154 |
1 file changed, 154 insertions, 0 deletions
diff --git a/chrome/browser/sync/engine/process_updates_command.cc b/chrome/browser/sync/engine/process_updates_command.cc
new file mode 100644
index 0000000..6e42b1b
--- /dev/null
+++ b/chrome/browser/sync/engine/process_updates_command.cc
@@ -0,0 +1,154 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/engine/process_updates_command.h"
+
+#include <vector>
+
+#include "base/basictypes.h"
+#include "chrome/browser/sync/engine/syncer.h"
+#include "chrome/browser/sync/engine/syncer_proto_util.h"
+#include "chrome/browser/sync/engine/syncer_util.h"
+#include "chrome/browser/sync/engine/syncproto.h"
+#include "chrome/browser/sync/sessions/sync_session.h"
+#include "chrome/browser/sync/syncable/directory_manager.h"
+#include "chrome/browser/sync/syncable/syncable.h"
+
+using std::vector;
+
+namespace browser_sync {
+
+using sessions::SyncSession;
+using sessions::StatusController;
+
+ProcessUpdatesCommand::ProcessUpdatesCommand() {}
+ProcessUpdatesCommand::~ProcessUpdatesCommand() {}
+
+bool ProcessUpdatesCommand::ModelNeutralExecuteImpl(SyncSession* session) {
+  const GetUpdatesResponse& updates =
+      session->status_controller()->updates_response().get_updates();
+  const int update_count = updates.entries_size();
+
+  // Don't bother processing updates if there were none.
+  return update_count != 0;
+}
+
+void ProcessUpdatesCommand::ModelChangingExecuteImpl(SyncSession* session) {
+  syncable::ScopedDirLookup dir(session->context()->directory_manager(),
+                                session->context()->account_name());
+  if (!dir.good()) {
+    LOG(ERROR) << "Scoped dir lookup failed!";
+    return;
+  }
+
+  StatusController* status = session->status_controller();
+
+  const sessions::UpdateProgress& progress(status->update_progress());
+  vector<sessions::VerifiedUpdate>::const_iterator it;
+  for (it = progress.VerifiedUpdatesBegin();
+       it != progress.VerifiedUpdatesEnd();
+       ++it) {
+    const sync_pb::SyncEntity& update = it->second;
+
+    if (it->first != VERIFY_SUCCESS && it->first != VERIFY_UNDELETE)
+      continue;
+    switch (ProcessUpdate(dir, update)) {
+      case SUCCESS_PROCESSED:
+      case SUCCESS_STORED:
+        break;
+      default:
+        NOTREACHED();
+        break;
+    }
+  }
+
+  status->set_num_consecutive_errors(0);
+
+  // TODO(nick): The following line makes no sense to me.
+  status->set_syncing(true);
+  return;
+}
+
+namespace {
+// Returns true if the entry is still ok to process.
+bool ReverifyEntry(syncable::WriteTransaction* trans, const SyncEntity& entry,
+                   syncable::MutableEntry* same_id) {
+
+  const bool deleted = entry.has_deleted() && entry.deleted();
+  const bool is_directory = entry.IsFolder();
+  const syncable::ModelType model_type = entry.GetModelType();
+
+  return VERIFY_SUCCESS == SyncerUtil::VerifyUpdateConsistency(trans,
+                                                               entry,
+                                                               same_id,
+                                                               deleted,
+                                                               is_directory,
+                                                               model_type);
+}
+}  // namespace
+
+// Process a single update. Will avoid touching global state.
+ServerUpdateProcessingResult ProcessUpdatesCommand::ProcessUpdate(
+    const syncable::ScopedDirLookup& dir,
+    const sync_pb::SyncEntity& proto_update) {
+
+  const SyncEntity& update = *static_cast<const SyncEntity*>(&proto_update);
+  using namespace syncable;
+  syncable::Id server_id = update.id();
+  const std::string name = SyncerProtoUtil::NameFromSyncEntity(update);
+
+  WriteTransaction trans(dir, SYNCER, __FILE__, __LINE__);
+
+  // Look to see if there's a local item that should receive this update,
+  // maybe due to a duplicate client tag or a lost commit response.
+  syncable::Id local_id = SyncerUtil::FindLocalIdToUpdate(&trans, update);
+
+  // FindLocalIdToUpdate has veto power.
+  if (local_id.IsNull()) {
+    return SUCCESS_PROCESSED;  // The entry has become irrelevant.
+  }
+
+  SyncerUtil::CreateNewEntry(&trans, local_id);
+
+  // We take a two-step approach. First we store the entry's data in the
+  // server fields of a local entry, and then move the data to the local fields.
+  MutableEntry target_entry(&trans, GET_BY_ID, local_id);
+
+  // We need to run the Verify checks again; the world could have changed
+  // since VerifyUpdatesCommand.
+  if (!ReverifyEntry(&trans, update, &target_entry)) {
+    return SUCCESS_PROCESSED;  // The entry has become irrelevant.
+  }
+
+  // If we're repurposing an existing local entry with a new server ID,
+  // change the ID now, after we're sure that the update can succeed.
+  if (local_id != server_id) {
+    SyncerUtil::ChangeEntryIDAndUpdateChildren(&trans, &target_entry,
+                                               server_id);
+    // When IDs change, versions become irrelevant. Forcing BASE_VERSION
+    // to zero would ensure that this update gets applied, but historically,
+    // that's an illegal state unless the item is using the client tag.
+    // Alternatively, we can force BASE_VERSION to entry.version(), but
+    // this has the effect of suppressing update application.
+    // TODO(nick): Make the treatment of these two cases consistent.
+    int64 new_version = target_entry.Get(UNIQUE_CLIENT_TAG).empty() ?
+        update.version() : 0;
+    target_entry.Put(BASE_VERSION, new_version);
+  }
+
+  SyncerUtil::UpdateServerFieldsFromUpdate(&target_entry, update, name);
+
+  if (target_entry.Get(SERVER_VERSION) == target_entry.Get(BASE_VERSION) &&
+      !target_entry.Get(IS_UNSYNCED)) {
+    // It's largely OK if data doesn't match exactly since a future update
+    // will just clobber the data. Conflict resolution will overwrite and
+    // take one side as the winner and does not try to merge, so strict
+    // equality isn't necessary.
+    LOG_IF(ERROR, !SyncerUtil::ServerAndLocalEntriesMatch(&target_entry))
+        << target_entry;
+  }
+  return SUCCESS_PROCESSED;
+}
+
+}  // namespace browser_sync