author     zea@chromium.org <zea@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>   2013-06-07 02:30:24 +0000
committer  zea@chromium.org <zea@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>   2013-06-07 02:30:24 +0000
commit     1f51ad94964af7f256ac1d762a04cfac1e9935c5 (patch)
tree       621e43948dbd9825f0d1ab1f501ab0a91bbe9ede /sync/syncable
parent     3c372a1ae0e609691994bc74ade55175a3e2ea97 (diff)
[Sync] Allow enabling partial sets of types instead of blocking
Datatypes failing to start is typically a sign of encryption issues and is
not fatal to sync. Rather than blocking the datatype manager, it makes more
sense to sync what we can (particularly so we can receive birthday updates)
while marking the types with encryption problems as failed.
To accomplish this we move all automatic reconfiguration logic into the
data type manager itself, and add support for delayed association by
unapplying all types with cryptographer errors. This also lays the
groundwork for delayed association when a transaction version discrepancy
is detected.
BUG=238712
Review URL: https://chromiumcodereview.appspot.com/15013004
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@204695 0039d316-1c4b-4281-b951-d872f2087c98
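For illustration only (this is not part of the patch), here is a minimal sketch of how a caller might use the new three-argument PurgeEntriesWithTypeIn contract described above. The helper name, namespaces, and include paths are assumptions; only the Directory API itself comes from this change.

    #include "sync/internal_api/public/base/model_type.h"
    #include "sync/syncable/directory.h"

    // Hypothetical call site: disable |disabled_types|, journal nothing, and
    // keep the downloaded state of |types_to_unapply| (e.g. types that only
    // failed because of cryptographer errors) by unapplying them instead of
    // deleting them. Per the new contract, |types_to_unapply| must be a subset
    // of |disabled_types|; every other disabled type is purged outright (local
    // data deleted and download progress reset).
    bool PurgeDisabledTypesSketch(syncer::syncable::Directory* directory,
                                  syncer::ModelTypeSet disabled_types,
                                  syncer::ModelTypeSet types_to_unapply) {
      return directory->PurgeEntriesWithTypeIn(
          disabled_types,
          /*types_to_journal=*/syncer::ModelTypeSet(),
          types_to_unapply);
    }

Keeping the progress markers for unapplied types (see the change to PurgeEntriesWithTypeIn below) is what lets a later sync cycle re-apply the already-downloaded data once the cryptographer recovers, instead of re-downloading it.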
Diffstat (limited to 'sync/syncable')
-rw-r--r-- | sync/syncable/directory.cc         | 159
-rw-r--r-- | sync/syncable/directory.h          |  34
-rw-r--r-- | sync/syncable/syncable_mock.h      |   4
-rw-r--r-- | sync/syncable/syncable_unittest.cc |   8
4 files changed, 145 insertions, 60 deletions
diff --git a/sync/syncable/directory.cc b/sync/syncable/directory.cc
index c2dcc16..eb15cd6 100644
--- a/sync/syncable/directory.cc
+++ b/sync/syncable/directory.cc
@@ -570,11 +570,101 @@ bool Directory::VacuumAfterSaveChanges(const SaveChangesSnapshot& snapshot) {
   return true;
 }
 
-bool Directory::PurgeEntriesWithTypeIn(ModelTypeSet types,
-                                       ModelTypeSet types_to_journal) {
-  types.RemoveAll(ProxyTypes());
+void Directory::UnapplyEntry(EntryKernel* entry) {
+  int64 handle = entry->ref(META_HANDLE);
+  ModelType server_type = GetModelTypeFromSpecifics(
+      entry->ref(SERVER_SPECIFICS));
+
+  // Clear enough so that on the next sync cycle all local data will
+  // be overwritten.
+  // Note: do not modify the root node in order to preserve the
+  // initial sync ended bit for this type (else on the next restart
+  // this type will be treated as disabled and therefore fully purged).
+  if (IsRealDataType(server_type) &&
+      ModelTypeToRootTag(server_type) == entry->ref(UNIQUE_SERVER_TAG)) {
+    return;
+  }
+
+  // Set the unapplied bit if this item has server data.
+  if (IsRealDataType(server_type) && !entry->ref(IS_UNAPPLIED_UPDATE)) {
+    entry->put(IS_UNAPPLIED_UPDATE, true);
+    kernel_->unapplied_update_metahandles[server_type].insert(handle);
+    entry->mark_dirty(&kernel_->dirty_metahandles);
+  }
+
+  // Unset the unsynced bit.
+  if (entry->ref(IS_UNSYNCED)) {
+    kernel_->unsynced_metahandles.erase(handle);
+    entry->put(IS_UNSYNCED, false);
+    entry->mark_dirty(&kernel_->dirty_metahandles);
+  }
+
+  // Mark the item as locally deleted. No deleted items are allowed in the
+  // parent child index.
+  if (!entry->ref(IS_DEL)) {
+    kernel_->parent_child_index.Remove(entry);
+    entry->put(IS_DEL, true);
+    entry->mark_dirty(&kernel_->dirty_metahandles);
+  }
+
+  // Set the version to the "newly created" version.
+  if (entry->ref(BASE_VERSION) != CHANGES_VERSION) {
+    entry->put(BASE_VERSION, CHANGES_VERSION);
+    entry->mark_dirty(&kernel_->dirty_metahandles);
+  }
+
+  // At this point locally created items that aren't synced will become locally
+  // deleted items, and purged on the next snapshot. All other items will match
+  // the state they would have had if they were just created via a server
+  // update. See MutableEntry::MutableEntry(.., CreateNewUpdateItem, ..).
+}
+
+void Directory::DeleteEntry(bool save_to_journal,
+                            EntryKernel* entry,
+                            EntryKernelSet* entries_to_journal) {
+  int64 handle = entry->ref(META_HANDLE);
+  ModelType server_type = GetModelTypeFromSpecifics(
+      entry->ref(SERVER_SPECIFICS));
+
+  kernel_->metahandles_to_purge.insert(handle);
+
+  size_t num_erased = 0;
+  num_erased = kernel_->metahandles_map.erase(entry->ref(META_HANDLE));
+  DCHECK_EQ(1u, num_erased);
+  num_erased = kernel_->ids_map.erase(entry->ref(ID).value());
+  DCHECK_EQ(1u, num_erased);
+  num_erased = kernel_->unsynced_metahandles.erase(handle);
+  DCHECK_EQ(entry->ref(IS_UNSYNCED), num_erased > 0);
+  num_erased =
+      kernel_->unapplied_update_metahandles[server_type].erase(handle);
+  DCHECK_EQ(entry->ref(IS_UNAPPLIED_UPDATE), num_erased > 0);
+  if (kernel_->parent_child_index.Contains(entry))
+    kernel_->parent_child_index.Remove(entry);
+
+  if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) {
+    num_erased =
+        kernel_->client_tags_map.erase(entry->ref(UNIQUE_CLIENT_TAG));
+    DCHECK_EQ(1u, num_erased);
+  }
+  if (!entry->ref(UNIQUE_SERVER_TAG).empty()) {
+    num_erased =
+        kernel_->server_tags_map.erase(entry->ref(UNIQUE_SERVER_TAG));
+    DCHECK_EQ(1u, num_erased);
+  }
+
+  if (save_to_journal) {
+    entries_to_journal->insert(entry);
+  } else {
+    delete entry;
+  }
+}
+
+bool Directory::PurgeEntriesWithTypeIn(ModelTypeSet disabled_types,
+                                       ModelTypeSet types_to_journal,
+                                       ModelTypeSet types_to_unapply) {
+  disabled_types.RemoveAll(ProxyTypes());
 
-  if (types.Empty())
+  if (disabled_types.Empty())
     return true;
 
   {
@@ -602,9 +692,8 @@ bool Directory::PurgeEntriesWithTypeIn(ModelTypeSet types,
       ModelType local_type = GetModelTypeFromSpecifics(local_specifics);
       ModelType server_type = GetModelTypeFromSpecifics(server_specifics);
 
-      // Note the dance around incrementing |it|, since we sometimes erase().
-      if ((IsRealDataType(local_type) && types.Has(local_type)) ||
-          (IsRealDataType(server_type) && types.Has(server_type))) {
+      if ((IsRealDataType(local_type) && disabled_types.Has(local_type)) ||
+          (IsRealDataType(server_type) && disabled_types.Has(server_type))) {
         to_purge.insert(it->second);
       }
     }
@@ -612,57 +701,37 @@ bool Directory::PurgeEntriesWithTypeIn(ModelTypeSet types,
     for (std::set<EntryKernel*>::iterator it = to_purge.begin();
          it != to_purge.end(); ++it) {
       EntryKernel* entry = *it;
-      int64 handle = entry->ref(META_HANDLE);
-      const sync_pb::EntitySpecifics& local_specifics = entry->ref(SPECIFICS);
+      const sync_pb::EntitySpecifics& local_specifics =
+          (*it)->ref(SPECIFICS);
       const sync_pb::EntitySpecifics& server_specifics =
-          entry->ref(SERVER_SPECIFICS);
+          (*it)->ref(SERVER_SPECIFICS);
       ModelType local_type = GetModelTypeFromSpecifics(local_specifics);
       ModelType server_type = GetModelTypeFromSpecifics(server_specifics);
 
-      kernel_->metahandles_to_purge.insert(handle);
-
-      size_t num_erased = 0;
-      num_erased = kernel_->metahandles_map.erase(entry->ref(META_HANDLE));
-      DCHECK_EQ(1u, num_erased);
-      num_erased = kernel_->ids_map.erase(entry->ref(ID).value());
-      DCHECK_EQ(1u, num_erased);
-      num_erased = kernel_->unsynced_metahandles.erase(handle);
-      DCHECK_EQ(entry->ref(IS_UNSYNCED), num_erased > 0);
-      num_erased =
-          kernel_->unapplied_update_metahandles[server_type].erase(handle);
-      DCHECK_EQ(entry->ref(IS_UNAPPLIED_UPDATE), num_erased > 0);
-      if (kernel_->parent_child_index.Contains(entry))
-        kernel_->parent_child_index.Remove(entry);
-
-      if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) {
-        num_erased =
-            kernel_->client_tags_map.erase(entry->ref(UNIQUE_CLIENT_TAG));
-        DCHECK_EQ(1u, num_erased);
-      }
-      if (!entry->ref(UNIQUE_SERVER_TAG).empty()) {
-        num_erased =
-            kernel_->server_tags_map.erase(entry->ref(UNIQUE_SERVER_TAG));
-        DCHECK_EQ(1u, num_erased);
-      }
-
-      if ((types_to_journal.Has(local_type) ||
-           types_to_journal.Has(server_type)) &&
-          (delete_journal_->IsDeleteJournalEnabled(local_type) ||
-           delete_journal_->IsDeleteJournalEnabled(server_type))) {
-        entries_to_journal.insert(entry);
+      if (types_to_unapply.Has(local_type) ||
+          types_to_unapply.Has(server_type)) {
+        UnapplyEntry(entry);
       } else {
-        delete entry;
+        bool save_to_journal =
+            (types_to_journal.Has(local_type) ||
+             types_to_journal.Has(server_type)) &&
+            (delete_journal_->IsDeleteJournalEnabled(local_type) ||
+             delete_journal_->IsDeleteJournalEnabled(server_type));
+        DeleteEntry(save_to_journal, entry, &entries_to_journal);
       }
     }
 
     delete_journal_->AddJournalBatch(&trans, entries_to_journal);
 
-    // Ensure meta tracking for these data types reflects the deleted state.
-    for (ModelTypeSet::Iterator it = types.First();
+    // Ensure meta tracking for these data types reflects the purged state.
+    for (ModelTypeSet::Iterator it = disabled_types.First();
         it.Good(); it.Inc()) {
-      kernel_->persisted_info.reset_download_progress(it.Get());
       kernel_->persisted_info.transaction_version[it.Get()] = 0;
+
+      // Don't discard progress markers for unapplied types.
+      if (!types_to_unapply.Has(it.Get()))
+        kernel_->persisted_info.reset_download_progress(it.Get());
     }
   }
 }
diff --git a/sync/syncable/directory.h b/sync/syncable/directory.h
index b5818f6..4bfd274 100644
--- a/sync/syncable/directory.h
+++ b/sync/syncable/directory.h
@@ -367,19 +367,27 @@ class SYNC_EXPORT Directory {
   // should not be invoked outside of tests.
   bool FullyCheckTreeInvariants(BaseTransaction *trans);
 
-  // Purges all data associated with any entries whose ModelType or
-  // ServerModelType is found in |types|, from sync directory _both_ in memory
-  // and on disk. |types_to_journal| should be subset of |types| and data
-  // of |types_to_journal| are saved in delete journal to help prevent
-  // back-from-dead problem due to offline delete in next sync session. Only
-  // valid, "real" model types are allowed in |types| (see model_type.h for
-  // definitions). "Purge" is just meant to distinguish from "deleting"
-  // entries, which means something different in the syncable namespace.
+  // Purges data associated with any entries whose ModelType or ServerModelType
+  // is found in |disabled_types|, from sync directory _both_ in memory and on
+  // disk. Only valid, "real" model types are allowed in |disabled_types| (see
+  // model_type.h for definitions).
+  // 1. Data associated with |types_to_journal| is saved in the delete journal
+  // to help prevent back-from-dead problem due to offline delete in the next
+  // sync session. |types_to_journal| must be a subset of |disabled_types|.
+  // 2. Data associated with |types_to_unapply| is reset to an "unapplied"
+  // state, wherein all local data is deleted and IS_UNAPPLIED is set to true.
+  // This is useful when there's no benefit in discarding the currently
+  // downloaded state, such as when there are cryptographer errors.
+  // |types_to_unapply| must be a subset of |disabled_types|.
+  // 3. All other data is purged entirely.
+  // Note: "Purge" is just meant to distinguish from "deleting" entries, which
+  // means something different in the syncable namespace.
   // WARNING! This can be real slow, as it iterates over all entries.
   // WARNING! Performs synchronous I/O.
   // Returns: true on success, false if an error was encountered.
-  virtual bool PurgeEntriesWithTypeIn(ModelTypeSet types,
-                                      ModelTypeSet types_to_journal);
+  virtual bool PurgeEntriesWithTypeIn(ModelTypeSet disabled_types,
+                                      ModelTypeSet types_to_journal,
+                                      ModelTypeSet types_to_unapply);
 
  private:
   // A helper that implements the logic of checking tree invariants.
@@ -526,6 +534,12 @@ class SYNC_EXPORT Directory {
       const ScopedKernelLock& lock,
       const Id& parent_id, Directory::ChildHandles* result);
 
+  // Helper methods used by PurgeDisabledTypes.
+  void UnapplyEntry(EntryKernel* entry);
+  void DeleteEntry(bool save_to_journal,
+                   EntryKernel* entry,
+                   EntryKernelSet* entries_to_journal);
+
   Kernel* kernel_;
 
   scoped_ptr<DirectoryBackingStore> store_;
diff --git a/sync/syncable/syncable_mock.h b/sync/syncable/syncable_mock.h
index 55068ed..8d0e88b 100644
--- a/sync/syncable/syncable_mock.h
+++ b/sync/syncable/syncable_mock.h
@@ -28,7 +28,9 @@ class MockDirectory : public Directory {
   MOCK_METHOD1(GetEntryByClientTag,
                syncable::EntryKernel*(const std::string&));
 
-  MOCK_METHOD2(PurgeEntriesWithTypeIn, bool(ModelTypeSet, ModelTypeSet));
+  MOCK_METHOD3(PurgeEntriesWithTypeIn, bool(ModelTypeSet,
+                                            ModelTypeSet,
+                                            ModelTypeSet));
 
  private:
   syncable::NullDirectoryChangeDelegate delegate_;
diff --git a/sync/syncable/syncable_unittest.cc b/sync/syncable/syncable_unittest.cc
index 115bb68..950a6b4 100644
--- a/sync/syncable/syncable_unittest.cc
+++ b/sync/syncable/syncable_unittest.cc
@@ -586,7 +586,7 @@ TEST_F(SyncableDirectoryTest, TakeSnapshotGetsMetahandlesToPurge) {
   }
 
   ModelTypeSet to_purge(BOOKMARKS);
-  dir_->PurgeEntriesWithTypeIn(to_purge, ModelTypeSet());
+  dir_->PurgeEntriesWithTypeIn(to_purge, ModelTypeSet(), ModelTypeSet());
 
   Directory::SaveChangesSnapshot snapshot1;
   base::AutoLock scoped_lock(dir_->kernel_->save_changes_mutex);
@@ -595,7 +595,7 @@ TEST_F(SyncableDirectoryTest, TakeSnapshotGetsMetahandlesToPurge) {
 
   to_purge.Clear();
   to_purge.Put(PREFERENCES);
-  dir_->PurgeEntriesWithTypeIn(to_purge, ModelTypeSet());
+  dir_->PurgeEntriesWithTypeIn(to_purge, ModelTypeSet(), ModelTypeSet());
 
   dir_->HandleSaveChangesFailure(snapshot1);
 
@@ -1774,7 +1774,7 @@ TEST_F(OnDiskSyncableDirectoryTest, TestPurgeEntriesWithTypeIn) {
     ASSERT_EQ(10U, all_set.size());
   }
 
-  dir_->PurgeEntriesWithTypeIn(types_to_purge, ModelTypeSet());
+  dir_->PurgeEntriesWithTypeIn(types_to_purge, ModelTypeSet(), ModelTypeSet());
 
   // We first query the in-memory data, and then reload the directory (without
   // saving) to verify that disk does not still have the data.
@@ -2033,7 +2033,7 @@ TEST_F(OnDiskSyncableDirectoryTest, TestSaveChangesFailureWithPurge) {
   ASSERT_TRUE(dir_->good());
 
   ModelTypeSet set(BOOKMARKS);
-  dir_->PurgeEntriesWithTypeIn(set, ModelTypeSet());
+  dir_->PurgeEntriesWithTypeIn(set, ModelTypeSet(), ModelTypeSet());
   EXPECT_TRUE(IsInMetahandlesToPurge(handle1));
   ASSERT_FALSE(dir_->SaveChanges());
   EXPECT_TRUE(IsInMetahandlesToPurge(handle1));