author    tim@chromium.org <tim@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>    2010-07-02 04:17:38 +0000
committer tim@chromium.org <tim@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>    2010-07-02 04:17:38 +0000
commit    3d807d391319ce5b97e75c77a131c802fe184ccc (patch)
tree      a77da2bde1330e94e1b9e165f3d2ea32b8f84e63 /chrome
parent    2f6d64de8b337131d31aa488eed54a353e241e16 (diff)
sync: add CleanupDisabledTypesCommand to purge data pertaining to previously
synced data types that the user has disabled.
Despite my attempt to simplify the purge code in Directory[BackingStore], I had to go back to my earlier approach using DeleteEntries (which was previously reviewed separately), plus a few extra gotchas in Directory::PurgeEntriesWithTypeIn.
BUG=40252
TEST=CleanupDisabledTypesCommandTest, SyncableDirectoryTest, DirectoryBackingStoreTest
Review URL: http://codereview.chromium.org/2865022
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@51491 0039d316-1c4b-4281-b951-d872f2087c98
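At a high level, the new command walks all model types and purges any type that is not in the current routing info but was routed in the previous session (or, when no previous routing info exists yet, does a one-time "deep clean" on the first session). A condensed sketch of that decision loop, adapted from the new CleanupDisabledTypesCommand::ExecuteImpl in the diff below (the directory lookup and error handling are omitted here):

  // Sketch only: collect currently-disabled types that may have data to purge.
  syncable::ModelTypeSet to_cleanup;
  for (int i = syncable::FIRST_REAL_MODEL_TYPE;
       i < syncable::MODEL_TYPE_COUNT; ++i) {
    syncable::ModelType type = syncable::ModelTypeFromInt(i);
    if (session->routing_info().count(type))
      continue;  // Type is still enabled; nothing to clean up.
    const ModelSafeRoutingInfo& previous =
        session->context()->previous_session_routing_info();
    // Empty previous routing info means first session: do a "deep clean".
    if (previous.empty() || previous.count(type))
      to_cleanup.insert(type);
  }
  // to_cleanup is then handed to Directory::PurgeEntriesWithTypeIn().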
Diffstat (limited to 'chrome')
23 files changed, 599 insertions, 63 deletions
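The purge itself is two-phase: Directory::PurgeEntriesWithTypeIn unlinks matching entries from the in-memory indices right away and stashes their metahandles in metahandles_to_purge; the next SaveChanges snapshot hands those handles to DirectoryBackingStore::DeleteEntries, which issues a single SQL statement of roughly this shape (condensed from the new code in directory_backing_store.cc below):

  // Sketch only: one DELETE covering every purged metahandle in the snapshot.
  string query = "DELETE FROM metas WHERE metahandle IN (";
  for (MetahandleSet::const_iterator it = handles.begin();
       it != handles.end(); ++it) {
    if (it != handles.begin())
      query.append(",");
    query.append(Int64ToString(*it));
  }
  query.append(")");
  // The statement is prepared and stepped once; SQLITE_DONE indicates success.

If SaveChanges fails, HandleSaveChangesFailure re-inserts the snapshot's handles into metahandles_to_purge so the deletion is retried on a later save.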
diff --git a/chrome/browser/sync/engine/apply_updates_command_unittest.cc b/chrome/browser/sync/engine/apply_updates_command_unittest.cc
index 4643239..2cf2825 100644
--- a/chrome/browser/sync/engine/apply_updates_command_unittest.cc
+++ b/chrome/browser/sync/engine/apply_updates_command_unittest.cc
@@ -43,7 +43,7 @@ class ApplyUpdatesCommandTest : public SyncerCommandTest {
   // Create a new unapplied update.
   void CreateUnappliedNewItemWithParent(const string& item_id,
                                         const string& parent_id) {
-    ScopedDirLookup dir(syncdb().manager(), syncdb().name());
+    ScopedDirLookup dir(syncdb()->manager(), syncdb()->name());
     ASSERT_TRUE(dir.good());
     WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
     MutableEntry entry(&trans, syncable::CREATE_NEW_UPDATE_ITEM,
@@ -62,7 +62,7 @@ class ApplyUpdatesCommandTest : public SyncerCommandTest {
 
   void CreateUnappliedNewItem(const string& item_id,
                               const sync_pb::EntitySpecifics& specifics) {
-    ScopedDirLookup dir(syncdb().manager(), syncdb().name());
+    ScopedDirLookup dir(syncdb()->manager(), syncdb()->name());
     ASSERT_TRUE(dir.good());
     WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
     MutableEntry entry(&trans, syncable::CREATE_NEW_UPDATE_ITEM,
diff --git a/chrome/browser/sync/engine/cleanup_disabled_types_command.cc b/chrome/browser/sync/engine/cleanup_disabled_types_command.cc
new file mode 100644
index 0000000..6e1a81e
--- /dev/null
+++ b/chrome/browser/sync/engine/cleanup_disabled_types_command.cc
@@ -0,0 +1,63 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/engine/cleanup_disabled_types_command.h"
+
+#include "chrome/browser/sync/sessions/sync_session.h"
+#include "chrome/browser/sync/sessions/sync_session_context.h"
+#include "chrome/browser/sync/syncable/directory_manager.h"
+#include "chrome/browser/sync/syncable/model_type.h"
+#include "chrome/browser/sync/syncable/syncable.h"
+
+namespace browser_sync {
+
+CleanupDisabledTypesCommand::CleanupDisabledTypesCommand() {}
+CleanupDisabledTypesCommand::~CleanupDisabledTypesCommand() {}
+
+void CleanupDisabledTypesCommand::ExecuteImpl(sessions::SyncSession* session) {
+  syncable::ModelTypeSet to_cleanup;
+  for (int i = syncable::FIRST_REAL_MODEL_TYPE;
+       i < syncable::MODEL_TYPE_COUNT; i++) {
+    syncable::ModelType model_type = syncable::ModelTypeFromInt(i);
+
+    if (session->routing_info().count(model_type))
+      continue;
+
+    // The type isn't currently desired.  Because a full directory purge is
+    // slow, we avoid purging undesired types unless we have reason to believe
+    // they were previously enabled.  Because purging could theoretically fail,
+    // on the first sync session (when there's no previous routing info) we pay
+    // the full directory scan price once and do a "deep clean" of types that
+    // may potentially need cleanup so that we converge to the correct state.
+    //
+    //                        in_previous  | !in_previous
+    //                                     |
+    //   initial_sync_ended   should clean | may have attempted cleanup
+    //  !initial_sync_ended   should clean | may have never been enabled, or
+    //                                     | could have been disabled before
+    //                                     | initial sync ended and cleanup
+    //                                     | may not have happened yet
+    //                                     | (failure, browser restart
+    //                                     | before another sync session,..)
+    const ModelSafeRoutingInfo& previous_routing =
+        session->context()->previous_session_routing_info();
+    if (previous_routing.empty() || previous_routing.count(model_type))
+      to_cleanup.insert(model_type);
+  }
+
+  if (to_cleanup.empty())
+    return;
+
+  syncable::ScopedDirLookup dir(session->context()->directory_manager(),
+                                session->context()->account_name());
+  if (!dir.good()) {
+    LOG(ERROR) << "Scoped dir lookup failed!";
+    return;
+  }
+
+  dir->PurgeEntriesWithTypeIn(to_cleanup);
+}
+
+}  // namespace browser_sync
+
diff --git a/chrome/browser/sync/engine/cleanup_disabled_types_command.h b/chrome/browser/sync/engine/cleanup_disabled_types_command.h
new file mode 100644
index 0000000..fb06724
--- /dev/null
+++ b/chrome/browser/sync/engine/cleanup_disabled_types_command.h
@@ -0,0 +1,44 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_ENGINE_CLEANUP_DISABLED_TYPES_COMMAND_H_
+#define CHROME_BROWSER_SYNC_ENGINE_CLEANUP_DISABLED_TYPES_COMMAND_H_
+
+#include "chrome/browser/sync/engine/syncer_command.h"
+
+namespace browser_sync {
+
+// A syncer command that purges (from memory and disk) entries belonging to
+// a ModelType or ServerModelType that the user has not elected to sync.
+//
+// This is done as part of a session to 1) ensure it does not block the UI,
+// and 2) avoid complicated races that could arise between a) deleting
+// things b) a sync session trying to use these things c) and the potential
+// re-enabling of the data type by the user before some scheduled deletion
+// took place.  Here, we are safe to perform I/O synchronously and we know it
+// is a safe time to delete as we are in the only active session.
+//
+// The removal from memory is done synchronously, while the disk purge is left
+// to an asynchronous SaveChanges operation.  However, all the updates for
+// meta data fields (such as initial_sync_ended) as well as the actual entry
+// deletions will be committed in a single sqlite transaction.  Thus it is
+// possible that disabled types re-appear (in the sync db) after a reboot,
+// but things will remain in a consistent state.  This kind of error case is
+// cared for in this command by retrying; see ExecuteImpl.
+class CleanupDisabledTypesCommand : public SyncerCommand {
+ public:
+  CleanupDisabledTypesCommand();
+  virtual ~CleanupDisabledTypesCommand();
+
+  // SyncerCommand implementation.
+  virtual void ExecuteImpl(sessions::SyncSession* session);
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(CleanupDisabledTypesCommand);
+};
+
+}  // namespace browser_sync
+
+#endif  // CHROME_BROWSER_SYNC_ENGINE_CLEANUP_DISABLED_TYPES_COMMAND_H_
+
diff --git a/chrome/browser/sync/engine/cleanup_disabled_types_command_unittest.cc b/chrome/browser/sync/engine/cleanup_disabled_types_command_unittest.cc
new file mode 100644
index 0000000..4cb4edb
--- /dev/null
+++ b/chrome/browser/sync/engine/cleanup_disabled_types_command_unittest.cc
@@ -0,0 +1,103 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <vector>
+
+#include "chrome/browser/sync/engine/cleanup_disabled_types_command.h"
+
+#include "chrome/browser/sync/engine/syncer_end_command.h"
+#include "chrome/browser/sync/sessions/sync_session.h"
+#include "chrome/test/sync/engine/syncer_command_test.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+using testing::_;
+
+namespace browser_sync {
+
+using sessions::ScopedSessionContextSyncerEventChannel;
+
+class CleanupDisabledTypesCommandTest : public MockDirectorySyncerCommandTest {
+ public:
+  CleanupDisabledTypesCommandTest() {
+    for (int i = syncable::FIRST_REAL_MODEL_TYPE;
+         i < syncable::MODEL_TYPE_COUNT; i++) {
+      all_types_.insert(syncable::ModelTypeFromInt(i));
+    }
+  }
+  virtual void SetUp() {
+    mutable_routing_info()->clear();
+    (*mutable_routing_info())[syncable::BOOKMARKS] = GROUP_PASSIVE;
+    MockDirectorySyncerCommandTest::SetUp();
+  }
+
+  // Overridden to allow SyncerEndCommand Execute to work.
+  virtual bool IsSyncingCurrentlySilenced() {
+    return false;
+  }
+
+  const syncable::ModelTypeSet& all_types() { return all_types_; }
+
+ private:
+  syncable::ModelTypeSet all_types_;
+};
+
+// TODO(tim): Add syncer test to verify previous routing info is set.
+TEST_F(CleanupDisabledTypesCommandTest, NoPreviousRoutingInfo) {
+  CleanupDisabledTypesCommand command;
+  syncable::ModelTypeSet expected(all_types());
+  expected.erase(syncable::BOOKMARKS);
+  EXPECT_CALL(*mock_directory(), PurgeEntriesWithTypeIn(expected));
+  command.ExecuteImpl(session());
+}
+
+TEST_F(CleanupDisabledTypesCommandTest, NoPurge) {
+  CleanupDisabledTypesCommand command;
+  EXPECT_CALL(*mock_directory(), PurgeEntriesWithTypeIn(_)).Times(0);
+
+  ModelSafeRoutingInfo prev(routing_info());
+  session()->context()->set_previous_session_routing_info(prev);
+  (*mutable_routing_info())[syncable::AUTOFILL] = GROUP_PASSIVE;
+  command.ExecuteImpl(session());
+
+  prev = routing_info();
+  command.ExecuteImpl(session());
+}
+
+TEST_F(CleanupDisabledTypesCommandTest, TypeDisabled) {
+  CleanupDisabledTypesCommand command;
+  syncable::ModelTypeSet expected;
+  expected.insert(syncable::PASSWORDS);
+  expected.insert(syncable::PREFERENCES);
+
+  (*mutable_routing_info())[syncable::AUTOFILL] = GROUP_PASSIVE;
+  (*mutable_routing_info())[syncable::THEMES] = GROUP_PASSIVE;
+  (*mutable_routing_info())[syncable::EXTENSIONS] = GROUP_PASSIVE;
+
+  ModelSafeRoutingInfo prev(routing_info());
+  prev[syncable::PASSWORDS] = GROUP_PASSIVE;
+  prev[syncable::PREFERENCES] = GROUP_PASSIVE;
+  session()->context()->set_previous_session_routing_info(prev);
+
+  EXPECT_CALL(*mock_directory(), PurgeEntriesWithTypeIn(expected));
+  command.ExecuteImpl(session());
+}
+
+TEST_F(CleanupDisabledTypesCommandTest,
+       SyncerEndCommandSetsPreviousRoutingInfo) {
+  SyncerEndCommand command;
+  // Need channel for SyncerEndCommand.
+  scoped_ptr<SyncerEventChannel> c(new SyncerEventChannel());
+  ScopedSessionContextSyncerEventChannel s(session()->context(), c.get());
+
+  ModelSafeRoutingInfo info;
+  EXPECT_TRUE(info == session()->context()->previous_session_routing_info());
+  command.ExecuteImpl(session());
+  ASSERT_FALSE(routing_info().empty());
+  EXPECT_TRUE(routing_info() ==
+              session()->context()->previous_session_routing_info());
+}
+
+}  // namespace browser_sync
+
diff --git a/chrome/browser/sync/engine/download_updates_command_unittest.cc b/chrome/browser/sync/engine/download_updates_command_unittest.cc
index 238377a..1b87ad2 100644
--- a/chrome/browser/sync/engine/download_updates_command_unittest.cc
+++ b/chrome/browser/sync/engine/download_updates_command_unittest.cc
@@ -88,7 +88,7 @@ TEST_F(DownloadUpdatesCommandTest, SetRequestedTypes) {
 }
 
 TEST_F(DownloadUpdatesCommandTest, OldestTimestampPicked) {
-  syncable::ScopedDirLookup dir(syncdb().manager(), syncdb().name());
+  syncable::ScopedDirLookup dir(syncdb()->manager(), syncdb()->name());
   ASSERT_TRUE(dir.good());
 
   // i,j,k range over every possible model type.  j and k are enabled and at
diff --git a/chrome/browser/sync/engine/process_commit_response_command_unittest.cc b/chrome/browser/sync/engine/process_commit_response_command_unittest.cc
index f313a41..b6afab8 100644
--- a/chrome/browser/sync/engine/process_commit_response_command_unittest.cc
+++ b/chrome/browser/sync/engine/process_commit_response_command_unittest.cc
@@ -86,7 +86,7 @@ class ProcessCommitResponseCommandTestWithParam
                         bool is_folder,
                         syncable::ModelType model_type,
                         int64* metahandle_out) {
-    ScopedDirLookup dir(syncdb().manager(), syncdb().name());
+    ScopedDirLookup dir(syncdb()->manager(), syncdb()->name());
     ASSERT_TRUE(dir.good());
     WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
     Id predecessor_id = dir->GetLastChildId(&trans, parent_id);
@@ -132,7 +132,7 @@ class ProcessCommitResponseCommandTestWithParam
     commit_set_->AddCommitItem(metahandle, item_id, model_type);
     sync_state->set_commit_set(*commit_set_.get());
 
-    ScopedDirLookup dir(syncdb().manager(), syncdb().name());
+    ScopedDirLookup dir(syncdb()->manager(), syncdb()->name());
     ASSERT_TRUE(dir.good());
     WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
     MutableEntry entry(&trans, syncable::GET_BY_ID, item_id);
@@ -228,7 +228,7 @@ TEST_F(ProcessCommitResponseCommandTest, MultipleCommitIdProjections) {
 
   command_.ExecuteImpl(session());
 
-  ScopedDirLookup dir(syncdb().manager(), syncdb().name());
+  ScopedDirLookup dir(syncdb()->manager(), syncdb()->name());
   ASSERT_TRUE(dir.good());
   ReadTransaction trans(dir, __FILE__, __LINE__);
   Id new_fid = dir->GetFirstChildId(&trans, id_factory_.root());
@@ -282,7 +282,7 @@ TEST_F(ProcessCommitResponseCommandTest, NewFolderCommitKeepsChildOrder) {
 
   // Verify that the item is reachable.
   {
-    ScopedDirLookup dir(syncdb().manager(), syncdb().name());
+    ScopedDirLookup dir(syncdb()->manager(), syncdb()->name());
     ASSERT_TRUE(dir.good());
     ReadTransaction trans(dir, __FILE__, __LINE__);
     ASSERT_EQ(folder_id, dir->GetFirstChildId(&trans, id_factory_.root()));
@@ -315,7 +315,7 @@ TEST_F(ProcessCommitResponseCommandTest, NewFolderCommitKeepsChildOrder) {
   // items in the commit batch should have their IDs changed to server IDs.
   command_.ExecuteImpl(session());
 
-  ScopedDirLookup dir(syncdb().manager(), syncdb().name());
+  ScopedDirLookup dir(syncdb()->manager(), syncdb()->name());
   ASSERT_TRUE(dir.good());
   ReadTransaction trans(dir, __FILE__, __LINE__);
   // Lookup the parent folder by finding a child of the root.  We can't use
diff --git a/chrome/browser/sync/engine/syncer.cc b/chrome/browser/sync/engine/syncer.cc
index 2956a04..b3608c6 100644
--- a/chrome/browser/sync/engine/syncer.cc
+++ b/chrome/browser/sync/engine/syncer.cc
@@ -10,6 +10,7 @@
 #include "chrome/browser/sync/engine/apply_updates_command.h"
 #include "chrome/browser/sync/engine/build_and_process_conflict_sets_command.h"
 #include "chrome/browser/sync/engine/build_commit_command.h"
+#include "chrome/browser/sync/engine/cleanup_disabled_types_command.h"
 #include "chrome/browser/sync/engine/conflict_resolver.h"
 #include "chrome/browser/sync/engine/download_updates_command.h"
 #include "chrome/browser/sync/engine/get_commit_ids_command.h"
@@ -119,8 +120,15 @@ void Syncer::SyncShare(sessions::SyncSession* session,
     switch (current_step) {
       case SYNCER_BEGIN:
         LOG(INFO) << "Syncer Begin";
+        next_step = CLEANUP_DISABLED_TYPES;
+        break;
+      case CLEANUP_DISABLED_TYPES: {
+        LOG(INFO) << "Cleaning up disabled types";
+        CleanupDisabledTypesCommand cleanup;
+        cleanup.Execute(session);
         next_step = DOWNLOAD_UPDATES;
         break;
+      }
       case DOWNLOAD_UPDATES: {
         LOG(INFO) << "Downloading Updates";
         DownloadUpdatesCommand download_updates;
diff --git a/chrome/browser/sync/engine/syncer.h b/chrome/browser/sync/engine/syncer.h
index 27a8d70..820f760 100644
--- a/chrome/browser/sync/engine/syncer.h
+++ b/chrome/browser/sync/engine/syncer.h
@@ -44,6 +44,7 @@ static const int kDefaultMaxCommitBatchSize = 25;
 
 enum SyncerStep {
   SYNCER_BEGIN,
+  CLEANUP_DISABLED_TYPES,
   DOWNLOAD_UPDATES,
   PROCESS_CLIENT_COMMAND,
   VERIFY_UPDATES,
diff --git a/chrome/browser/sync/engine/syncer_end_command.cc b/chrome/browser/sync/engine/syncer_end_command.cc
index 6d7f091..7756d7a 100644
--- a/chrome/browser/sync/engine/syncer_end_command.cc
+++ b/chrome/browser/sync/engine/syncer_end_command.cc
@@ -17,6 +17,8 @@ SyncerEndCommand::~SyncerEndCommand() {}
 void SyncerEndCommand::ExecuteImpl(sessions::SyncSession* session) {
   sessions::StatusController* status(session->status_controller());
   status->set_syncing(false);
+  session->context()->set_previous_session_routing_info(
+      session->routing_info());
 
   // This might be the first time we've fully completed a sync cycle, for
   // some subset of the currently synced datatypes.
diff --git a/chrome/browser/sync/engine/verify_updates_command_unittest.cc b/chrome/browser/sync/engine/verify_updates_command_unittest.cc
index b8693b4..3ac4bf2 100644
--- a/chrome/browser/sync/engine/verify_updates_command_unittest.cc
+++ b/chrome/browser/sync/engine/verify_updates_command_unittest.cc
@@ -41,7 +41,7 @@ class VerifyUpdatesCommandTest : public SyncerCommandTest {
   void CreateLocalItem(const std::string& item_id,
                        const std::string& parent_id,
                        const syncable::ModelType& type) {
-    ScopedDirLookup dir(syncdb().manager(), syncdb().name());
+    ScopedDirLookup dir(syncdb()->manager(), syncdb()->name());
     ASSERT_TRUE(dir.good());
     WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
     MutableEntry entry(&trans, syncable::CREATE_NEW_UPDATE_ITEM,
diff --git a/chrome/browser/sync/sessions/sync_session_context.h b/chrome/browser/sync/sessions/sync_session_context.h
index 494fbdc..bc4ea28 100644
--- a/chrome/browser/sync/sessions/sync_session_context.h
+++ b/chrome/browser/sync/sessions/sync_session_context.h
@@ -96,6 +96,14 @@ class SyncSessionContext {
   }
 
   const std::string& account_name() { return account_name_; }
+  const ModelSafeRoutingInfo& previous_session_routing_info() const {
+    return previous_session_routing_info_;
+  }
+
+  void set_previous_session_routing_info(const ModelSafeRoutingInfo& info) {
+    previous_session_routing_info_ = info;
+  }
+
  private:
   // Rather than force clients to set and null-out various context members, we
   // extend our encapsulation boundary to scoped helpers that take care of this
@@ -126,6 +134,10 @@ class SyncSessionContext {
   // The name of the account being synced.
   std::string account_name_;
 
+  // Some routing info history to help us clean up types that get disabled
+  // by the user.
+  ModelSafeRoutingInfo previous_session_routing_info_;
+
   DISALLOW_COPY_AND_ASSIGN(SyncSessionContext);
 };
 
diff --git a/chrome/browser/sync/syncable/directory_backing_store.cc b/chrome/browser/sync/syncable/directory_backing_store.cc
index 63df81d..b4de3f6 100644
--- a/chrome/browser/sync/syncable/directory_backing_store.cc
+++ b/chrome/browser/sync/syncable/directory_backing_store.cc
@@ -250,6 +250,33 @@ void DirectoryBackingStore::EndLoad() {
   load_dbhandle_ = NULL;  // No longer used.
 }
 
+void DirectoryBackingStore::EndSave() {
+  sqlite3_close(save_dbhandle_);
+  save_dbhandle_ = NULL;
+}
+
+bool DirectoryBackingStore::DeleteEntries(const MetahandleSet& handles) {
+  if (handles.empty())
+    return true;
+
+  sqlite3* dbhandle = LazyGetSaveHandle();
+
+  string query = "DELETE FROM metas WHERE metahandle IN (";
+  for (MetahandleSet::const_iterator it = handles.begin(); it != handles.end();
+       ++it) {
+    if (it != handles.begin())
+      query.append(",");
+    query.append(Int64ToString(*it));
+  }
+  query.append(")");
+  SQLStatement statement;
+  int result = statement.prepare(dbhandle, query.data(), query.size());
+  if (SQLITE_OK == result)
+    result = statement.step();
+
+  return SQLITE_DONE == result;
+}
+
 bool DirectoryBackingStore::SaveChanges(
     const Directory::SaveChangesSnapshot& snapshot) {
   sqlite3* dbhandle = LazyGetSaveHandle();
@@ -273,6 +300,9 @@ bool DirectoryBackingStore::SaveChanges(
     return false;
   }
 
+  if (!DeleteEntries(snapshot.metahandles_to_purge))
+    return false;
+
   if (save_info) {
     const Directory::PersistedKernelInfo& info = snapshot.kernel_info;
     SQLStatement update;
diff --git a/chrome/browser/sync/syncable/directory_backing_store.h b/chrome/browser/sync/syncable/directory_backing_store.h
index fcbd848..36322e8 100644
--- a/chrome/browser/sync/syncable/directory_backing_store.h
+++ b/chrome/browser/sync/syncable/directory_backing_store.h
@@ -81,6 +81,7 @@ class DirectoryBackingStore {
   FRIEND_TEST_ALL_PREFIXES(DirectoryBackingStoreTest, MigrateVersion71To72);
   FRIEND_TEST_ALL_PREFIXES(DirectoryBackingStoreTest, ModelTypeIds);
   FRIEND_TEST_ALL_PREFIXES(DirectoryBackingStoreTest, Corruption);
+  FRIEND_TEST_ALL_PREFIXES(DirectoryBackingStoreTest, DeleteEntries);
   FRIEND_TEST_ALL_PREFIXES(MigrationTest, ToCurrentVersion);
   friend class MigrationTest;
 
@@ -122,6 +123,13 @@ class DirectoryBackingStore {
   bool BeginLoad();
   void EndLoad();
 
+  // Close save_dbhandle_.  Broken out for testing.
+  void EndSave();
+
+  // Removes each entry whose metahandle is in |handles| from the database.
+  // Does synchronous I/O.  Returns false on error.
+  bool DeleteEntries(const MetahandleSet& handles);
+
   // Lazy creation of save_dbhandle_ for use by SaveChanges code path.
   sqlite3* LazyGetSaveHandle();
 
diff --git a/chrome/browser/sync/syncable/directory_backing_store_unittest.cc b/chrome/browser/sync/syncable/directory_backing_store_unittest.cc
index 3d6d143..b1f5dc98 100644
--- a/chrome/browser/sync/syncable/directory_backing_store_unittest.cc
+++ b/chrome/browser/sync/syncable/directory_backing_store_unittest.cc
@@ -1043,4 +1043,49 @@ TEST_F(DirectoryBackingStoreTest, Corruption) {
   }
 }
 
+TEST_F(DirectoryBackingStoreTest, DeleteEntries) {
+  SetUpCurrentDatabaseAndCheckVersion();
+  scoped_ptr<DirectoryBackingStore> dbs(
+      new DirectoryBackingStore(GetUsername(), GetDatabasePath()));
+  dbs->BeginLoad();
+
+  MetahandlesIndex index;
+  dbs->LoadEntries(&index);
+  size_t initial_size = index.size();
+  ASSERT_LT(0U, initial_size) << "Test requires entries to delete.";
+  int64 first_to_die = (*index.begin())->ref(META_HANDLE);
+  MetahandleSet to_delete;
+  to_delete.insert(first_to_die);
+  EXPECT_TRUE(dbs->DeleteEntries(to_delete));
+
+  index.clear();
+  dbs->LoadEntries(&index);
+
+  EXPECT_EQ(initial_size - 1, index.size());
+  bool delete_failed = false;
+  for (MetahandlesIndex::iterator it = index.begin(); it != index.end();
+       ++it) {
+    if ((*it)->ref(META_HANDLE) == first_to_die) {
+      delete_failed = true;
+      break;
+    }
+  }
+  EXPECT_FALSE(delete_failed);
+
+  to_delete.clear();
+  for (MetahandlesIndex::iterator it = index.begin(); it != index.end();
+       ++it) {
+    to_delete.insert((*it)->ref(META_HANDLE));
+  }
+
+  EXPECT_TRUE(dbs->DeleteEntries(to_delete));
+
+  index.clear();
+  dbs->LoadEntries(&index);
+  EXPECT_EQ(0U, index.size());
+
+  dbs->EndLoad();
+  dbs->EndSave();
+}
+
 }  // namespace syncable
diff --git a/chrome/browser/sync/syncable/directory_manager.h b/chrome/browser/sync/syncable/directory_manager.h
index 61f303e..103ec30 100644
--- a/chrome/browser/sync/syncable/directory_manager.h
+++ b/chrome/browser/sync/syncable/directory_manager.h
@@ -54,7 +54,7 @@ class DirectoryManager {
   // root_path specifies where db is stored.
   explicit DirectoryManager(const FilePath& root_path);
-  ~DirectoryManager();
+  virtual ~DirectoryManager();
 
   static const FilePath GetSyncDataDatabaseFilename();
   const FilePath GetSyncDataDatabasePath() const;
diff --git a/chrome/browser/sync/syncable/syncable.cc b/chrome/browser/sync/syncable/syncable.cc
index 3e93d40..2f11806 100644
--- a/chrome/browser/sync/syncable/syncable.cc
+++ b/chrome/browser/sync/syncable/syncable.cc
@@ -157,6 +157,11 @@ bool LessPathNames::operator() (const string& a, const string& b) const {
 static const DirectoryChangeEvent kShutdownChangesEvent =
     { DirectoryChangeEvent::SHUTDOWN, 0, 0 };
 
+void Directory::init_kernel(const std::string& name) {
+  DCHECK(kernel_ == NULL);
+  kernel_ = new Kernel(FilePath(), name, KernelLoadInfo());
+}
+
 Directory::Kernel::Kernel(const FilePath& db_path,
                           const string& name,
                           const KernelLoadInfo& info)
@@ -170,6 +175,7 @@ Directory::Kernel::Kernel(const FilePath& db_path,
       unapplied_update_metahandles(new MetahandleSet),
       unsynced_metahandles(new MetahandleSet),
       dirty_metahandles(new MetahandleSet),
+      metahandles_to_purge(new MetahandleSet),
      channel(new Directory::Channel(syncable::DIRECTORY_DESTROYED)),
      info_status(Directory::KERNEL_SHARE_INFO_VALID),
      persisted_info(info.kernel_info),
@@ -197,6 +203,7 @@ Directory::Kernel::~Kernel() {
   delete unsynced_metahandles;
   delete unapplied_update_metahandles;
   delete dirty_metahandles;
+  delete metahandles_to_purge;
   delete parent_id_child_index;
   delete client_tag_index;
   delete ids_index;
@@ -522,6 +529,10 @@ void Directory::TakeSnapshotForSaveChanges(SaveChangesSnapshot* snapshot) {
   }
   ClearDirtyMetahandles();
 
+  // Set purged handles.
+  DCHECK(snapshot->metahandles_to_purge.empty());
+  snapshot->metahandles_to_purge.swap(*(kernel_->metahandles_to_purge));
+
   // Fill kernel_info_status and kernel_info.
   snapshot->kernel_info = kernel_->persisted_info;
   // To avoid duplicates when the process crashes, we record the next_id to be
@@ -579,6 +590,7 @@ void Directory::VacuumAfterSaveChanges(const SaveChangesSnapshot& snapshot) {
       // Might not be in it
       num_erased = kernel_->client_tag_index->erase(entry);
       DCHECK_EQ(entry->ref(UNIQUE_CLIENT_TAG).empty(), !num_erased);
+      DCHECK(!kernel_->parent_id_child_index->count(entry));
       delete entry;
     }
   }
@@ -594,30 +606,42 @@ void Directory::PurgeEntriesWithTypeIn(const std::set<ModelType>& types) {
     return;
 
   {
-    ScopedKernelLock lock(this);
-    for (MetahandlesIndex::iterator it = kernel_->metahandles_index->begin();
-         it != kernel_->metahandles_index->end(); ++it) {
-      const sync_pb::EntitySpecifics& local_specifics = (*it)->ref(SPECIFICS);
-      const sync_pb::EntitySpecifics& server_specifics =
-          (*it)->ref(SERVER_SPECIFICS);
-      ModelType local_type = GetModelTypeFromSpecifics(local_specifics);
-      ModelType server_type = GetModelTypeFromSpecifics(server_specifics);
-
-      if (types.count(local_type) > 0 || types.count(server_type) > 0) {
-        // Set conditions for permanent deletion.
-        (*it)->put(IS_DEL, true);
-        (*it)->put(IS_UNSYNCED, false);
-        (*it)->put(IS_UNAPPLIED_UPDATE, false);
-        (*it)->mark_dirty(kernel_->dirty_metahandles);
-        DCHECK(!SafeToPurgeFromMemory(*it));
+    WriteTransaction trans(this, PURGE_ENTRIES, __FILE__, __LINE__);
+    {
+      ScopedKernelLock lock(this);
+      MetahandlesIndex::iterator it = kernel_->metahandles_index->begin();
+      while (it != kernel_->metahandles_index->end()) {
+        const sync_pb::EntitySpecifics& local_specifics = (*it)->ref(SPECIFICS);
+        const sync_pb::EntitySpecifics& server_specifics =
+            (*it)->ref(SERVER_SPECIFICS);
+        ModelType local_type = GetModelTypeFromSpecifics(local_specifics);
+        ModelType server_type = GetModelTypeFromSpecifics(server_specifics);
+
+        // Note the dance around incrementing |it|, since we sometimes erase().
+        if (types.count(local_type) > 0 || types.count(server_type) > 0) {
+          UnlinkEntryFromOrder(*it, NULL, &lock);
+
+          kernel_->metahandles_to_purge->insert((*it)->ref(META_HANDLE));
+
+          size_t num_erased = 0;
+          num_erased = kernel_->ids_index->erase(*it);
+          DCHECK_EQ(1u, num_erased);
+          num_erased = kernel_->client_tag_index->erase(*it);
+          DCHECK_EQ((*it)->ref(UNIQUE_CLIENT_TAG).empty(), !num_erased);
+          num_erased = kernel_->parent_id_child_index->erase(*it);
+          DCHECK_EQ((*it)->ref(IS_DEL), !num_erased);
+          kernel_->metahandles_index->erase(it++);
+        } else {
+          ++it;
+        }
       }
-    }
-    // Ensure meta tracking for these data types reflects the deleted state.
-    for (std::set<ModelType>::const_iterator it = types.begin();
-         it != types.end(); ++it) {
-      set_initial_sync_ended_for_type_unsafe(*it, false);
-      set_last_download_timestamp_unsafe(*it, 0);
+
+      // Ensure meta tracking for these data types reflects the deleted state.
+      for (std::set<ModelType>::const_iterator it = types.begin();
+           it != types.end(); ++it) {
+        set_initial_sync_ended_for_type_unsafe(*it, false);
+        set_last_download_timestamp_unsafe(*it, 0);
+      }
     }
   }
 }
@@ -640,6 +664,9 @@ void Directory::HandleSaveChangesFailure(const SaveChangesSnapshot& snapshot) {
       (*found)->mark_dirty(kernel_->dirty_metahandles);
     }
   }
+
+  kernel_->metahandles_to_purge->insert(snapshot.metahandles_to_purge.begin(),
+                                        snapshot.metahandles_to_purge.end());
 }
 
 int64 Directory::last_download_timestamp(ModelType model_type) const {
@@ -1276,32 +1303,44 @@ bool MutableEntry::Put(IndexedBitField field, bool value) {
 }
 
 void MutableEntry::UnlinkFromOrder() {
-  Id old_previous = Get(PREV_ID);
-  Id old_next = Get(NEXT_ID);
+  ScopedKernelLock lock(dir());
+  dir()->UnlinkEntryFromOrder(kernel_, write_transaction(), &lock);
+}
 
-  // Self-looping signifies that this item is not in the order.  If we were to
-  // set these to 0, we could get into trouble because this node might look
-  // like the first node in the ordering.
-  Put(NEXT_ID, Get(ID));
-  Put(PREV_ID, Get(ID));
+void Directory::UnlinkEntryFromOrder(EntryKernel* entry,
+                                     WriteTransaction* trans,
+                                     ScopedKernelLock* lock) {
+  CHECK(!trans || this == trans->directory());
+  Id old_previous = entry->ref(PREV_ID);
+  Id old_next = entry->ref(NEXT_ID);
+
+  entry->put(NEXT_ID, entry->ref(ID));
+  entry->put(PREV_ID, entry->ref(ID));
+  entry->mark_dirty(kernel_->dirty_metahandles);
 
   if (!old_previous.IsRoot()) {
     if (old_previous == old_next) {
       // Note previous == next doesn't imply previous == next == Get(ID).  We
       // could have prev==next=="c-XX" and Get(ID)=="sX..." if an item was added
       // and deleted before receiving the server ID in the commit response.
-      CHECK((old_next == Get(ID)) || !old_next.ServerKnows());
+      CHECK((old_next == entry->ref(ID)) || !old_next.ServerKnows());
       return;  // Done if we were already self-looped (hence unlinked).
     }
-    MutableEntry previous_entry(write_transaction(), GET_BY_ID, old_previous);
-    CHECK(previous_entry.good());
-    previous_entry.Put(NEXT_ID, old_next);
+    EntryKernel* previous_entry = GetEntryById(old_previous, lock);
+    CHECK(previous_entry);
+    if (trans)
+      trans->SaveOriginal(previous_entry);
+    previous_entry->put(NEXT_ID, old_next);
+    previous_entry->mark_dirty(kernel_->dirty_metahandles);
   }
   if (!old_next.IsRoot()) {
-    MutableEntry next_entry(write_transaction(), GET_BY_ID, old_next);
-    CHECK(next_entry.good());
-    next_entry.Put(PREV_ID, old_previous);
+    EntryKernel* next_entry = GetEntryById(old_next, lock);
+    CHECK(next_entry);
+    if (trans)
+      trans->SaveOriginal(next_entry);
+    next_entry->put(PREV_ID, old_previous);
+    next_entry->mark_dirty(kernel_->dirty_metahandles);
   }
 }
diff --git a/chrome/browser/sync/syncable/syncable.h b/chrome/browser/sync/syncable/syncable.h
index fc5f33f..8a651a9 100644
--- a/chrome/browser/sync/syncable/syncable.h
+++ b/chrome/browser/sync/syncable/syncable.h
@@ -570,7 +570,13 @@ struct MultiTypeTimeStamp {
 // the write.  This is defined up here since DirectoryChangeEvent also contains
 // one.
 enum WriterTag {
-  INVALID, SYNCER, AUTHWATCHER, UNITTEST, VACUUM_AFTER_SAVE, SYNCAPI
+  INVALID,
+  SYNCER,
+  AUTHWATCHER,
+  UNITTEST,
+  VACUUM_AFTER_SAVE,
+  PURGE_ENTRIES,
+  SYNCAPI
 };
 
 // A separate Event type and channel for very frequent changes, caused
@@ -644,6 +650,8 @@ class Directory {
   FRIEND_TEST_ALL_PREFIXES(SyncableDirectoryTest,
                            TakeSnapshotGetsOnlyDirtyHandlesTest);
   FRIEND_TEST_ALL_PREFIXES(SyncableDirectoryTest, TestPurgeEntriesWithTypeIn);
+  FRIEND_TEST_ALL_PREFIXES(SyncableDirectoryTest,
+                           TakeSnapshotGetsMetahandlesToPurge);
 
  public:
   class EventListenerHookup;
@@ -692,6 +700,7 @@ class Directory {
     KernelShareInfoStatus kernel_info_status;
     PersistedKernelInfo kernel_info;
     OriginalEntries dirty_metas;
+    MetahandleSet metahandles_to_purge;
 
     SaveChangesSnapshot() : kernel_info_status(KERNEL_SHARE_INFO_INVALID) { }
   };
@@ -773,6 +782,9 @@ class Directory {
   // The semantic checking is implemented higher up.
   void Undelete(EntryKernel* const entry);
   void Delete(EntryKernel* const entry);
+  void UnlinkEntryFromOrder(EntryKernel* entry,
+                            WriteTransaction* trans,
+                            ScopedKernelLock* lock);
 
   // Overridden by tests.
   virtual DirectoryBackingStore* CreateBackingStore(
@@ -862,7 +874,7 @@ class Directory {
   // entries, which means something different in the syncable namespace.
   // WARNING! This can be real slow, as it iterates over all entries.
   // WARNING! Performs synchronous I/O.
-  void PurgeEntriesWithTypeIn(const std::set<ModelType>& types);
+  virtual void PurgeEntriesWithTypeIn(const std::set<ModelType>& types);
 
  private:
   // Helper to prime ids_index, parent_id_and_names_index, unsynced_metahandles
@@ -926,6 +938,10 @@ class Directory {
   typedef std::set<EntryKernel*, LessField<StringField, UNIQUE_CLIENT_TAG> >
       ClientTagIndex;
 
+ protected:
+  // Used by tests.
+  void init_kernel(const std::string& name);
+
  private:
 
   struct Kernel {
@@ -970,6 +986,10 @@ class Directory {
     // necessarily).  Dirtyness is confirmed in TakeSnapshotForSaveChanges().
     MetahandleSet* const dirty_metahandles;
 
+    // When a purge takes place, we remove items from all our indices and stash
+    // them in here so that SaveChanges can persist their permanent deletion.
+    MetahandleSet* const metahandles_to_purge;
+
     // TODO(ncarter): Figure out what the hell this is, and comment it.
     Channel* const channel;
 
diff --git a/chrome/browser/sync/syncable/syncable_unittest.cc b/chrome/browser/sync/syncable/syncable_unittest.cc
index 809a47c..f5a9fa7 100644
--- a/chrome/browser/sync/syncable/syncable_unittest.cc
+++ b/chrome/browser/sync/syncable/syncable_unittest.cc
@@ -300,24 +300,25 @@ class SyncableDirectoryTest : public testing::Test {
     return 1 == dir_->kernel_->dirty_metahandles->count(metahandle);
   }
 
-  bool IsSafeToPermanentlyDelete(Entry* e) {
-    return e->Get(IS_DEL) && !e->Get(IS_UNSYNCED) &&
-           !e->Get(IS_UNAPPLIED_UPDATE);
+  bool IsInMetahandlesToPurge(int64 metahandle) {
+    return 1 == dir_->kernel_->metahandles_to_purge->count(metahandle);
   }
 
   void CheckPurgeEntriesWithTypeInSucceeded(const ModelTypeSet& types_to_purge,
                                             bool before_reload) {
+    SCOPED_TRACE(testing::Message("Before reload: ") << before_reload);
     {
       ReadTransaction trans(dir_.get(), __FILE__, __LINE__);
       MetahandleSet all_set;
       dir_->GetAllMetaHandles(&trans, &all_set);
-      EXPECT_EQ(before_reload ? 7U : 3U, all_set.size());
+      EXPECT_EQ(3U, all_set.size());
+      if (before_reload)
+        EXPECT_EQ(4U, dir_->kernel_->metahandles_to_purge->size());
       for (MetahandleSet::iterator iter = all_set.begin();
            iter != all_set.end(); ++iter) {
         Entry e(&trans, GET_BY_HANDLE, *iter);
         if ((types_to_purge.count(e.GetModelType()) ||
-             types_to_purge.count(e.GetServerModelType())) &&
-            (!before_reload || !IsSafeToPermanentlyDelete(&e))) {
+             types_to_purge.count(e.GetServerModelType()))) {
           FAIL() << "Illegal type should have been deleted.";
         }
       }
@@ -359,6 +360,49 @@ class SyncableDirectoryTest : public testing::Test {
                    bool set_server_fields, bool is_dir, bool add_to_lru,
                    int64 *meta_handle);
 };
 
+TEST_F(SyncableDirectoryTest, TakeSnapshotGetsMetahandlesToPurge) {
+  const int metas_to_create = 50;
+  MetahandleSet expected_purges;
+  MetahandleSet all_handles;
+  {
+    WriteTransaction trans(dir_.get(), UNITTEST, __FILE__, __LINE__);
+    for (int i = 0; i < metas_to_create; i++) {
+      MutableEntry e(&trans, CREATE, trans.root_id(), "foo");
+      e.Put(IS_UNSYNCED, true);
+      sync_pb::EntitySpecifics specs;
+      if (i % 2 == 0) {
+        AddDefaultExtensionValue(BOOKMARKS, &specs);
+        expected_purges.insert(e.Get(META_HANDLE));
+        all_handles.insert(e.Get(META_HANDLE));
+      } else {
+        AddDefaultExtensionValue(PREFERENCES, &specs);
+        all_handles.insert(e.Get(META_HANDLE));
+      }
+      e.Put(SPECIFICS, specs);
+      e.Put(SERVER_SPECIFICS, specs);
+    }
+  }
+
+  ModelTypeSet to_purge;
+  to_purge.insert(BOOKMARKS);
+  dir_->PurgeEntriesWithTypeIn(to_purge);
+
+  Directory::SaveChangesSnapshot snapshot1;
+  AutoLock scoped_lock(dir_->kernel_->save_changes_mutex);
+  dir_->TakeSnapshotForSaveChanges(&snapshot1);
+  EXPECT_TRUE(expected_purges == snapshot1.metahandles_to_purge);
+
+  to_purge.clear();
+  to_purge.insert(PREFERENCES);
+  dir_->PurgeEntriesWithTypeIn(to_purge);
+
+  dir_->HandleSaveChangesFailure(snapshot1);
+
+  Directory::SaveChangesSnapshot snapshot2;
+  dir_->TakeSnapshotForSaveChanges(&snapshot2);
+  EXPECT_TRUE(all_handles == snapshot2.metahandles_to_purge);
+}
+
 TEST_F(SyncableDirectoryTest, TakeSnapshotGetsAllDirtyHandlesTest) {
   const int metahandles_to_create = 100;
   std::vector<int64> expected_dirty_metahandles;
@@ -1155,6 +1199,44 @@ TEST_F(SyncableDirectoryTest, TestSaveChangesFailure) {
   }
 }
 
+TEST_F(SyncableDirectoryTest, TestSaveChangesFailureWithPurge) {
+  int64 handle1 = 0;
+  // Set up an item using a regular, saveable directory.
+  {
+    WriteTransaction trans(dir_.get(), UNITTEST, __FILE__, __LINE__);
+
+    MutableEntry e1(&trans, CREATE, trans.root_id(), "aguilera");
+    ASSERT_TRUE(e1.good());
+    EXPECT_TRUE(e1.GetKernelCopy().is_dirty());
+    handle1 = e1.Get(META_HANDLE);
+    e1.Put(BASE_VERSION, 1);
+    e1.Put(IS_DIR, true);
+    e1.Put(ID, TestIdFactory::FromNumber(101));
+    sync_pb::EntitySpecifics bookmark_specs;
+    AddDefaultExtensionValue(BOOKMARKS, &bookmark_specs);
+    e1.Put(SPECIFICS, bookmark_specs);
+    e1.Put(SERVER_SPECIFICS, bookmark_specs);
+    e1.Put(ID, TestIdFactory::FromNumber(101));
+    EXPECT_TRUE(e1.GetKernelCopy().is_dirty());
+    EXPECT_TRUE(IsInDirtyMetahandles(handle1));
+  }
+  ASSERT_TRUE(dir_->SaveChanges());
+
+  // Now do some operations using a directory for which SaveChanges will
+  // always fail.
+  dir_.reset(new TestUnsaveableDirectory());
+  ASSERT_TRUE(dir_.get());
+  ASSERT_TRUE(OPENED == dir_->Open(FilePath(kFilePath), kName));
+  ASSERT_TRUE(dir_->good());
+
+  ModelTypeSet set;
+  set.insert(BOOKMARKS);
+  dir_->PurgeEntriesWithTypeIn(set);
+  EXPECT_TRUE(IsInMetahandlesToPurge(handle1));
+  ASSERT_FALSE(dir_->SaveChanges());
+  EXPECT_TRUE(IsInMetahandlesToPurge(handle1));
+}
+
 // Create items of each model type, and check that GetModelType and
 // GetServerModelType return the right value.
 TEST_F(SyncableDirectoryTest, GetModelType) {
diff --git a/chrome/chrome.gyp b/chrome/chrome.gyp
index 55a3894..9427025 100644
--- a/chrome/chrome.gyp
+++ b/chrome/chrome.gyp
@@ -838,6 +838,8 @@
         'browser/sync/engine/build_commit_command.h',
         'browser/sync/engine/change_reorder_buffer.cc',
         'browser/sync/engine/change_reorder_buffer.h',
+        'browser/sync/engine/cleanup_disabled_types_command.cc',
+        'browser/sync/engine/cleanup_disabled_types_command.h',
         'browser/sync/engine/conflict_resolver.cc',
         'browser/sync/engine/conflict_resolver.h',
         'browser/sync/engine/download_updates_command.cc',
diff --git a/chrome/chrome_tests.gypi b/chrome/chrome_tests.gypi
index b53d9aa..bfa6cec 100644
--- a/chrome/chrome_tests.gypi
+++ b/chrome/chrome_tests.gypi
@@ -1821,6 +1821,7 @@
         'browser/sync/engine/all_status_unittest.cc',
         'browser/sync/engine/apply_updates_command_unittest.cc',
         'browser/sync/engine/auth_watcher_unittest.cc',
+        'browser/sync/engine/cleanup_disabled_types_command_unittest.cc',
         'browser/sync/engine/download_updates_command_unittest.cc',
         'browser/sync/engine/mock_model_safe_workers.h',
         'browser/sync/engine/process_commit_response_command_unittest.cc',
diff --git a/chrome/test/sync/engine/syncer_command_test.h b/chrome/test/sync/engine/syncer_command_test.h
index 14e4da5..9a3c87c 100644
--- a/chrome/test/sync/engine/syncer_command_test.h
+++ b/chrome/test/sync/engine/syncer_command_test.h
@@ -24,6 +24,10 @@ class SyncerCommandTestWithParam : public testing::TestWithParam<T>,
                                    public sessions::SyncSession::Delegate,
                                    public ModelSafeWorkerRegistrar {
  public:
+  enum UseMockDirectory {
+    USE_MOCK_DIRECTORY
+  };
+
   // SyncSession::Delegate implementation.
   virtual void OnSilencedUntil(const base::TimeTicks& silenced_until) {
     FAIL() << "Should not get silenced.";
@@ -53,17 +57,21 @@ class SyncerCommandTestWithParam : public testing::TestWithParam<T>,
   }
 
 protected:
-  SyncerCommandTestWithParam() {}
+  SyncerCommandTestWithParam() : syncdb_(new TestDirectorySetterUpper()) {}
+
+  explicit SyncerCommandTestWithParam(UseMockDirectory use_mock)
+      : syncdb_(new MockDirectorySetterUpper()) {}
+
   virtual ~SyncerCommandTestWithParam() {}
   virtual void SetUp() {
-    syncdb_.SetUp();
+    syncdb_->SetUp();
     ResetContext();
   }
   virtual void TearDown() {
-    syncdb_.TearDown();
+    syncdb_->TearDown();
   }
 
-  const TestDirectorySetterUpper& syncdb() const { return syncdb_; }
+  TestDirectorySetterUpper* syncdb() { return syncdb_.get(); }
   sessions::SyncSessionContext* context() const { return context_.get(); }
   sessions::SyncSession::Delegate* delegate() { return this; }
   ModelSafeWorkerRegistrar* registrar() { return this; }
@@ -79,8 +87,8 @@ class SyncerCommandTestWithParam : public testing::TestWithParam<T>,
 
   void ResetContext() {
     context_.reset(new sessions::SyncSessionContext(
-        mock_server_.get(), NULL, syncdb_.manager(), registrar()));
-    context_->set_account_name(syncdb_.name());
+        mock_server_.get(), NULL, syncdb_->manager(), registrar()));
+    context_->set_account_name(syncdb_->name());
     ClearSession();
   }
 
@@ -88,7 +96,7 @@ class SyncerCommandTestWithParam : public testing::TestWithParam<T>,
   // the context does not have a MockServerConnection attached.
   void ConfigureMockServerConnection() {
     mock_server_.reset(
-        new MockConnectionManager(syncdb_.manager(), syncdb_.name()));
+        new MockConnectionManager(syncdb_->manager(), syncdb_->name()));
     ResetContext();
   }
 
@@ -104,7 +112,7 @@ class SyncerCommandTestWithParam : public testing::TestWithParam<T>,
   }
 
  private:
-  TestDirectorySetterUpper syncdb_;
+  scoped_ptr<TestDirectorySetterUpper> syncdb_;
   scoped_ptr<sessions::SyncSessionContext> context_;
   scoped_ptr<MockConnectionManager> mock_server_;
   scoped_ptr<sessions::SyncSession> session_;
@@ -115,6 +123,17 @@ class SyncerCommandTestWithParam : public testing::TestWithParam<T>,
 
 class SyncerCommandTest : public SyncerCommandTestWithParam<void*> {};
 
+class MockDirectorySyncerCommandTest
+    : public SyncerCommandTestWithParam<void*> {
+ public:
+  MockDirectorySyncerCommandTest()
+      : SyncerCommandTestWithParam<void*>(
+            SyncerCommandTestWithParam<void*>::USE_MOCK_DIRECTORY) {}
+  MockDirectorySetterUpper::MockDirectory* mock_directory() {
+    return static_cast<MockDirectorySetterUpper*>(syncdb())->directory();
+  }
+};
+
 }  // namespace browser_sync
 
 #endif  // CHROME_TEST_SYNC_ENGINE_SYNCER_COMMAND_TEST_H_
diff --git a/chrome/test/sync/engine/test_directory_setter_upper.cc b/chrome/test/sync/engine/test_directory_setter_upper.cc
index 5d861d9..4818280 100644
--- a/chrome/test/sync/engine/test_directory_setter_upper.cc
+++ b/chrome/test/sync/engine/test_directory_setter_upper.cc
@@ -25,11 +25,15 @@ TestDirectorySetterUpper::~TestDirectorySetterUpper() {}
 
 void TestDirectorySetterUpper::Init() {
   ASSERT_TRUE(temp_dir_.CreateUniqueTempDir());
-  manager_.reset(new DirectoryManager(temp_dir_.path()));
+  reset_directory_manager(new DirectoryManager(temp_dir_.path()));
   file_path_ = manager_->GetSyncDataDatabasePath();
   file_util::Delete(file_path_, false);
 }
 
+void TestDirectorySetterUpper::reset_directory_manager(DirectoryManager* d) {
+  manager_.reset(d);
+}
+
 void TestDirectorySetterUpper::SetUp() {
   Init();
   ASSERT_TRUE(manager()->Open(name()));
@@ -104,4 +108,27 @@ void TriggeredOpenTestDirectorySetterUpper::TearDown() {
   }
 }
 
+MockDirectorySetterUpper::MockDirectory::MockDirectory(
+    const std::string& name) {
+  init_kernel(name);
+}
+
+MockDirectorySetterUpper::Manager::Manager(
+    const FilePath& root_path, syncable::Directory* dir) :
+    syncable::DirectoryManager(root_path) {
+  managed_directory_ = dir;
+}
+
+MockDirectorySetterUpper::MockDirectorySetterUpper()
+    : directory_(new MockDirectory(name())) {
+}
+
+void MockDirectorySetterUpper::SetUp() {
+  reset_directory_manager(new Manager(FilePath(), directory_.get()));
+}
+
+void MockDirectorySetterUpper::TearDown() {
+  // Nothing to do here.
+}
+
 }  // namespace browser_sync
diff --git a/chrome/test/sync/engine/test_directory_setter_upper.h b/chrome/test/sync/engine/test_directory_setter_upper.h
index 0cb3bd6a..b5d5be1 100644
--- a/chrome/test/sync/engine/test_directory_setter_upper.h
+++ b/chrome/test/sync/engine/test_directory_setter_upper.h
@@ -34,8 +34,10 @@
 
 #include "base/scoped_ptr.h"
 #include "base/scoped_temp_dir.h"
+#include "chrome/browser/sync/syncable/directory_manager.h"
 #include "chrome/browser/sync/syncable/syncable.h"
 #include "chrome/browser/sync/util/sync_types.h"
+#include "testing/gmock/include/gmock/gmock.h"
 
 namespace syncable {
 class DirectoryManager;
@@ -65,6 +67,7 @@ class TestDirectorySetterUpper {
   // Subclasses may want to use a different directory name.
   explicit TestDirectorySetterUpper(const std::string& name);
   virtual void Init();
+  void reset_directory_manager(syncable::DirectoryManager* d);
 
  private:
   void RunInvariantCheck(const syncable::ScopedDirLookup& dir);
@@ -100,6 +103,33 @@ class TriggeredOpenTestDirectorySetterUpper : public TestDirectorySetterUpper {
   virtual void TearDown();
 };
 
+// Use this when you don't want to test the whole stack down to the Directory
+// level, as it installs a google mock Directory implementation.
+class MockDirectorySetterUpper : public TestDirectorySetterUpper {
+ public:
+  class Manager : public syncable::DirectoryManager {
+   public:
+    Manager(const FilePath& root_path, syncable::Directory* dir);
+    virtual ~Manager() { managed_directory_ = NULL; }
+  };
+
+  class MockDirectory : public syncable::Directory {
+   public:
+    explicit MockDirectory(const std::string& name);
+    virtual ~MockDirectory() {}
+    MOCK_METHOD1(PurgeEntriesWithTypeIn, void(const syncable::ModelTypeSet&));
+  };
+
+  MockDirectorySetterUpper();
+
+  virtual void SetUp();
+  virtual void TearDown();
+  MockDirectory* directory() { return directory_.get(); }
+
+ private:
+  scoped_ptr<MockDirectory> directory_;
+};
+
 }  // namespace browser_sync
 
 #endif  // CHROME_TEST_SYNC_ENGINE_TEST_DIRECTORY_SETTER_UPPER_H_