Diffstat (limited to 'chrome/browser/sync/syncable')
-rw-r--r--  chrome/browser/sync/syncable/directory_backing_store.cc             30
-rw-r--r--  chrome/browser/sync/syncable/directory_backing_store.h               8
-rw-r--r--  chrome/browser/sync/syncable/directory_backing_store_unittest.cc    45
-rw-r--r--  chrome/browser/sync/syncable/directory_manager.h                      2
-rw-r--r--  chrome/browser/sync/syncable/syncable.cc                            111
-rw-r--r--  chrome/browser/sync/syncable/syncable.h                              24
-rw-r--r--  chrome/browser/sync/syncable/syncable_unittest.cc                    94
7 files changed, 269 insertions, 45 deletions
diff --git a/chrome/browser/sync/syncable/directory_backing_store.cc b/chrome/browser/sync/syncable/directory_backing_store.cc
index 63df81d..b4de3f6 100644
--- a/chrome/browser/sync/syncable/directory_backing_store.cc
+++ b/chrome/browser/sync/syncable/directory_backing_store.cc
@@ -250,6 +250,33 @@ void DirectoryBackingStore::EndLoad() {
load_dbhandle_ = NULL; // No longer used.
}
+void DirectoryBackingStore::EndSave() {
+ sqlite3_close(save_dbhandle_);
+ save_dbhandle_ = NULL;
+}
+
+bool DirectoryBackingStore::DeleteEntries(const MetahandleSet& handles) {
+ if (handles.empty())
+ return true;
+
+ sqlite3* dbhandle = LazyGetSaveHandle();
+
+ string query = "DELETE FROM metas WHERE metahandle IN (";
+ for (MetahandleSet::const_iterator it = handles.begin(); it != handles.end();
+ ++it) {
+ if (it != handles.begin())
+ query.append(",");
+ query.append(Int64ToString(*it));
+ }
+ query.append(")");
+ SQLStatement statement;
+ int result = statement.prepare(dbhandle, query.data(), query.size());
+ if (SQLITE_OK == result)
+ result = statement.step();
+
+ return SQLITE_DONE == result;
+}
+
bool DirectoryBackingStore::SaveChanges(
const Directory::SaveChangesSnapshot& snapshot) {
sqlite3* dbhandle = LazyGetSaveHandle();
@@ -273,6 +300,9 @@ bool DirectoryBackingStore::SaveChanges(
return false;
}
+ if (!DeleteEntries(snapshot.metahandles_to_purge))
+ return false;
+
if (save_info) {
const Directory::PersistedKernelInfo& info = snapshot.kernel_info;
SQLStatement update;
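
The new DeleteEntries above batches the purge into one statement rather than issuing a DELETE per row; because metahandles are plain integers, they can be joined straight into the IN (...) clause without quoting. Below is a minimal standalone sketch of that query-building step, using a hypothetical BuildDeleteQuery helper and std::ostringstream (the patch itself inlines this logic and formats with Int64ToString):

#include <set>
#include <sstream>
#include <string>
#include <stdint.h>

// Hypothetical helper illustrating the batched-DELETE construction used by
// DirectoryBackingStore::DeleteEntries; not part of the patch.
std::string BuildDeleteQuery(const std::set<int64_t>& handles) {
  std::ostringstream query;
  query << "DELETE FROM metas WHERE metahandle IN (";
  for (std::set<int64_t>::const_iterator it = handles.begin();
       it != handles.end(); ++it) {
    if (it != handles.begin())
      query << ",";
    query << *it;  // Integer metahandles need no quoting or escaping.
  }
  query << ")";
  return query.str();
}

DeleteEntries then prepares and steps the resulting statement once, treating anything other than SQLITE_DONE as failure.
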
diff --git a/chrome/browser/sync/syncable/directory_backing_store.h b/chrome/browser/sync/syncable/directory_backing_store.h
index fcbd848..36322e8 100644
--- a/chrome/browser/sync/syncable/directory_backing_store.h
+++ b/chrome/browser/sync/syncable/directory_backing_store.h
@@ -81,6 +81,7 @@ class DirectoryBackingStore {
FRIEND_TEST_ALL_PREFIXES(DirectoryBackingStoreTest, MigrateVersion71To72);
FRIEND_TEST_ALL_PREFIXES(DirectoryBackingStoreTest, ModelTypeIds);
FRIEND_TEST_ALL_PREFIXES(DirectoryBackingStoreTest, Corruption);
+ FRIEND_TEST_ALL_PREFIXES(DirectoryBackingStoreTest, DeleteEntries);
FRIEND_TEST_ALL_PREFIXES(MigrationTest, ToCurrentVersion);
friend class MigrationTest;
@@ -122,6 +123,13 @@ class DirectoryBackingStore {
bool BeginLoad();
void EndLoad();
+ // Close save_dbhandle_. Broken out for testing.
+ void EndSave();
+
+ // Removes each entry whose metahandle is in |handles| from the database.
+ // Does synchronous I/O. Returns false on error.
+ bool DeleteEntries(const MetahandleSet& handles);
+
// Lazy creation of save_dbhandle_ for use by SaveChanges code path.
sqlite3* LazyGetSaveHandle();
diff --git a/chrome/browser/sync/syncable/directory_backing_store_unittest.cc b/chrome/browser/sync/syncable/directory_backing_store_unittest.cc
index 3d6d143..b1f5dc98 100644
--- a/chrome/browser/sync/syncable/directory_backing_store_unittest.cc
+++ b/chrome/browser/sync/syncable/directory_backing_store_unittest.cc
@@ -1043,4 +1043,49 @@ TEST_F(DirectoryBackingStoreTest, Corruption) {
}
}
+TEST_F(DirectoryBackingStoreTest, DeleteEntries) {
+ SetUpCurrentDatabaseAndCheckVersion();
+ scoped_ptr<DirectoryBackingStore> dbs(
+ new DirectoryBackingStore(GetUsername(), GetDatabasePath()));
+ dbs->BeginLoad();
+
+ MetahandlesIndex index;
+ dbs->LoadEntries(&index);
+ size_t initial_size = index.size();
+ ASSERT_LT(0U, initial_size) << "Test requires entries to delete.";
+ int64 first_to_die = (*index.begin())->ref(META_HANDLE);
+ MetahandleSet to_delete;
+ to_delete.insert(first_to_die);
+ EXPECT_TRUE(dbs->DeleteEntries(to_delete));
+
+ index.clear();
+ dbs->LoadEntries(&index);
+
+ EXPECT_EQ(initial_size - 1, index.size());
+ bool delete_failed = false;
+ for (MetahandlesIndex::iterator it = index.begin(); it != index.end();
+ ++it) {
+ if ((*it)->ref(META_HANDLE) == first_to_die) {
+ delete_failed = true;
+ break;
+ }
+ }
+ EXPECT_FALSE(delete_failed);
+
+ to_delete.clear();
+ for (MetahandlesIndex::iterator it = index.begin(); it != index.end();
+ ++it) {
+ to_delete.insert((*it)->ref(META_HANDLE));
+ }
+
+ EXPECT_TRUE(dbs->DeleteEntries(to_delete));
+
+ index.clear();
+ dbs->LoadEntries(&index);
+ EXPECT_EQ(0U, index.size());
+
+ dbs->EndLoad();
+ dbs->EndSave();
+}
+
} // namespace syncable
diff --git a/chrome/browser/sync/syncable/directory_manager.h b/chrome/browser/sync/syncable/directory_manager.h
index 61f303e..103ec30 100644
--- a/chrome/browser/sync/syncable/directory_manager.h
+++ b/chrome/browser/sync/syncable/directory_manager.h
@@ -54,7 +54,7 @@ class DirectoryManager {
// root_path specifies where db is stored.
explicit DirectoryManager(const FilePath& root_path);
- ~DirectoryManager();
+ virtual ~DirectoryManager();
static const FilePath GetSyncDataDatabaseFilename();
const FilePath GetSyncDataDatabasePath() const;
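
Making ~DirectoryManager virtual matters when a test double derives from DirectoryManager and is destroyed through a base-class pointer; without a virtual destructor that is undefined behavior and the derived destructor never runs. A minimal sketch of the distinction, with simplified stand-in classes rather than the real DirectoryManager:

class Base {
 public:
  virtual ~Base() {}  // The 'virtual' here is what this patch adds.
};

class DerivedForTest : public Base {
 public:
  virtual ~DerivedForTest() { /* release per-test resources */ }
};

void Destroy(Base* p) {
  delete p;  // Runs ~DerivedForTest, then ~Base. Without the virtual base
             // destructor this delete would be undefined behavior.
}
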
diff --git a/chrome/browser/sync/syncable/syncable.cc b/chrome/browser/sync/syncable/syncable.cc
index 3e93d40..2f11806 100644
--- a/chrome/browser/sync/syncable/syncable.cc
+++ b/chrome/browser/sync/syncable/syncable.cc
@@ -157,6 +157,11 @@ bool LessPathNames::operator() (const string& a, const string& b) const {
static const DirectoryChangeEvent kShutdownChangesEvent =
{ DirectoryChangeEvent::SHUTDOWN, 0, 0 };
+void Directory::init_kernel(const std::string& name) {
+ DCHECK(kernel_ == NULL);
+ kernel_ = new Kernel(FilePath(), name, KernelLoadInfo());
+}
+
Directory::Kernel::Kernel(const FilePath& db_path,
const string& name,
const KernelLoadInfo& info)
@@ -170,6 +175,7 @@ Directory::Kernel::Kernel(const FilePath& db_path,
unapplied_update_metahandles(new MetahandleSet),
unsynced_metahandles(new MetahandleSet),
dirty_metahandles(new MetahandleSet),
+ metahandles_to_purge(new MetahandleSet),
channel(new Directory::Channel(syncable::DIRECTORY_DESTROYED)),
info_status(Directory::KERNEL_SHARE_INFO_VALID),
persisted_info(info.kernel_info),
@@ -197,6 +203,7 @@ Directory::Kernel::~Kernel() {
delete unsynced_metahandles;
delete unapplied_update_metahandles;
delete dirty_metahandles;
+ delete metahandles_to_purge;
delete parent_id_child_index;
delete client_tag_index;
delete ids_index;
@@ -522,6 +529,10 @@ void Directory::TakeSnapshotForSaveChanges(SaveChangesSnapshot* snapshot) {
}
ClearDirtyMetahandles();
+ // Set purged handles.
+ DCHECK(snapshot->metahandles_to_purge.empty());
+ snapshot->metahandles_to_purge.swap(*(kernel_->metahandles_to_purge));
+
// Fill kernel_info_status and kernel_info.
snapshot->kernel_info = kernel_->persisted_info;
// To avoid duplicates when the process crashes, we record the next_id to be
@@ -579,6 +590,7 @@ void Directory::VacuumAfterSaveChanges(const SaveChangesSnapshot& snapshot) {
// Might not be in it
num_erased = kernel_->client_tag_index->erase(entry);
DCHECK_EQ(entry->ref(UNIQUE_CLIENT_TAG).empty(), !num_erased);
+ DCHECK(!kernel_->parent_id_child_index->count(entry));
delete entry;
}
}
@@ -594,30 +606,42 @@ void Directory::PurgeEntriesWithTypeIn(const std::set<ModelType>& types) {
return;
{
- ScopedKernelLock lock(this);
- for (MetahandlesIndex::iterator it = kernel_->metahandles_index->begin();
- it != kernel_->metahandles_index->end(); ++it) {
- const sync_pb::EntitySpecifics& local_specifics = (*it)->ref(SPECIFICS);
- const sync_pb::EntitySpecifics& server_specifics =
- (*it)->ref(SERVER_SPECIFICS);
- ModelType local_type = GetModelTypeFromSpecifics(local_specifics);
- ModelType server_type = GetModelTypeFromSpecifics(server_specifics);
-
- if (types.count(local_type) > 0 || types.count(server_type) > 0) {
- // Set conditions for permanent deletion.
- (*it)->put(IS_DEL, true);
- (*it)->put(IS_UNSYNCED, false);
- (*it)->put(IS_UNAPPLIED_UPDATE, false);
- (*it)->mark_dirty(kernel_->dirty_metahandles);
- DCHECK(!SafeToPurgeFromMemory(*it));
+ WriteTransaction trans(this, PURGE_ENTRIES, __FILE__, __LINE__);
+ {
+ ScopedKernelLock lock(this);
+ MetahandlesIndex::iterator it = kernel_->metahandles_index->begin();
+ while (it != kernel_->metahandles_index->end()) {
+ const sync_pb::EntitySpecifics& local_specifics = (*it)->ref(SPECIFICS);
+ const sync_pb::EntitySpecifics& server_specifics =
+ (*it)->ref(SERVER_SPECIFICS);
+ ModelType local_type = GetModelTypeFromSpecifics(local_specifics);
+ ModelType server_type = GetModelTypeFromSpecifics(server_specifics);
+
+ // Note the dance around incrementing |it|, since we sometimes erase().
+ if (types.count(local_type) > 0 || types.count(server_type) > 0) {
+ UnlinkEntryFromOrder(*it, NULL, &lock);
+
+ kernel_->metahandles_to_purge->insert((*it)->ref(META_HANDLE));
+
+ size_t num_erased = 0;
+ num_erased = kernel_->ids_index->erase(*it);
+ DCHECK_EQ(1u, num_erased);
+ num_erased = kernel_->client_tag_index->erase(*it);
+ DCHECK_EQ((*it)->ref(UNIQUE_CLIENT_TAG).empty(), !num_erased);
+ num_erased = kernel_->parent_id_child_index->erase(*it);
+ DCHECK_EQ((*it)->ref(IS_DEL), !num_erased);
+ kernel_->metahandles_index->erase(it++);
+ } else {
+ ++it;
+ }
}
- }
- // Ensure meta tracking for these data types reflects the deleted state.
- for (std::set<ModelType>::const_iterator it = types.begin();
- it != types.end(); ++it) {
- set_initial_sync_ended_for_type_unsafe(*it, false);
- set_last_download_timestamp_unsafe(*it, 0);
+ // Ensure meta tracking for these data types reflects the deleted state.
+ for (std::set<ModelType>::const_iterator it = types.begin();
+ it != types.end(); ++it) {
+ set_initial_sync_ended_for_type_unsafe(*it, false);
+ set_last_download_timestamp_unsafe(*it, 0);
+ }
}
}
}
@@ -640,6 +664,9 @@ void Directory::HandleSaveChangesFailure(const SaveChangesSnapshot& snapshot) {
(*found)->mark_dirty(kernel_->dirty_metahandles);
}
}
+
+ kernel_->metahandles_to_purge->insert(snapshot.metahandles_to_purge.begin(),
+ snapshot.metahandles_to_purge.end());
}
int64 Directory::last_download_timestamp(ModelType model_type) const {
@@ -1276,32 +1303,44 @@ bool MutableEntry::Put(IndexedBitField field, bool value) {
}
void MutableEntry::UnlinkFromOrder() {
- Id old_previous = Get(PREV_ID);
- Id old_next = Get(NEXT_ID);
+ ScopedKernelLock lock(dir());
+ dir()->UnlinkEntryFromOrder(kernel_, write_transaction(), &lock);
+}
- // Self-looping signifies that this item is not in the order. If we were to
- // set these to 0, we could get into trouble because this node might look
- // like the first node in the ordering.
- Put(NEXT_ID, Get(ID));
- Put(PREV_ID, Get(ID));
+void Directory::UnlinkEntryFromOrder(EntryKernel* entry,
+ WriteTransaction* trans,
+ ScopedKernelLock* lock) {
+ CHECK(!trans || this == trans->directory());
+ Id old_previous = entry->ref(PREV_ID);
+ Id old_next = entry->ref(NEXT_ID);
+
+ entry->put(NEXT_ID, entry->ref(ID));
+ entry->put(PREV_ID, entry->ref(ID));
+ entry->mark_dirty(kernel_->dirty_metahandles);
if (!old_previous.IsRoot()) {
if (old_previous == old_next) {
// Note previous == next doesn't imply previous == next == Get(ID). We
// could have prev==next=="c-XX" and Get(ID)=="sX..." if an item was added
// and deleted before receiving the server ID in the commit response.
- CHECK((old_next == Get(ID)) || !old_next.ServerKnows());
+ CHECK((old_next == entry->ref(ID)) || !old_next.ServerKnows());
return; // Done if we were already self-looped (hence unlinked).
}
- MutableEntry previous_entry(write_transaction(), GET_BY_ID, old_previous);
- CHECK(previous_entry.good());
- previous_entry.Put(NEXT_ID, old_next);
+ EntryKernel* previous_entry = GetEntryById(old_previous, lock);
+ CHECK(previous_entry);
+ if (trans)
+ trans->SaveOriginal(previous_entry);
+ previous_entry->put(NEXT_ID, old_next);
+ previous_entry->mark_dirty(kernel_->dirty_metahandles);
}
if (!old_next.IsRoot()) {
- MutableEntry next_entry(write_transaction(), GET_BY_ID, old_next);
- CHECK(next_entry.good());
- next_entry.Put(PREV_ID, old_previous);
+ EntryKernel* next_entry = GetEntryById(old_next, lock);
+ CHECK(next_entry);
+ if (trans)
+ trans->SaveOriginal(next_entry);
+ next_entry->put(PREV_ID, old_previous);
+ next_entry->mark_dirty(kernel_->dirty_metahandles);
}
}
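
The rewritten loop in PurgeEntriesWithTypeIn erases from metahandles_index while iterating, which is why it post-increments |it| inside the erase() call (the "dance" the new comment refers to): erase() invalidates only the erased iterator, so the iterator must advance before its element disappears. A minimal sketch of the same pattern, with a plain std::set<int> standing in for the index:

#include <set>

// Remove every even element while walking the set. This is the pre-C++11
// idiom, where set::erase(iterator) returns void.
void EraseEvens(std::set<int>* index) {
  std::set<int>::iterator it = index->begin();
  while (it != index->end()) {
    if (*it % 2 == 0) {
      index->erase(it++);  // 'it' advances first; erase() gets the old position.
    } else {
      ++it;
    }
  }
}
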
diff --git a/chrome/browser/sync/syncable/syncable.h b/chrome/browser/sync/syncable/syncable.h
index fc5f33f..8a651a9 100644
--- a/chrome/browser/sync/syncable/syncable.h
+++ b/chrome/browser/sync/syncable/syncable.h
@@ -570,7 +570,13 @@ struct MultiTypeTimeStamp {
// the write. This is defined up here since DirectoryChangeEvent also contains
// one.
enum WriterTag {
- INVALID, SYNCER, AUTHWATCHER, UNITTEST, VACUUM_AFTER_SAVE, SYNCAPI
+ INVALID,
+ SYNCER,
+ AUTHWATCHER,
+ UNITTEST,
+ VACUUM_AFTER_SAVE,
+ PURGE_ENTRIES,
+ SYNCAPI
};
// A separate Event type and channel for very frequent changes, caused
@@ -644,6 +650,8 @@ class Directory {
FRIEND_TEST_ALL_PREFIXES(SyncableDirectoryTest,
TakeSnapshotGetsOnlyDirtyHandlesTest);
FRIEND_TEST_ALL_PREFIXES(SyncableDirectoryTest, TestPurgeEntriesWithTypeIn);
+ FRIEND_TEST_ALL_PREFIXES(SyncableDirectoryTest,
+ TakeSnapshotGetsMetahandlesToPurge);
public:
class EventListenerHookup;
@@ -692,6 +700,7 @@ class Directory {
KernelShareInfoStatus kernel_info_status;
PersistedKernelInfo kernel_info;
OriginalEntries dirty_metas;
+ MetahandleSet metahandles_to_purge;
SaveChangesSnapshot() : kernel_info_status(KERNEL_SHARE_INFO_INVALID) {
}
};
@@ -773,6 +782,9 @@ class Directory {
// The semantic checking is implemented higher up.
void Undelete(EntryKernel* const entry);
void Delete(EntryKernel* const entry);
+ void UnlinkEntryFromOrder(EntryKernel* entry,
+ WriteTransaction* trans,
+ ScopedKernelLock* lock);
// Overridden by tests.
virtual DirectoryBackingStore* CreateBackingStore(
@@ -862,7 +874,7 @@ class Directory {
// entries, which means something different in the syncable namespace.
// WARNING! This can be real slow, as it iterates over all entries.
// WARNING! Performs synchronous I/O.
- void PurgeEntriesWithTypeIn(const std::set<ModelType>& types);
+ virtual void PurgeEntriesWithTypeIn(const std::set<ModelType>& types);
private:
// Helper to prime ids_index, parent_id_and_names_index, unsynced_metahandles
@@ -926,6 +938,10 @@ class Directory {
typedef std::set<EntryKernel*,
LessField<StringField, UNIQUE_CLIENT_TAG> > ClientTagIndex;
+ protected:
+ // Used by tests.
+ void init_kernel(const std::string& name);
+
private:
struct Kernel {
@@ -970,6 +986,10 @@ class Directory {
// necessarily). Dirtyness is confirmed in TakeSnapshotForSaveChanges().
MetahandleSet* const dirty_metahandles;
+ // When a purge takes place, we remove items from all our indices and stash
+ // them in here so that SaveChanges can persist their permanent deletion.
+ MetahandleSet* const metahandles_to_purge;
+
// TODO(ncarter): Figure out what the hell this is, and comment it.
Channel* const channel;
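
metahandles_to_purge follows a hand-off pattern similar to the dirty-entry handling: TakeSnapshotForSaveChanges swaps the kernel's set into the snapshot (leaving the kernel set empty), the backing store deletes those rows, and HandleSaveChangesFailure merges the snapshot's handles back so a later SaveChanges retries the purge. A minimal sketch of that hand-off, with simplified stand-in types (Kernel and Snapshot here are not the real classes):

#include <set>
#include <stdint.h>

typedef std::set<int64_t> MetahandleSet;

struct Snapshot { MetahandleSet metahandles_to_purge; };
struct Kernel   { MetahandleSet metahandles_to_purge; };

void TakeSnapshotForSave(Kernel* kernel, Snapshot* snapshot) {
  // O(1) swap: the snapshot now owns the pending purges, and the kernel set
  // is empty again for anything purged after this point.
  snapshot->metahandles_to_purge.swap(kernel->metahandles_to_purge);
}

void HandleSaveFailure(Kernel* kernel, const Snapshot& snapshot) {
  // Nothing was persisted, so put the handles back; the next save attempt
  // will pick them up again.
  kernel->metahandles_to_purge.insert(snapshot.metahandles_to_purge.begin(),
                                      snapshot.metahandles_to_purge.end());
}
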
diff --git a/chrome/browser/sync/syncable/syncable_unittest.cc b/chrome/browser/sync/syncable/syncable_unittest.cc
index 809a47c..f5a9fa7 100644
--- a/chrome/browser/sync/syncable/syncable_unittest.cc
+++ b/chrome/browser/sync/syncable/syncable_unittest.cc
@@ -300,24 +300,25 @@ class SyncableDirectoryTest : public testing::Test {
return 1 == dir_->kernel_->dirty_metahandles->count(metahandle);
}
- bool IsSafeToPermanentlyDelete(Entry* e) {
- return e->Get(IS_DEL) && !e->Get(IS_UNSYNCED) &&
- !e->Get(IS_UNAPPLIED_UPDATE);
+ bool IsInMetahandlesToPurge(int64 metahandle) {
+ return 1 == dir_->kernel_->metahandles_to_purge->count(metahandle);
}
void CheckPurgeEntriesWithTypeInSucceeded(const ModelTypeSet& types_to_purge,
bool before_reload) {
+ SCOPED_TRACE(testing::Message("Before reload: ") << before_reload);
{
ReadTransaction trans(dir_.get(), __FILE__, __LINE__);
MetahandleSet all_set;
dir_->GetAllMetaHandles(&trans, &all_set);
- EXPECT_EQ(before_reload ? 7U : 3U, all_set.size());
+ EXPECT_EQ(3U, all_set.size());
+ if (before_reload)
+ EXPECT_EQ(4U, dir_->kernel_->metahandles_to_purge->size());
for (MetahandleSet::iterator iter = all_set.begin();
iter != all_set.end(); ++iter) {
Entry e(&trans, GET_BY_HANDLE, *iter);
if ((types_to_purge.count(e.GetModelType()) ||
- types_to_purge.count(e.GetServerModelType())) &&
- (!before_reload || !IsSafeToPermanentlyDelete(&e))) {
+ types_to_purge.count(e.GetServerModelType()))) {
FAIL() << "Illegal type should have been deleted.";
}
}
@@ -359,6 +360,49 @@ class SyncableDirectoryTest : public testing::Test {
bool set_server_fields, bool is_dir, bool add_to_lru, int64 *meta_handle);
};
+TEST_F(SyncableDirectoryTest, TakeSnapshotGetsMetahandlesToPurge) {
+ const int metas_to_create = 50;
+ MetahandleSet expected_purges;
+ MetahandleSet all_handles;
+ {
+ WriteTransaction trans(dir_.get(), UNITTEST, __FILE__, __LINE__);
+ for (int i = 0; i < metas_to_create; i++) {
+ MutableEntry e(&trans, CREATE, trans.root_id(), "foo");
+ e.Put(IS_UNSYNCED, true);
+ sync_pb::EntitySpecifics specs;
+ if (i % 2 == 0) {
+ AddDefaultExtensionValue(BOOKMARKS, &specs);
+ expected_purges.insert(e.Get(META_HANDLE));
+ all_handles.insert(e.Get(META_HANDLE));
+ } else {
+ AddDefaultExtensionValue(PREFERENCES, &specs);
+ all_handles.insert(e.Get(META_HANDLE));
+ }
+ e.Put(SPECIFICS, specs);
+ e.Put(SERVER_SPECIFICS, specs);
+ }
+ }
+
+ ModelTypeSet to_purge;
+ to_purge.insert(BOOKMARKS);
+ dir_->PurgeEntriesWithTypeIn(to_purge);
+
+ Directory::SaveChangesSnapshot snapshot1;
+ AutoLock scoped_lock(dir_->kernel_->save_changes_mutex);
+ dir_->TakeSnapshotForSaveChanges(&snapshot1);
+ EXPECT_TRUE(expected_purges == snapshot1.metahandles_to_purge);
+
+ to_purge.clear();
+ to_purge.insert(PREFERENCES);
+ dir_->PurgeEntriesWithTypeIn(to_purge);
+
+ dir_->HandleSaveChangesFailure(snapshot1);
+
+ Directory::SaveChangesSnapshot snapshot2;
+ dir_->TakeSnapshotForSaveChanges(&snapshot2);
+ EXPECT_TRUE(all_handles == snapshot2.metahandles_to_purge);
+}
+
TEST_F(SyncableDirectoryTest, TakeSnapshotGetsAllDirtyHandlesTest) {
const int metahandles_to_create = 100;
std::vector<int64> expected_dirty_metahandles;
@@ -1155,6 +1199,44 @@ TEST_F(SyncableDirectoryTest, TestSaveChangesFailure) {
}
}
+TEST_F(SyncableDirectoryTest, TestSaveChangesFailureWithPurge) {
+ int64 handle1 = 0;
+ // Set up an item using a regular, saveable directory.
+ {
+ WriteTransaction trans(dir_.get(), UNITTEST, __FILE__, __LINE__);
+
+ MutableEntry e1(&trans, CREATE, trans.root_id(), "aguilera");
+ ASSERT_TRUE(e1.good());
+ EXPECT_TRUE(e1.GetKernelCopy().is_dirty());
+ handle1 = e1.Get(META_HANDLE);
+ e1.Put(BASE_VERSION, 1);
+ e1.Put(IS_DIR, true);
+ e1.Put(ID, TestIdFactory::FromNumber(101));
+ sync_pb::EntitySpecifics bookmark_specs;
+ AddDefaultExtensionValue(BOOKMARKS, &bookmark_specs);
+ e1.Put(SPECIFICS, bookmark_specs);
+ e1.Put(SERVER_SPECIFICS, bookmark_specs);
+ e1.Put(ID, TestIdFactory::FromNumber(101));
+ EXPECT_TRUE(e1.GetKernelCopy().is_dirty());
+ EXPECT_TRUE(IsInDirtyMetahandles(handle1));
+ }
+ ASSERT_TRUE(dir_->SaveChanges());
+
+ // Now do some operations using a directory for which SaveChanges will
+ // always fail.
+ dir_.reset(new TestUnsaveableDirectory());
+ ASSERT_TRUE(dir_.get());
+ ASSERT_TRUE(OPENED == dir_->Open(FilePath(kFilePath), kName));
+ ASSERT_TRUE(dir_->good());
+
+ ModelTypeSet set;
+ set.insert(BOOKMARKS);
+ dir_->PurgeEntriesWithTypeIn(set);
+ EXPECT_TRUE(IsInMetahandlesToPurge(handle1));
+ ASSERT_FALSE(dir_->SaveChanges());
+ EXPECT_TRUE(IsInMetahandlesToPurge(handle1));
+}
+
// Create items of each model type, and check that GetModelType and
// GetServerModelType return the right value.
TEST_F(SyncableDirectoryTest, GetModelType) {