author     rlarocque@chromium.org <rlarocque@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>    2012-05-16 19:09:47 +0000
committer  rlarocque@chromium.org <rlarocque@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>    2012-05-16 19:09:47 +0000
commit     414717ef8c0f9ffb6b34dc36629624134fec0536 (patch)
tree       343114f8667a7e9b6a529dd3a947e6562ef602e3 /sync/syncable
parent     430ea5f0f3a35406be69c16b96355b409ccecf07 (diff)
Sync: Clear IS_UNSYNCED for deleted local items
Prior to this change, if we deleted a newly-created item before
informing the sync server of its existence, it would end up in an
unusual state. We do not commit deleted items unless they are already
known to the sync server, but such items were still considered
'unsynced' (i.e. waiting to be committed to the server) and therefore
impossible to remove entirely from the sync directory. They remained
forever both IS_DEL and IS_UNSYNCED.
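Concretely, the stuck state looks roughly like this, sketched with the
MutableEntry test API exercised by the new tests below (fixture setup
and transaction boilerplate from syncable_unittest.cc are assumed):

  // Create an item the server has never heard of; its ID is a
  // client-generated ID, so Get(ID).ServerKnows() is false.
  TestIdFactory id_factory;
  WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
  MutableEntry item(&trans, CREATE, id_factory.root(), "item");
  item.Put(IS_UNSYNCED, true);

  // Delete it before it is ever committed.
  item.Put(IS_DEL, true);

  // Before this patch: the entry stays both IS_DEL and IS_UNSYNCED, so
  // it can never be committed or purged.  After this patch: PutIsDel()
  // clears IS_UNSYNCED because the ID was never known to the server.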
This had a few side effects. The most serious is that this behaviour
could cause directory corruption. If we created a folder and a bookmark
within it, then deleted that bookmark before it had a chance to sync,
the bookmark would remain in a zombie state. When we finally got around
to committing its parent folder, the folder's ID would change, but the
zombie bookmark's PARENT_ID would not be updated, leaving us with a
dangling reference that would be detected as directory corruption the
next time sync was loaded. See crbug.com/125381 for details.
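The failing sequence, sketched with the same test API (each step would
normally run in its own WriteTransaction; the new DeletedAndUnsyncedChild
test below walks through exactly these steps):

  // 1. Create a folder and a bookmark inside it; both have client IDs.
  MutableEntry folder(&trans, CREATE, id_factory.root(), "folder");
  folder.Put(IS_DIR, true);
  folder.Put(IS_UNSYNCED, true);
  MutableEntry bookmark(&trans, CREATE, folder.Get(ID), "bookmark");
  bookmark.Put(IS_UNSYNCED, true);

  // 2. Delete the bookmark before it is ever committed.  Pre-patch, it
  //    stays IS_UNSYNCED and can never be purged.
  bookmark.Put(IS_DEL, true);

  // 3. Commit the folder; its client ID is rewritten to a server ID.
  //    The zombie bookmark's PARENT_ID still names the old client ID,
  //    a dangling reference reported as corruption on the next load.
  ChangeEntryIDAndUpdateChildren(&trans, &folder, id_factory.NewServerId());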
Another effect is that a locally deleted, never-synced item with a
UNIQUE_CLIENT_TAG could cause trouble later on. If a foreign client were
to create an item with the same UNIQUE_CLIENT_TAG and sync it, this
client would conflict-resolve the incoming update against the zombie
entry. The zombie entry would win, the newly created item would be
deleted, and that deletion would be synced back to the server. This may
not have been an entirely bad thing, but it is behaviour that changes
with this patch: from now on, the deleted item will be overwritten (if
it still exists, which it probably won't).
Finally, we now have a mechanism for permanently deleting these items.
With this patch applied, these items will no longer have the unsynced
bit set, so they will be deleted on restart by DropDeletedEntries().
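For example, after a save/reload cycle, which the SimulateSaveAndReloadDir()
helper added to syncable_unittest.cc below simulates, the zombie should be
gone entirely rather than merely marked deleted. A sketch, assuming the same
test fixture and the orig_child_id saved earlier:

  // Reload the directory; Load() runs DropDeletedEntries(), which can now
  // purge the zombie because its IS_UNSYNCED bit is cleared.
  ASSERT_EQ(OPENED, SimulateSaveAndReloadDir());

  ReadTransaction trans(FROM_HERE, dir_.get());
  Entry zombie(&trans, GET_BY_ID, orig_child_id);
  EXPECT_FALSE(zombie.good());  // Purged on load, not just marked deleted.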
This patch changes PutIsDel() to clear the IS_UNSYNCED bit on deleted
entries that were never committed to the sync server. The remaining
changes either add new tests or update existing ones.
BUG=125381
TEST=SyncerTest.ClientTagUncommittedTagMatchesUpdate,
SyncableDirectoryTest.ChangeEntryIDAndUpdateChildren_DeletedAndUnsyncedChildren
Review URL: https://chromiumcodereview.appspot.com/10389103
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@137476 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'sync/syncable')
-rw-r--r--  sync/syncable/dir_open_result.h                       1
-rw-r--r--  sync/syncable/in_memory_directory_backing_store.cc    2
-rw-r--r--  sync/syncable/syncable.cc                            55
-rw-r--r--  sync/syncable/syncable.h                              4
-rw-r--r--  sync/syncable/syncable_unittest.cc                  134
5 files changed, 195 insertions, 1 deletions
diff --git a/sync/syncable/dir_open_result.h b/sync/syncable/dir_open_result.h
index f298ff0..8fe5171 100644
--- a/sync/syncable/dir_open_result.h
+++ b/sync/syncable/dir_open_result.h
@@ -15,6 +15,7 @@ enum DirOpenResult {
   NOT_INITIALIZED,
   FAILED_DISK_FULL,  // The disk is full.
   FAILED_DATABASE_CORRUPT,  // Something is wrong with the DB
   FAILED_LOGICAL_CORRUPTION,  // Invalid database contents
+  FAILED_IN_UNITTEST,  // For tests.
 };
 }  // namespace syncable
diff --git a/sync/syncable/in_memory_directory_backing_store.cc b/sync/syncable/in_memory_directory_backing_store.cc
index 30d3e19..a737ae0 100644
--- a/sync/syncable/in_memory_directory_backing_store.cc
+++ b/sync/syncable/in_memory_directory_backing_store.cc
@@ -21,6 +21,8 @@ DirOpenResult InMemoryDirectoryBackingStore::Load(
   if (!InitializeTables())
     return FAILED_OPEN_DATABASE;
 
+  if (!DropDeletedEntries())
+    return FAILED_DATABASE_CORRUPT;
   if (!LoadEntries(entry_bucket))
     return FAILED_DATABASE_CORRUPT;
   if (!LoadInfo(kernel_load_info))
diff --git a/sync/syncable/syncable.cc b/sync/syncable/syncable.cc
index 760a331..68b5bab 100644
--- a/sync/syncable/syncable.cc
+++ b/sync/syncable/syncable.cc
@@ -1736,6 +1736,18 @@ bool MutableEntry::PutIsDel(bool is_del) {
     if (!UnlinkFromOrder()) {
       return false;
     }
+
+    // If the server never knew about this item and it's deleted then we don't
+    // need to keep it around.  Unsetting IS_UNSYNCED will:
+    // - Ensure that the item is never committed to the server.
+    // - Allow any items with the same UNIQUE_CLIENT_TAG created on other
+    //   clients to override this entry.
+    // - Let us delete this entry permanently through
+    //   DirectoryBackingStore::DropDeletedEntries() when we next restart sync.
+    //   This will save memory and avoid crbug.com/125381.
+    if (!Get(ID).ServerKnows()) {
+      Put(IS_UNSYNCED, false);
+    }
   }
 
   {
@@ -2376,4 +2388,47 @@ EntryKernel* Directory::GetPossibleLastChildForTest(
   return NULL;
 }
 
+void ChangeEntryIDAndUpdateChildren(
+    syncable::WriteTransaction* trans,
+    syncable::MutableEntry* entry,
+    const syncable::Id& new_id) {
+  syncable::Id old_id = entry->Get(ID);
+  if (!entry->Put(ID, new_id)) {
+    Entry old_entry(trans, GET_BY_ID, new_id);
+    CHECK(old_entry.good());
+    LOG(FATAL) << "Attempt to change ID to " << new_id
+               << " conflicts with existing entry.\n\n"
+               << *entry << "\n\n" << old_entry;
+  }
+  if (entry->Get(IS_DIR)) {
+    // Get all child entries of the old id.
+    syncable::Directory::ChildHandles children;
+    trans->directory()->GetChildHandlesById(trans, old_id, &children);
+    Directory::ChildHandles::iterator i = children.begin();
+    while (i != children.end()) {
+      MutableEntry child_entry(trans, GET_BY_HANDLE, *i++);
+      CHECK(child_entry.good());
+      // Use the unchecked setter here to avoid touching the child's NEXT_ID
+      // and PREV_ID fields (which Put(PARENT_ID) would normally do to
+      // maintain linked-list invariants).  In this case, NEXT_ID and PREV_ID
+      // among the children will be valid after the loop, since we update all
+      // the children at once.
+      child_entry.PutParentIdPropertyOnly(new_id);
+    }
+  }
+  // Update Id references on the previous and next nodes in the sibling
+  // order.  Do this by reinserting into the linked list; the first
+  // step in PutPredecessor is to Unlink from the existing order, which
+  // will overwrite the stale Id value from the adjacent nodes.
+  if (entry->Get(PREV_ID) == entry->Get(NEXT_ID) &&
+      entry->Get(PREV_ID) == old_id) {
+    // We just need a shallow update to |entry|'s fields since it is already
+    // self looped.
+    entry->Put(NEXT_ID, new_id);
+    entry->Put(PREV_ID, new_id);
+  } else {
+    entry->PutPredecessor(entry->Get(PREV_ID));
+  }
+}
+
 }  // namespace syncable
diff --git a/sync/syncable/syncable.h b/sync/syncable/syncable.h
index d568a01..ef15a19 100644
--- a/sync/syncable/syncable.h
+++ b/sync/syncable/syncable.h
@@ -1345,6 +1345,10 @@ bool IsLegalNewParent(BaseTransaction* trans, const Id& id, const Id& parentid);
 // This function sets only the flags needed to get this entry to sync.
 bool MarkForSyncing(syncable::MutableEntry* e);
 
+void ChangeEntryIDAndUpdateChildren(syncable::WriteTransaction* trans,
+                                    syncable::MutableEntry* entry,
+                                    const syncable::Id& new_id);
+
 }  // namespace syncable
 
 std::ostream& operator <<(std::ostream&, const syncable::Blob&);
diff --git a/sync/syncable/syncable_unittest.cc b/sync/syncable/syncable_unittest.cc
index c4f8701..bf8a0be 100644
--- a/sync/syncable/syncable_unittest.cc
+++ b/sync/syncable/syncable_unittest.cc
@@ -429,7 +429,8 @@ class SyncableDirectoryTest : public testing::Test {
   }
 
   virtual void TearDown() {
-    dir_->SaveChanges();
+    if (dir_.get())
+      dir_->SaveChanges();
     dir_.reset();
   }
 
@@ -508,6 +509,15 @@
                      int64 base_version,
                      int64 server_version,
                      bool is_del);
+
+  // When a directory is saved then loaded from disk, it will pass through
+  // DropDeletedEntries().  This will remove some entries from the directory.
+  // This function is intended to simulate that process.
+  //
+  // WARNING: The directory will be deleted by this operation.  You should
+  // not have any pointers to the directory (open transactions included)
+  // when you call this.
+  DirOpenResult SimulateSaveAndReloadDir();
 };
 
 TEST_F(SyncableDirectoryTest, TakeSnapshotGetsMetahandlesToPurge) {
@@ -1154,6 +1164,102 @@ TEST_F(SyncableDirectoryTest, GetModelType) {
   }
 }
 
+// A test that roughly mimics the directory interaction that occurs when a
+// bookmark folder and entry are created then synced for the first time.  It is
+// a more common variant of the 'DeletedAndUnsyncedChild' scenario tested below.
+TEST_F(SyncableDirectoryTest, ChangeEntryIDAndUpdateChildren_ParentAndChild) {
+  TestIdFactory id_factory;
+  Id orig_parent_id;
+  Id orig_child_id;
+
+  {
+    // Create two client-side items, a parent and child.
+    WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
+
+    MutableEntry parent(&trans, CREATE, id_factory.root(), "parent");
+    parent.Put(IS_DIR, true);
+    parent.Put(IS_UNSYNCED, true);
+
+    MutableEntry child(&trans, CREATE, parent.Get(ID), "child");
+    child.Put(IS_UNSYNCED, true);
+
+    orig_parent_id = parent.Get(ID);
+    orig_child_id = child.Get(ID);
+  }
+
+  {
+    // Simulate what happens after committing two items.  Their IDs will be
+    // replaced with server IDs.  The child is renamed first, then the parent.
+    WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
+
+    MutableEntry parent(&trans, GET_BY_ID, orig_parent_id);
+    MutableEntry child(&trans, GET_BY_ID, orig_child_id);
+
+    ChangeEntryIDAndUpdateChildren(&trans, &child, id_factory.NewServerId());
+    child.Put(IS_UNSYNCED, false);
+    child.Put(BASE_VERSION, 1);
+    child.Put(SERVER_VERSION, 1);
+
+    ChangeEntryIDAndUpdateChildren(&trans, &parent, id_factory.NewServerId());
+    parent.Put(IS_UNSYNCED, false);
+    parent.Put(BASE_VERSION, 1);
+    parent.Put(SERVER_VERSION, 1);
+  }
+
+  // Final check for validity.
+  EXPECT_EQ(OPENED, SimulateSaveAndReloadDir());
+}
+
+// A test based on the scenario where we create a bookmark folder and entry
+// locally, but with a twist.  In this case, the bookmark is deleted before we
+// are able to sync either it or its parent folder.  This scenario used to cause
+// directory corruption, see crbug.com/125381.
+TEST_F(SyncableDirectoryTest,
+       ChangeEntryIDAndUpdateChildren_DeletedAndUnsyncedChild) {
+  TestIdFactory id_factory;
+  Id orig_parent_id;
+  Id orig_child_id;
+
+  {
+    // Create two client-side items, a parent and child.
+    WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
+
+    MutableEntry parent(&trans, CREATE, id_factory.root(), "parent");
+    parent.Put(IS_DIR, true);
+    parent.Put(IS_UNSYNCED, true);
+
+    MutableEntry child(&trans, CREATE, parent.Get(ID), "child");
+    child.Put(IS_UNSYNCED, true);
+
+    orig_parent_id = parent.Get(ID);
+    orig_child_id = child.Get(ID);
+  }
+
+  {
+    // Delete the child.
+    WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
+
+    MutableEntry child(&trans, GET_BY_ID, orig_child_id);
+    child.Put(IS_DEL, true);
+  }
+
+  {
+    // Simulate what happens after committing the parent.  Its ID will be
+    // replaced with server a ID.
+    WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
+
+    MutableEntry parent(&trans, GET_BY_ID, orig_parent_id);
+
+    ChangeEntryIDAndUpdateChildren(&trans, &parent, id_factory.NewServerId());
+    parent.Put(IS_UNSYNCED, false);
+    parent.Put(BASE_VERSION, 1);
+    parent.Put(SERVER_VERSION, 1);
+  }
+
+  // Final check for validity.
+  EXPECT_EQ(OPENED, SimulateSaveAndReloadDir());
+}
+
 // A variant of SyncableDirectoryTest that uses a real sqlite database.
 class OnDiskSyncableDirectoryTest : public SyncableDirectoryTest {
  protected:
@@ -1536,6 +1642,32 @@ void SyncableDirectoryTest::ValidateEntry(BaseTransaction* trans,
   ASSERT_TRUE(is_del == e.Get(IS_DEL));
 }
 
+DirOpenResult SyncableDirectoryTest::SimulateSaveAndReloadDir() {
+  if (!dir_->SaveChanges())
+    return FAILED_IN_UNITTEST;
+
+  // Do some tricky things to preserve the backing store.
+  DirectoryBackingStore* saved_store = dir_->store_;
+  dir_->store_ = NULL;
+
+  // Close the current directory.
+  dir_->Close();
+  dir_.reset();
+
+  dir_.reset(new Directory(&encryptor_, &handler_, NULL));
+  if (!dir_.get())
+    return FAILED_IN_UNITTEST;
+  DirOpenResult result = dir_->OpenImpl(saved_store, kName, &delegate_,
+                                        NullTransactionObserver());
+
+  // If something went wrong, we need to clear this member.  If we don't,
+  // TearDown() will be guaranteed to crash when it calls SaveChanges().
+  if (result != OPENED)
+    dir_.reset();
+
+  return result;
+}
+
 namespace {
 
 class SyncableDirectoryManagement : public testing::Test {