summaryrefslogtreecommitdiffstats
path: root/sync/syncable
diff options
context:
space:
mode:
authorrlarocque@chromium.org <rlarocque@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2014-05-15 22:47:20 +0000
committerrlarocque@chromium.org <rlarocque@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2014-05-15 22:47:20 +0000
commit5a05b1de6bb31e66f570b320b6b507e0e2d5798b (patch)
tree23b917abaac13ec29bc29dc17179dc4f136e0903 /sync/syncable
parent7b536cbc0794e6daf602b33c59a638cf24f4253e (diff)
downloadchromium_src-5a05b1de6bb31e66f570b320b6b507e0e2d5798b.zip
chromium_src-5a05b1de6bb31e66f570b320b6b507e0e2d5798b.tar.gz
chromium_src-5a05b1de6bb31e66f570b320b6b507e0e2d5798b.tar.bz2
sync: Improve handling of bad UniquePos (retry)
Retry: The first attempt had a memory leak. It seems that this leak existed before this CL, but no code exercised it. This updated CL includes a scoped deleter that should fix it. Original commit message was: Makes the client assign a valid position to incoming bookmarks if the server has not populated the required fields. This code should never be triggered unless there is a bug in the server. This risks reordering users' bookmarks, but that's probably preferable to a crash. This fallback code is still protected by a NOTREACHED(), as before. Detects bookmarks that do not have valid position information during database load. If these corrupted bookmarks are detected, the entire database is declared to be corrupt. Sync will then re-download all of the user's data, which should fix the problem. BUG=367247 Review URL: https://codereview.chromium.org/283143002 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@270837 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'sync/syncable')
-rw-r--r--sync/syncable/directory.cc5
-rw-r--r--sync/syncable/directory_backing_store.cc10
-rw-r--r--sync/syncable/directory_unittest.cc37
-rw-r--r--sync/syncable/syncable_unittest.cc7
4 files changed, 55 insertions, 4 deletions
diff --git a/sync/syncable/directory.cc b/sync/syncable/directory.cc
index cebb2d4..e523bcd 100644
--- a/sync/syncable/directory.cc
+++ b/sync/syncable/directory.cc
@@ -167,6 +167,11 @@ DirOpenResult Directory::OpenImpl(
// Temporary indices before kernel_ initialized in case Load fails. We
// swap these later.
Directory::MetahandlesMap tmp_handles_map;
+
+ // Avoids mem leaks on failure. Harmlessly deletes the empty hash map after
+ // the swap in the success case.
+ STLValueDeleter<Directory::MetahandlesMap> deleter(&tmp_handles_map);
+
JournalIndex delete_journals;
DirOpenResult result =
diff --git a/sync/syncable/directory_backing_store.cc b/sync/syncable/directory_backing_store.cc
index ec28a53..55a01e6 100644
--- a/sync/syncable/directory_backing_store.cc
+++ b/sync/syncable/directory_backing_store.cc
@@ -123,6 +123,16 @@ scoped_ptr<EntryKernel> UnpackEntry(sql::Statement* statement) {
kernel->mutable_ref(static_cast<AttachmentMetadataField>(i)).ParseFromArray(
statement->ColumnBlob(i), statement->ColumnByteLength(i));
}
+
+ // Sanity check on positions. We risk strange and rare crashes if our
+ // assumptions about unique position values are broken.
+ if (kernel->ShouldMaintainPosition() &&
+ !kernel->ref(UNIQUE_POSITION).IsValid()) {
+ DVLOG(1) << "Unpacked invalid position on an entity that should have a "
+ << "valid position. Assuming the DB is corrupt.";
+ return scoped_ptr<EntryKernel>();
+ }
+
return kernel.Pass();
}
diff --git a/sync/syncable/directory_unittest.cc b/sync/syncable/directory_unittest.cc
index f58f54f..6b6b8a2 100644
--- a/sync/syncable/directory_unittest.cc
+++ b/sync/syncable/directory_unittest.cc
@@ -81,6 +81,11 @@ DirOpenResult SyncableDirectoryTest::ReopenDirectory() {
DirOpenResult open_result =
dir_->Open(kDirectoryName, &delegate_, NullTransactionObserver());
+
+ if (open_result != OPENED) {
+ dir_.reset();
+ }
+
return open_result;
}
@@ -1221,6 +1226,38 @@ TEST_F(SyncableDirectoryTest, PositionWithNullSurvivesSaveAndReload) {
}
}
+// Any item with BOOKMARKS in their local specifics should have a valid local
+// unique position. If there is an item in the loaded DB that does not match
+// this criteria, we consider the whole DB to be corrupt.
+TEST_F(SyncableDirectoryTest, BadPositionCountsAsCorruption) {
+ TestIdFactory id_factory;
+
+ {
+ WriteTransaction trans(FROM_HERE, UNITTEST, dir().get());
+
+ MutableEntry parent(&trans, CREATE, BOOKMARKS, id_factory.root(), "parent");
+ parent.PutIsDir(true);
+ parent.PutIsUnsynced(true);
+
+ // The code is littered with DCHECKs that try to stop us from doing what
+ // we're about to do. Our work-around is to create a bookmark based on
+ // a server update, then update its local specifics without updating its
+ // local unique position.
+
+ MutableEntry child(
+ &trans, CREATE_NEW_UPDATE_ITEM, id_factory.MakeServer("child"));
+ sync_pb::EntitySpecifics specifics;
+ AddDefaultFieldValue(BOOKMARKS, &specifics);
+ child.PutIsUnappliedUpdate(true);
+ child.PutSpecifics(specifics);
+
+ EXPECT_TRUE(child.ShouldMaintainPosition());
+ EXPECT_TRUE(!child.GetUniquePosition().IsValid());
+ }
+
+ EXPECT_EQ(FAILED_DATABASE_CORRUPT, SimulateSaveAndReloadDir());
+}
+
TEST_F(SyncableDirectoryTest, General) {
int64 written_metahandle;
const Id id = TestIdFactory::FromNumber(99);
diff --git a/sync/syncable/syncable_unittest.cc b/sync/syncable/syncable_unittest.cc
index 021e4bd..227b406 100644
--- a/sync/syncable/syncable_unittest.cc
+++ b/sync/syncable/syncable_unittest.cc
@@ -344,7 +344,7 @@ TEST_F(OnDiskSyncableDirectoryTest,
specifics.mutable_bookmark()->set_favicon("PNG");
specifics.mutable_bookmark()->set_url("http://nowhere");
create.PutSpecifics(specifics);
- update.PutSpecifics(specifics);
+ update.PutServerSpecifics(specifics);
create_pre_save = create.GetKernelCopy();
update_pre_save = update.GetKernelCopy();
create_id = create.GetId();
@@ -379,10 +379,9 @@ TEST_F(OnDiskSyncableDirectoryTest,
(i == TRANSACTION_VERSION ? 1 : 0),
create_post_save.ref((Int64Field)i))
<< "int64 field #" << i << " changed during save/load";
- EXPECT_EQ(update_pre_save.ref((Int64Field)i) +
- (i == TRANSACTION_VERSION ? 1 : 0),
+ EXPECT_EQ(update_pre_save.ref((Int64Field)i),
update_post_save.ref((Int64Field)i))
- << "int64 field #" << i << " changed during save/load";
+ << "int64 field #" << i << " changed during save/load";
}
for ( ; i < TIME_FIELDS_END ; ++i) {
EXPECT_EQ(create_pre_save.ref((TimeField)i),