author     zea <zea@chromium.org>  2016-02-03 18:25:40 -0800
committer  Commit bot <commit-bot@chromium.org>  2016-02-04 02:26:42 +0000
commit     09952e8af212f57b630952aa83ca14457642faf5 (patch)
tree       48e734fa880cd67a757b74d3c8e56b0ba007bf60 /sync
parent     4b6f030a35226c0d6e412b1025e791e5391b1455 (diff)
[Sync] Fix sync directory initialization to better handle errors
The following changes are made:
- Handle failure to change the page size by failing early.
- Do not attempt to silently recreate the database on error loading.
- Fix testing to verify that the page size is updated properly on update.
- Add test for the case where the database doesn't load properly on upgrade.

BUG=583795

Review URL: https://codereview.chromium.org/1661063004

Cr-Commit-Position: refs/heads/master@{#373433}
Diffstat (limited to 'sync')
-rw-r--r--  sync/syncable/directory_backing_store.cc           52
-rw-r--r--  sync/syncable/directory_backing_store.h            12
-rw-r--r--  sync/syncable/directory_backing_store_unittest.cc   60
3 files changed, 87 insertions(+), 37 deletions(-)
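The core of the change, visible in the diff below, is that InitializeTables() now fails out whenever the page size cannot be read or updated, and only drops and recreates tables when the share_version table is missing; the decision to re-sync from scratch after an error is left to the caller. As a rough illustration of the page-size step only, here is a minimal standalone sketch using the raw SQLite C API rather than Chromium's sql::Connection wrapper; the file name and helper names are illustrative and not taken from the patch.

// Minimal sketch of the page-size handling added by this patch, written
// against the plain SQLite C API (not Chromium's sql::Connection). Read the
// current page size; if it differs from the target, change it and VACUUM so
// the new size takes effect, reporting any failure to the caller instead of
// papering over it.
#include <sqlite3.h>
#include <cstdio>
#include <string>

namespace {

const int kTargetPageSizeBytes = 32768;  // 32 KB, as in the patch.

// Returns the database page size via "PRAGMA page_size;", or -1 on error.
int GetPageSize(sqlite3* db) {
  sqlite3_stmt* stmt = nullptr;
  if (sqlite3_prepare_v2(db, "PRAGMA page_size;", -1, &stmt, nullptr) !=
      SQLITE_OK)
    return -1;
  int page_size = -1;
  if (sqlite3_step(stmt) == SQLITE_ROW)
    page_size = sqlite3_column_int(stmt, 0);
  sqlite3_finalize(stmt);
  return page_size;
}

// Mirrors the shape of DirectoryBackingStore::UpdatePageSizeIfNecessary():
// returns true on success (including "already correct"), false on any error.
bool UpdatePageSizeIfNecessary(sqlite3* db) {
  const int page_size = GetPageSize(db);
  if (page_size < 0)
    return false;
  if (page_size == kTargetPageSizeBytes)
    return true;
  const std::string pragma =
      "PRAGMA page_size=" + std::to_string(kTargetPageSizeBytes) + ";";
  if (sqlite3_exec(db, pragma.c_str(), nullptr, nullptr, nullptr) != SQLITE_OK)
    return false;
  // The new page size only applies once the file is rewritten.
  return sqlite3_exec(db, "VACUUM;", nullptr, nullptr, nullptr) == SQLITE_OK;
}

}  // namespace

int main() {
  sqlite3* db = nullptr;
  if (sqlite3_open("sync_directory_example.db", &db) != SQLITE_OK)
    return 1;
  // Fail early, as the patched InitializeTables() now does, instead of
  // silently dropping and recreating the tables.
  const bool ok = UpdatePageSizeIfNecessary(db);
  std::printf("page size update %s\n", ok ? "succeeded" : "failed");
  sqlite3_close(db);
  return ok ? 0 : 1;
}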
diff --git a/sync/syncable/directory_backing_store.cc b/sync/syncable/directory_backing_store.cc
index c5f1fd5..9d7f4fc 100644
--- a/sync/syncable/directory_backing_store.cc
+++ b/sync/syncable/directory_backing_store.cc
@@ -40,6 +40,9 @@ namespace syncable {
// Increment this version whenever updating DB tables.
const int32_t kCurrentDBVersion = 90;
+// The current database page size, in bytes (32 KB).
+const int32_t kCurrentPageSizeKB = 32768;
+
// Iterate over the fields of |entry| and bind each to |statement| for
// updating. Returns the number of args bound.
void BindFields(const EntryKernel& entry,
@@ -260,7 +263,7 @@ void UploadModelTypeEntryCount(const int total_specifics_copies,
DirectoryBackingStore::DirectoryBackingStore(const string& dir_name)
: dir_name_(dir_name),
- database_page_size_(32768),
+ database_page_size_(kCurrentPageSizeKB),
needs_metas_column_refresh_(false),
needs_share_info_column_refresh_(false) {
DCHECK(base::ThreadTaskRunnerHandle::IsSet());
@@ -270,7 +273,7 @@ DirectoryBackingStore::DirectoryBackingStore(const string& dir_name)
DirectoryBackingStore::DirectoryBackingStore(const string& dir_name,
sql::Connection* db)
: dir_name_(dir_name),
- database_page_size_(32768),
+ database_page_size_(kCurrentPageSizeKB),
db_(db),
needs_metas_column_refresh_(false),
needs_share_info_column_refresh_(false) {
@@ -413,14 +416,20 @@ bool DirectoryBackingStore::OpenInMemory() {
}
bool DirectoryBackingStore::InitializeTables() {
- int page_size = 0;
- if (GetDatabasePageSize(&page_size) && page_size == 4096) {
- IncreasePageSizeTo32K();
- }
+ if (!UpdatePageSizeIfNecessary())
+ return false;
+
sql::Transaction transaction(db_.get());
if (!transaction.Begin())
return false;
+ if (!db_->DoesTableExist("share_version")) {
+ // Delete the existing database (if any), and create a fresh one.
+ DropAllTables();
+ if (!CreateTables())
+ return false;
+ }
+
int version_on_disk = GetVersion();
// Upgrade from version 67. Version 67 was widely distributed as the original
@@ -569,22 +578,14 @@ bool DirectoryBackingStore::InitializeTables() {
// version.
if (version_on_disk == kCurrentDBVersion && needs_column_refresh()) {
if (!RefreshColumns())
- version_on_disk = 0;
- }
-
- // A final, alternative catch-all migration to simply re-sync everything.
- if (version_on_disk != kCurrentDBVersion) {
- if (version_on_disk > kCurrentDBVersion)
- return false;
-
- // Fallback (re-sync everything) migration path.
- DVLOG(1) << "Old/null sync database, version " << version_on_disk;
- // Delete the existing database (if any), and create a fresh one.
- DropAllTables();
- if (!CreateTables())
return false;
}
+ // In case of error, let the caller decide whether to re-sync from scratch
+ // with a new database.
+ if (version_on_disk != kCurrentDBVersion)
+ return false;
+
return transaction.Commit();
}
@@ -1472,6 +1473,7 @@ bool DirectoryBackingStore::MigrateVersion89To90() {
bool DirectoryBackingStore::CreateTables() {
DVLOG(1) << "First run, creating tables";
+
// Create two little tables share_version and share_info
if (!db_->Execute(
"CREATE TABLE share_version ("
@@ -1711,10 +1713,16 @@ bool DirectoryBackingStore::GetDatabasePageSize(int* page_size) {
return true;
}
-bool DirectoryBackingStore::IncreasePageSizeTo32K() {
- if (!db_->Execute("PRAGMA page_size=32768;") || !Vacuum()) {
+bool DirectoryBackingStore::UpdatePageSizeIfNecessary() {
+ int page_size;
+ if (!GetDatabasePageSize(&page_size))
+ return false;
+ if (page_size == kCurrentPageSizeKB)
+ return true;
+ std::string update_page_size = base::StringPrintf(
+ "PRAGMA page_size=%i;", kCurrentPageSizeKB);
+ if (!db_->Execute(update_page_size.c_str()) || !Vacuum())
return false;
- }
return true;
}
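The InitializeTables() hunk earlier in this file drops the old catch-all that silently dropped and recreated the tables whenever the on-disk version did not match; InitializeTables() now simply returns false, and the caller decides whether to re-sync from scratch (the new MinorCorruptionAndUpgrade test below exercises that path through OnDiskDirectoryBackingStore). As a hypothetical sketch of that caller-side policy, with LoadDatabase standing in for the real Load() call and not part of the patch, it could look like this:

// Caller-side "re-sync from scratch" fallback: try to load the existing
// database, and only if that fails delete the file and retry once with a
// fresh one. Everything here is a stand-in for the Chromium classes.
#include <filesystem>
#include <fstream>
#include <iostream>

enum class LoadResult { kOpened, kFailedOpen };

// Stand-in for DirectoryBackingStore::Load(). For this sketch, any
// pre-existing file is treated as a database that fails to initialize (bad
// page size, wrong version, ...), while a missing file is created fresh and
// opens cleanly.
LoadResult LoadDatabase(const std::filesystem::path& db_path) {
  if (std::filesystem::exists(db_path))
    return LoadResult::kFailedOpen;
  std::ofstream(db_path).put('\0');  // Pretend to create a fresh database.
  return LoadResult::kOpened;
}

// Load once; only on failure delete the file and load again, which recreates
// the database from scratch.
LoadResult LoadWithRecreateFallback(const std::filesystem::path& db_path,
                                    bool* failed_first_attempt) {
  *failed_first_attempt = false;
  if (LoadDatabase(db_path) == LoadResult::kOpened)
    return LoadResult::kOpened;

  *failed_first_attempt = true;  // Mirrors DidFailFirstOpenAttempt() below.
  std::error_code ec;
  std::filesystem::remove(db_path, ec);
  return LoadDatabase(db_path);
}

int main() {
  bool failed_first_attempt = false;
  const LoadResult result = LoadWithRecreateFallback(
      "sync_directory_example.db", &failed_first_attempt);
  std::cout << "opened: " << (result == LoadResult::kOpened)
            << ", failed first attempt: " << failed_first_attempt << "\n";
  return 0;
}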
diff --git a/sync/syncable/directory_backing_store.h b/sync/syncable/directory_backing_store.h
index d3375af..50fba9c 100644
--- a/sync/syncable/directory_backing_store.h
+++ b/sync/syncable/directory_backing_store.h
@@ -29,6 +29,7 @@ namespace syncer {
namespace syncable {
SYNC_EXPORT extern const int32_t kCurrentDBVersion;
+SYNC_EXPORT extern const int32_t kCurrentPageSizeKB;
struct ColumnSpec;
@@ -97,6 +98,9 @@ class SYNC_EXPORT DirectoryBackingStore : public base::NonThreadSafe {
virtual void SetCatastrophicErrorHandler(
const base::Closure& catastrophic_error_handler);
+ // Returns true on success, false on error.
+ bool GetDatabasePageSize(int* page_size);
+
protected:
// For test classes.
DirectoryBackingStore(const std::string& dir_name,
@@ -185,8 +189,8 @@ class SYNC_EXPORT DirectoryBackingStore : public base::NonThreadSafe {
void ResetAndCreateConnection();
private:
- friend class TestDirectoryBackingStore;
friend class DirectoryBackingStoreTest;
+ friend class TestDirectoryBackingStore;
FRIEND_TEST_ALL_PREFIXES(DirectoryBackingStoreTest,
IncreaseDatabasePageSizeFrom4KTo32K);
FRIEND_TEST_ALL_PREFIXES(DirectoryBackingStoreTest,
@@ -196,6 +200,7 @@ class SYNC_EXPORT DirectoryBackingStore : public base::NonThreadSafe {
FRIEND_TEST_ALL_PREFIXES(
DirectoryBackingStoreTest,
CatastrophicErrorHandler_InvocationDuringSaveChanges);
+ FRIEND_TEST_ALL_PREFIXES(MigrationTest, ToCurrentVersion);
// Drop all tables in preparation for reinitialization.
void DropAllTables();
@@ -235,10 +240,7 @@ class SYNC_EXPORT DirectoryBackingStore : public base::NonThreadSafe {
bool Vacuum();
// Returns true on success, false on error.
- bool IncreasePageSizeTo32K();
-
- // Returns true on success, false on error.
- bool GetDatabasePageSize(int* page_size);
+ bool UpdatePageSizeIfNecessary();
// Prepares |save_statement| for saving entries in |table|.
void PrepareSaveEntryStatement(EntryTable table,
diff --git a/sync/syncable/directory_backing_store_unittest.cc b/sync/syncable/directory_backing_store_unittest.cc
index 2c1edb3..f5ce6b4 100644
--- a/sync/syncable/directory_backing_store_unittest.cc
+++ b/sync/syncable/directory_backing_store_unittest.cc
@@ -57,6 +57,7 @@ scoped_ptr<EntryKernel> CreateEntry(int id, const std::string &id_suffix) {
} // namespace
+SYNC_EXPORT extern const int32_t kCurrentPageSizeKB;
SYNC_EXPORT extern const int32_t kCurrentDBVersion;
class MigrationTest : public testing::TestWithParam<int> {
@@ -3578,7 +3579,9 @@ TEST_F(DirectoryBackingStoreTest, DetectInvalidPosition) {
TEST_P(MigrationTest, ToCurrentVersion) {
sql::Connection connection;
- ASSERT_TRUE(connection.OpenInMemory());
+ ASSERT_TRUE(connection.Open(GetDatabasePath()));
+ // Assume all old versions have an old page size.
+ connection.set_page_size(4096);
switch (GetParam()) {
case 67:
SetUpVersion67Database(&connection);
@@ -3660,6 +3663,7 @@ TEST_P(MigrationTest, ToCurrentVersion) {
// at the new schema. See the MigrateToLatestAndDump test case.
FAIL() << "Need to supply database dump for version " << GetParam();
}
+ connection.Close();
syncable::Directory::KernelLoadInfo dir_info;
Directory::MetahandlesMap handles_map;
@@ -3668,16 +3672,22 @@ TEST_P(MigrationTest, ToCurrentVersion) {
STLValueDeleter<Directory::MetahandlesMap> index_deleter(&handles_map);
{
- scoped_ptr<TestDirectoryBackingStore> dbs(
- new TestDirectoryBackingStore(GetUsername(), &connection));
+ scoped_ptr<OnDiskDirectoryBackingStore> dbs(
+ new OnDiskDirectoryBackingStore(GetUsername(), GetDatabasePath()));
ASSERT_EQ(OPENED, dbs->Load(&handles_map, &delete_journals,
&metahandles_to_purge, &dir_info));
if (!metahandles_to_purge.empty())
- dbs->DeleteEntries(metahandles_to_purge);
+ dbs->DeleteEntries(DirectoryBackingStore::METAS_TABLE,
+ metahandles_to_purge);
ASSERT_FALSE(dbs->needs_column_refresh());
ASSERT_EQ(kCurrentDBVersion, dbs->GetVersion());
+ int pageSize = 0;
+ ASSERT_TRUE(dbs->GetDatabasePageSize(&pageSize));
+ ASSERT_EQ(kCurrentPageSizeKB, pageSize);
}
+ ASSERT_TRUE(connection.Open(GetDatabasePath()));
+
// Columns deleted in Version 67.
ASSERT_FALSE(connection.DoesColumnExist("metas", "name"));
ASSERT_FALSE(connection.DoesColumnExist("metas", "unsanitized_name"));
@@ -4035,6 +4045,37 @@ TEST_F(DirectoryBackingStoreTest, MinorCorruption) {
}
}
+TEST_F(DirectoryBackingStoreTest, MinorCorruptionAndUpgrade) {
+ {
+ scoped_ptr<OnDiskDirectoryBackingStore> dbs(
+ new OnDiskDirectoryBackingStore(GetUsername(), GetDatabasePath()));
+ EXPECT_TRUE(LoadAndIgnoreReturnedData(dbs.get()));
+ }
+
+ // Make the node look outdated with an invalid version.
+ {
+ sql::Connection connection;
+ ASSERT_TRUE(connection.Open(GetDatabasePath()));
+ ASSERT_TRUE(connection.Execute("UPDATE share_version SET data = 0;"));
+ ASSERT_TRUE(connection.Execute("PRAGMA page_size=4096;"));
+ ASSERT_TRUE(connection.Execute("VACUUM;"));
+ }
+
+ {
+ scoped_ptr<OnDiskDirectoryBackingStoreForTest> dbs(
+ new OnDiskDirectoryBackingStoreForTest(GetUsername(),
+ GetDatabasePath()));
+ dbs->SetCatastrophicErrorHandler(base::Bind(&base::DoNothing));
+
+ EXPECT_TRUE(LoadAndIgnoreReturnedData(dbs.get()));
+ EXPECT_TRUE(dbs->DidFailFirstOpenAttempt());
+
+ int page_size = 0;
+ ASSERT_TRUE(dbs->GetDatabasePageSize(&page_size));
+ EXPECT_EQ(kCurrentPageSizeKB, page_size);
+ }
+}
+
TEST_F(DirectoryBackingStoreTest, DeleteEntries) {
sql::Connection connection;
ASSERT_TRUE(connection.OpenInMemory());
@@ -4119,13 +4160,12 @@ TEST_F(DirectoryBackingStoreTest, IncreaseDatabasePageSizeFrom4KTo32K) {
// Check if update is successful.
int pageSize = 0;
- dbs->GetDatabasePageSize(&pageSize);
- EXPECT_NE(32768, pageSize);
- dbs->db_->set_page_size(32768);
- dbs->IncreasePageSizeTo32K();
+ EXPECT_TRUE(dbs->GetDatabasePageSize(&pageSize));
+ EXPECT_NE(kCurrentPageSizeKB, pageSize);
+ EXPECT_TRUE(dbs->UpdatePageSizeIfNecessary());
pageSize = 0;
- dbs->GetDatabasePageSize(&pageSize);
- EXPECT_EQ(32768, pageSize);
+ EXPECT_TRUE(dbs->GetDatabasePageSize(&pageSize));
+ EXPECT_EQ(kCurrentPageSizeKB, pageSize);
}
// See that a catastrophic error handler remains set across instances of the