author     akalin@chromium.org <akalin@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2012-03-15 09:35:42 +0000
committer  akalin@chromium.org <akalin@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2012-03-15 09:35:42 +0000
commit     c1c32c85357f14756247b04b8b5ae41b05bf2e16 (patch)
tree       58f25f64e1fa592e8daf276ef69901cd2218f929 /chrome/browser/sync/syncable
parent     63ee33bde2ec8471a70f0f0ec6a1962dd07fc8ab (diff)
download   chromium_src-c1c32c85357f14756247b04b8b5ae41b05bf2e16.zip
           chromium_src-c1c32c85357f14756247b04b8b5ae41b05bf2e16.tar.gz
           chromium_src-c1c32c85357f14756247b04b8b5ae41b05bf2e16.tar.bz2
[Sync] Move 'sync' target to sync/
Also move related test files.

Move WriteNode::UpdateEntryWithEncryption to nigori_util.h.

Clean up defines and dependencies. In particular, get rid of
SYNC_ENGINE_VERSION_STRING and hard-code the string in the single place
it's used.

Rename data_encryption.* to data_encryption_win.* and add a pragma for
crypt32.lib.

Clean up exit-time constructor warnings in sync{able,er}_unittest.cc.

Remove some unused files.

BUG=117585
TEST=
TBR=jhawkins@chromium.org

Review URL: https://chromiumcodereview.appspot.com/9699057

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@126872 0039d316-1c4b-4281-b951-d872f2087c98
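The crypt32.lib pragma mentioned above does not appear in this diff (it lands under the new sync/ location). As a rough sketch of the usual MSVC pattern such a change follows, with the file placement assumed rather than taken from this patch:

// Hypothetical sketch for the renamed data_encryption_win.cc: pull in the
// Windows crypto import library that CryptProtectData()/CryptUnprotectData()
// live in, so callers don't need to add crypt32.lib to their link line.
#pragma comment(lib, "crypt32.lib")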
Diffstat (limited to 'chrome/browser/sync/syncable')
-rw-r--r--  chrome/browser/sync/syncable/DEPS | 19
-rw-r--r--  chrome/browser/sync/syncable/blob.h | 19
-rw-r--r--  chrome/browser/sync/syncable/dir_open_result.h | 20
-rw-r--r--  chrome/browser/sync/syncable/directory_backing_store.cc | 1076
-rw-r--r--  chrome/browser/sync/syncable/directory_backing_store.h | 166
-rw-r--r--  chrome/browser/sync/syncable/directory_backing_store_unittest.cc | 2162
-rw-r--r--  chrome/browser/sync/syncable/directory_change_delegate.h | 45
-rw-r--r--  chrome/browser/sync/syncable/in_memory_directory_backing_store.cc | 32
-rw-r--r--  chrome/browser/sync/syncable/in_memory_directory_backing_store.h | 32
-rw-r--r--  chrome/browser/sync/syncable/model_type.cc | 542
-rw-r--r--  chrome/browser/sync/syncable/model_type.h | 175
-rw-r--r--  chrome/browser/sync/syncable/model_type_payload_map.cc | 100
-rw-r--r--  chrome/browser/sync/syncable/model_type_payload_map.h | 60
-rw-r--r--  chrome/browser/sync/syncable/model_type_payload_map_unittest.cc | 43
-rw-r--r--  chrome/browser/sync/syncable/model_type_test_util.cc | 52
-rw-r--r--  chrome/browser/sync/syncable/model_type_test_util.h | 26
-rw-r--r--  chrome/browser/sync/syncable/model_type_unittest.cc | 76
-rw-r--r--  chrome/browser/sync/syncable/on_disk_directory_backing_store.cc | 41
-rw-r--r--  chrome/browser/sync/syncable/on_disk_directory_backing_store.h | 30
-rw-r--r--  chrome/browser/sync/syncable/syncable-inl.h | 22
-rw-r--r--  chrome/browser/sync/syncable/syncable.cc | 2405
-rw-r--r--  chrome/browser/sync/syncable/syncable.h | 1349
-rw-r--r--  chrome/browser/sync/syncable/syncable_changes_version.h | 30
-rw-r--r--  chrome/browser/sync/syncable/syncable_columns.h | 74
-rw-r--r--  chrome/browser/sync/syncable/syncable_enum_conversions.cc | 164
-rw-r--r--  chrome/browser/sync/syncable/syncable_enum_conversions.h | 45
-rw-r--r--  chrome/browser/sync/syncable/syncable_enum_conversions_unittest.cc | 85
-rw-r--r--  chrome/browser/sync/syncable/syncable_id.cc | 77
-rw-r--r--  chrome/browser/sync/syncable/syncable_id.h | 134
-rw-r--r--  chrome/browser/sync/syncable/syncable_id_unittest.cc | 96
-rw-r--r--  chrome/browser/sync/syncable/syncable_mock.cc | 20
-rw-r--r--  chrome/browser/sync/syncable/syncable_mock.h | 47
-rw-r--r--  chrome/browser/sync/syncable/syncable_unittest.cc | 1743
-rw-r--r--  chrome/browser/sync/syncable/transaction_observer.h | 25
34 files changed, 0 insertions, 11032 deletions
diff --git a/chrome/browser/sync/syncable/DEPS b/chrome/browser/sync/syncable/DEPS
deleted file mode 100644
index bca9e7e..0000000
--- a/chrome/browser/sync/syncable/DEPS
+++ /dev/null
@@ -1,19 +0,0 @@
-include_rules = [
- "-chrome",
- "+chrome/test/base",
-
- "+chrome/browser/sync/protocol",
- "+chrome/browser/sync/sessions",
- "+chrome/browser/sync/syncable",
- "+chrome/browser/sync/test",
- "+chrome/browser/sync/util",
-
- # this file is weird.
- "+chrome/browser/sync/engine/syncproto.h",
-
- # maybe this file should live in syncable?
- "+chrome/browser/sync/engine/model_safe_worker.h",
-
- "-chrome/browser/sync/internal_api",
- "+chrome/browser/sync/internal_api/includes",
-]
diff --git a/chrome/browser/sync/syncable/blob.h b/chrome/browser/sync/syncable/blob.h
deleted file mode 100644
index 54d1d00..0000000
--- a/chrome/browser/sync/syncable/blob.h
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright (c) 2009 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CHROME_BROWSER_SYNC_SYNCABLE_BLOB_H_
-#define CHROME_BROWSER_SYNC_SYNCABLE_BLOB_H_
-#pragma once
-
-#include <vector>
-
-#include "base/basictypes.h" // For uint8.
-
-namespace syncable {
-
-typedef std::vector<uint8> Blob;
-
-} // namespace syncable
-
-#endif // CHROME_BROWSER_SYNC_SYNCABLE_BLOB_H_
diff --git a/chrome/browser/sync/syncable/dir_open_result.h b/chrome/browser/sync/syncable/dir_open_result.h
deleted file mode 100644
index 4f082d5..0000000
--- a/chrome/browser/sync/syncable/dir_open_result.h
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CHROME_BROWSER_SYNC_SYNCABLE_DIR_OPEN_RESULT_H_
-#define CHROME_BROWSER_SYNC_SYNCABLE_DIR_OPEN_RESULT_H_
-#pragma once
-
-namespace syncable {
-enum DirOpenResult { OPENED, // success.
- FAILED_NEWER_VERSION, // DB version is too new.
- FAILED_MAKE_REPOSITORY, // Couldn't create subdir.
- FAILED_OPEN_DATABASE, // sqlite_open() failed.
- FAILED_DISK_FULL, // The disk is full.
- FAILED_DATABASE_CORRUPT, // Something is wrong with the DB
- FAILED_LOGICAL_CORRUPTION, // Invalid database contents
-};
-} // namespace syncable
-
-#endif // CHROME_BROWSER_SYNC_SYNCABLE_DIR_OPEN_RESULT_H_
diff --git a/chrome/browser/sync/syncable/directory_backing_store.cc b/chrome/browser/sync/syncable/directory_backing_store.cc
deleted file mode 100644
index c409be4..0000000
--- a/chrome/browser/sync/syncable/directory_backing_store.cc
+++ /dev/null
@@ -1,1076 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "chrome/browser/sync/syncable/directory_backing_store.h"
-
-#include "build/build_config.h"
-
-#include <limits>
-
-#include "base/base64.h"
-#include "base/file_util.h"
-#include "base/hash_tables.h"
-#include "base/logging.h"
-#include "base/metrics/histogram.h"
-#include "base/rand_util.h"
-#include "base/stl_util.h"
-#include "base/string_number_conversions.h"
-#include "base/stringprintf.h"
-#include "base/time.h"
-#include "chrome/browser/sync/protocol/service_constants.h"
-#include "chrome/browser/sync/syncable/syncable-inl.h"
-#include "chrome/browser/sync/syncable/syncable_columns.h"
-#include "chrome/browser/sync/util/time.h"
-#include "sql/connection.h"
-#include "sql/statement.h"
-#include "sql/transaction.h"
-#include "sync/protocol/bookmark_specifics.pb.h"
-#include "sync/protocol/sync.pb.h"
-
-using std::string;
-
-namespace syncable {
-
-// This just has to be big enough to hold an UPDATE or INSERT statement that
-// modifies all the columns in the entry table.
-static const string::size_type kUpdateStatementBufferSize = 2048;
-
-// Increment this version whenever updating DB tables.
-extern const int32 kCurrentDBVersion; // Global visibility for our unittest.
-const int32 kCurrentDBVersion = 78;
-
-// Iterate over the fields of |entry| and bind each to |statement| for
-// updating. Returns the number of args bound.
-void BindFields(const EntryKernel& entry,
- sql::Statement* statement) {
- int index = 0;
- int i = 0;
- for (i = BEGIN_FIELDS; i < INT64_FIELDS_END; ++i) {
- statement->BindInt64(index++, entry.ref(static_cast<Int64Field>(i)));
- }
- for ( ; i < TIME_FIELDS_END; ++i) {
- statement->BindInt64(index++,
- browser_sync::TimeToProtoTime(
- entry.ref(static_cast<TimeField>(i))));
- }
- for ( ; i < ID_FIELDS_END; ++i) {
- statement->BindString(index++, entry.ref(static_cast<IdField>(i)).s_);
- }
- for ( ; i < BIT_FIELDS_END; ++i) {
- statement->BindInt(index++, entry.ref(static_cast<BitField>(i)));
- }
- for ( ; i < STRING_FIELDS_END; ++i) {
- statement->BindString(index++, entry.ref(static_cast<StringField>(i)));
- }
- std::string temp;
- for ( ; i < PROTO_FIELDS_END; ++i) {
- entry.ref(static_cast<ProtoField>(i)).SerializeToString(&temp);
- statement->BindBlob(index++, temp.data(), temp.length());
- }
-}
-
-// The caller owns the returned EntryKernel*. Assumes the statement currently
-// points to a valid row in the metas table.
-EntryKernel* UnpackEntry(sql::Statement* statement) {
- EntryKernel* kernel = new EntryKernel();
- DCHECK_EQ(statement->ColumnCount(), static_cast<int>(FIELD_COUNT));
- int i = 0;
- for (i = BEGIN_FIELDS; i < INT64_FIELDS_END; ++i) {
- kernel->put(static_cast<Int64Field>(i), statement->ColumnInt64(i));
- }
- for ( ; i < TIME_FIELDS_END; ++i) {
- kernel->put(static_cast<TimeField>(i),
- browser_sync::ProtoTimeToTime(statement->ColumnInt64(i)));
- }
- for ( ; i < ID_FIELDS_END; ++i) {
- kernel->mutable_ref(static_cast<IdField>(i)).s_ =
- statement->ColumnString(i);
- }
- for ( ; i < BIT_FIELDS_END; ++i) {
- kernel->put(static_cast<BitField>(i), (0 != statement->ColumnInt(i)));
- }
- for ( ; i < STRING_FIELDS_END; ++i) {
- kernel->put(static_cast<StringField>(i),
- statement->ColumnString(i));
- }
- for ( ; i < PROTO_FIELDS_END; ++i) {
- kernel->mutable_ref(static_cast<ProtoField>(i)).ParseFromArray(
- statement->ColumnBlob(i), statement->ColumnByteLength(i));
- }
- return kernel;
-}
-
-namespace {
-
-string ComposeCreateTableColumnSpecs() {
- const ColumnSpec* begin = g_metas_columns;
- const ColumnSpec* end = g_metas_columns + arraysize(g_metas_columns);
- string query;
- query.reserve(kUpdateStatementBufferSize);
- char separator = '(';
- for (const ColumnSpec* column = begin; column != end; ++column) {
- query.push_back(separator);
- separator = ',';
- query.append(column->name);
- query.push_back(' ');
- query.append(column->spec);
- }
- query.push_back(')');
- return query;
-}
-
-void AppendColumnList(std::string* output) {
- const char* joiner = " ";
- // Be explicit in SELECT order to match up with UnpackEntry.
- for (int i = BEGIN_FIELDS; i < BEGIN_FIELDS + FIELD_COUNT; ++i) {
- output->append(joiner);
- output->append(ColumnName(i));
- joiner = ", ";
- }
-}
-
-} // namespace
-
-///////////////////////////////////////////////////////////////////////////////
-// DirectoryBackingStore implementation.
-
-DirectoryBackingStore::DirectoryBackingStore(const string& dir_name)
- : db_(new sql::Connection()),
- dir_name_(dir_name),
- needs_column_refresh_(false) {
-}
-
-DirectoryBackingStore::DirectoryBackingStore(const string& dir_name,
- sql::Connection* db)
- : db_(db),
- dir_name_(dir_name),
- needs_column_refresh_(false) {
-}
-
-DirectoryBackingStore::~DirectoryBackingStore() {
-}
-
-bool DirectoryBackingStore::DeleteEntries(const MetahandleSet& handles) {
- if (handles.empty())
- return true;
-
- sql::Statement statement(db_->GetCachedStatement(
- SQL_FROM_HERE, "DELETE FROM metas WHERE metahandle = ?"));
-
- for (MetahandleSet::const_iterator i = handles.begin(); i != handles.end();
- ++i) {
- statement.BindInt64(0, *i);
- if (!statement.Run())
- return false;
- statement.Reset();
- }
- return true;
-}
-
-bool DirectoryBackingStore::SaveChanges(
- const Directory::SaveChangesSnapshot& snapshot) {
- DCHECK(CalledOnValidThread());
- DCHECK(db_->is_open());
-
- // Back out early if there is nothing to write.
- bool save_info =
- (Directory::KERNEL_SHARE_INFO_DIRTY == snapshot.kernel_info_status);
- if (snapshot.dirty_metas.size() < 1 && !save_info)
- return true;
-
- sql::Transaction transaction(db_.get());
- if (!transaction.Begin())
- return false;
-
- for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
- i != snapshot.dirty_metas.end(); ++i) {
- DCHECK(i->is_dirty());
- if (!SaveEntryToDB(*i))
- return false;
- }
-
- if (!DeleteEntries(snapshot.metahandles_to_purge))
- return false;
-
- if (save_info) {
- const Directory::PersistedKernelInfo& info = snapshot.kernel_info;
- sql::Statement s1(db_->GetCachedStatement(
- SQL_FROM_HERE,
- "UPDATE share_info "
- "SET store_birthday = ?, "
- "next_id = ?, "
- "notification_state = ?"));
- s1.BindString(0, info.store_birthday);
- s1.BindInt64(1, info.next_id);
- s1.BindBlob(2, info.notification_state.data(),
- info.notification_state.size());
-
- if (!s1.Run())
- return false;
- DCHECK_EQ(db_->GetLastChangeCount(), 1);
-
- sql::Statement s2(db_->GetCachedStatement(
- SQL_FROM_HERE,
- "INSERT OR REPLACE "
- "INTO models (model_id, progress_marker, initial_sync_ended) "
- "VALUES (?, ?, ?)"));
-
- for (int i = FIRST_REAL_MODEL_TYPE; i < MODEL_TYPE_COUNT; ++i) {
- // We persist not ModelType but rather a protobuf-derived ID.
- string model_id = ModelTypeEnumToModelId(ModelTypeFromInt(i));
- string progress_marker;
- info.download_progress[i].SerializeToString(&progress_marker);
- s2.BindBlob(0, model_id.data(), model_id.length());
- s2.BindBlob(1, progress_marker.data(), progress_marker.length());
- s2.BindBool(2, info.initial_sync_ended.Has(ModelTypeFromInt(i)));
- if (!s2.Run())
- return false;
- DCHECK_EQ(db_->GetLastChangeCount(), 1);
- s2.Reset();
- }
- }
-
- return transaction.Commit();
-}
-
-bool DirectoryBackingStore::InitializeTables() {
- sql::Transaction transaction(db_.get());
- if (!transaction.Begin())
- return false;
-
- int version_on_disk = GetVersion();
-
- // Upgrade from version 67. Version 67 was widely distributed as the original
- // Bookmark Sync release. Version 68 removed unique naming.
- if (version_on_disk == 67) {
- if (MigrateVersion67To68())
- version_on_disk = 68;
- }
- // Version 69 introduced additional datatypes.
- if (version_on_disk == 68) {
- if (MigrateVersion68To69())
- version_on_disk = 69;
- }
-
- if (version_on_disk == 69) {
- if (MigrateVersion69To70())
- version_on_disk = 70;
- }
-
- // Version 71 changed the sync progress information to be per-datatype.
- if (version_on_disk == 70) {
- if (MigrateVersion70To71())
- version_on_disk = 71;
- }
-
- // Version 72 removed extended attributes, a legacy way to do extensible
- // key/value information, stored in their own table.
- if (version_on_disk == 71) {
- if (MigrateVersion71To72())
- version_on_disk = 72;
- }
-
- // Version 73 added a field for notification state.
- if (version_on_disk == 72) {
- if (MigrateVersion72To73())
- version_on_disk = 73;
- }
-
- // Version 74 added state for the autofill migration.
- if (version_on_disk == 73) {
- if (MigrateVersion73To74())
- version_on_disk = 74;
- }
-
- // Version 75 migrated from int64-based timestamps to per-datatype tokens.
- if (version_on_disk == 74) {
- if (MigrateVersion74To75())
- version_on_disk = 75;
- }
-
- // Version 76 removed all (5) autofill migration related columns.
- if (version_on_disk == 75) {
- if (MigrateVersion75To76())
- version_on_disk = 76;
- }
-
- // Version 77 standardized all time fields to ms since the Unix
- // epoch.
- if (version_on_disk == 76) {
- if (MigrateVersion76To77())
- version_on_disk = 77;
- }
-
- // Version 78 added the column base_server_specifics to the metas table.
- if (version_on_disk == 77) {
- if (MigrateVersion77To78())
- version_on_disk = 78;
- }
-
- // If one of the migrations requested it, drop columns that aren't current.
- // It's only safe to do this after migrating all the way to the current
- // version.
- if (version_on_disk == kCurrentDBVersion && needs_column_refresh_) {
- if (!RefreshColumns())
- version_on_disk = 0;
- }
-
- // A final, alternative catch-all migration to simply re-sync everything.
- //
- // TODO(rlarocque): It's wrong to recreate the database here unless the higher
- // layers were expecting us to do so. See crbug.com/103824. We must leave
- // this code as is for now because this is the code that ends up creating the
- // database in the first time sync case, where the higher layers are expecting
- // us to create a fresh database. The solution to this should be to implement
- // crbug.com/105018.
- if (version_on_disk != kCurrentDBVersion) {
- if (version_on_disk > kCurrentDBVersion)
- return FAILED_NEWER_VERSION;
-
- // Fallback (re-sync everything) migration path.
- DVLOG(1) << "Old/null sync database, version " << version_on_disk;
- // Delete the existing database (if any), and create a fresh one.
- DropAllTables();
- if (!CreateTables())
- return false;
- }
-
- sql::Statement s(db_->GetUniqueStatement(
- "SELECT db_create_version, db_create_time FROM share_info"));
- if (!s.Step())
- return false;
- string db_create_version = s.ColumnString(0);
- int db_create_time = s.ColumnInt(1);
- DVLOG(1) << "DB created at " << db_create_time << " by version " <<
- db_create_version;
-
- return transaction.Commit();
-}
-
-// This function drops unused columns by creating a new table that contains only
-// the currently used columns then copying all rows from the old tables into
-// this new one. The tables are then rearranged so the new replaces the old.
-bool DirectoryBackingStore::RefreshColumns() {
- DCHECK(needs_column_refresh_);
-
- // Create a new table named temp_metas.
- SafeDropTable("temp_metas");
- if (!CreateMetasTable(true))
- return false;
-
- // Populate temp_metas from metas.
- //
- // At this point, the metas table may contain columns belonging to obsolete
- // schema versions. This statement explicitly lists only the columns that
- // belong to the current schema version, so the obsolete columns will be
- // effectively dropped once we rename temp_metas over top of metas.
- std::string query = "INSERT INTO temp_metas (";
- AppendColumnList(&query);
- query.append(") SELECT ");
- AppendColumnList(&query);
- query.append(" FROM metas");
- if (!db_->Execute(query.c_str()))
- return false;
-
- // Drop metas.
- SafeDropTable("metas");
-
- // Rename temp_metas -> metas.
- if (!db_->Execute("ALTER TABLE temp_metas RENAME TO metas"))
- return false;
-
- // Repeat the process for share_info.
- SafeDropTable("temp_share_info");
- if (!CreateShareInfoTable(true))
- return false;
-
- if (!db_->Execute(
- "INSERT INTO temp_share_info (id, name, store_birthday, "
- "db_create_version, db_create_time, next_id, cache_guid,"
- "notification_state) "
- "SELECT id, name, store_birthday, db_create_version, "
- "db_create_time, next_id, cache_guid, notification_state "
- "FROM share_info"))
- return false;
-
- SafeDropTable("share_info");
- if (!db_->Execute("ALTER TABLE temp_share_info RENAME TO share_info"))
- return false;
-
- needs_column_refresh_ = false;
- return true;
-}
-
-bool DirectoryBackingStore::LoadEntries(MetahandlesIndex* entry_bucket) {
- string select;
- select.reserve(kUpdateStatementBufferSize);
- select.append("SELECT ");
- AppendColumnList(&select);
- select.append(" FROM metas ");
-
- sql::Statement s(db_->GetUniqueStatement(select.c_str()));
-
- while (s.Step()) {
- EntryKernel *kernel = UnpackEntry(&s);
- entry_bucket->insert(kernel);
- }
- return s.Succeeded();
-}
-
-bool DirectoryBackingStore::LoadInfo(Directory::KernelLoadInfo* info) {
- {
- sql::Statement s(
- db_->GetUniqueStatement(
- "SELECT store_birthday, next_id, cache_guid, notification_state "
- "FROM share_info"));
- if (!s.Step())
- return false;
-
- info->kernel_info.store_birthday = s.ColumnString(0);
- info->kernel_info.next_id = s.ColumnInt64(1);
- info->cache_guid = s.ColumnString(2);
- s.ColumnBlobAsString(3, &(info->kernel_info.notification_state));
-
- // Verify there was only one row returned.
- DCHECK(!s.Step());
- DCHECK(s.Succeeded());
- }
-
- {
- sql::Statement s(
- db_->GetUniqueStatement(
- "SELECT model_id, progress_marker, initial_sync_ended "
- "FROM models"));
-
- while (s.Step()) {
- ModelType type = ModelIdToModelTypeEnum(s.ColumnBlob(0),
- s.ColumnByteLength(0));
- if (type != UNSPECIFIED && type != TOP_LEVEL_FOLDER) {
- info->kernel_info.download_progress[type].ParseFromArray(
- s.ColumnBlob(1), s.ColumnByteLength(1));
- if (s.ColumnBool(2))
- info->kernel_info.initial_sync_ended.Put(type);
- }
- }
- if (!s.Succeeded())
- return false;
- }
- {
- sql::Statement s(
- db_->GetUniqueStatement(
- "SELECT MAX(metahandle) FROM metas"));
- if (!s.Step())
- return false;
-
- info->max_metahandle = s.ColumnInt64(0);
-
- // Verify only one row was returned.
- DCHECK(!s.Step());
- DCHECK(s.Succeeded());
- }
- return true;
-}
-
-bool DirectoryBackingStore::SaveEntryToDB(const EntryKernel& entry) {
- // This statement is constructed at runtime, so we can't use
- // GetCachedStatement() to let the Connection cache it. We will construct
- // and cache it ourselves the first time this function is called.
- if (!save_entry_statement_.is_valid()) {
- string query;
- query.reserve(kUpdateStatementBufferSize);
- query.append("INSERT OR REPLACE INTO metas ");
- string values;
- values.reserve(kUpdateStatementBufferSize);
- values.append("VALUES ");
- const char* separator = "( ";
- int i = 0;
- for (i = BEGIN_FIELDS; i < PROTO_FIELDS_END; ++i) {
- query.append(separator);
- values.append(separator);
- separator = ", ";
- query.append(ColumnName(i));
- values.append("?");
- }
- query.append(" ) ");
- values.append(" )");
- query.append(values);
-
- save_entry_statement_.Assign(
- db_->GetUniqueStatement(query.c_str()));
- } else {
- save_entry_statement_.Reset();
- }
-
- BindFields(entry, &save_entry_statement_);
- return save_entry_statement_.Run();
-}
-
-bool DirectoryBackingStore::DropDeletedEntries() {
- return db_->Execute("DELETE FROM metas "
- "WHERE is_del > 0 "
- "AND is_unsynced < 1 "
- "AND is_unapplied_update < 1");
-}
-
-bool DirectoryBackingStore::SafeDropTable(const char* table_name) {
- string query = "DROP TABLE IF EXISTS ";
- query.append(table_name);
- return db_->Execute(query.c_str());
-}
-
-void DirectoryBackingStore::DropAllTables() {
- SafeDropTable("metas");
- SafeDropTable("temp_metas");
- SafeDropTable("share_info");
- SafeDropTable("temp_share_info");
- SafeDropTable("share_version");
- SafeDropTable("extended_attributes");
- SafeDropTable("models");
- SafeDropTable("temp_models");
- needs_column_refresh_ = false;
-}
-
-// static
-ModelType DirectoryBackingStore::ModelIdToModelTypeEnum(
- const void* data, int size) {
- sync_pb::EntitySpecifics specifics;
- if (!specifics.ParseFromArray(data, size))
- return syncable::UNSPECIFIED;
- return syncable::GetModelTypeFromSpecifics(specifics);
-}
-
-// static
-string DirectoryBackingStore::ModelTypeEnumToModelId(ModelType model_type) {
- sync_pb::EntitySpecifics specifics;
- syncable::AddDefaultFieldValue(model_type, &specifics);
- return specifics.SerializeAsString();
-}
-
-// static
-std::string DirectoryBackingStore::GenerateCacheGUID() {
- // Generate a GUID with 128 bits of randomness.
- const int kGuidBytes = 128 / 8;
- std::string guid;
- base::Base64Encode(base::RandBytesAsString(kGuidBytes), &guid);
- return guid;
-}
-
-bool DirectoryBackingStore::MigrateToSpecifics(
- const char* old_columns,
- const char* specifics_column,
- void (*handler_function)(sql::Statement* old_value_query,
- int old_value_column,
- sync_pb::EntitySpecifics* mutable_new_value)) {
- std::string query_sql = base::StringPrintf(
- "SELECT metahandle, %s, %s FROM metas", specifics_column, old_columns);
- std::string update_sql = base::StringPrintf(
- "UPDATE metas SET %s = ? WHERE metahandle = ?", specifics_column);
-
- sql::Statement query(db_->GetUniqueStatement(query_sql.c_str()));
- sql::Statement update(db_->GetUniqueStatement(update_sql.c_str()));
-
- while (query.Step()) {
- int64 metahandle = query.ColumnInt64(0);
- std::string new_value_bytes;
- query.ColumnBlobAsString(1, &new_value_bytes);
- sync_pb::EntitySpecifics new_value;
- new_value.ParseFromString(new_value_bytes);
- handler_function(&query, 2, &new_value);
- new_value.SerializeToString(&new_value_bytes);
-
- update.BindBlob(0, new_value_bytes.data(), new_value_bytes.length());
- update.BindInt64(1, metahandle);
- if (!update.Run())
- return false;
- update.Reset();
- }
- return query.Succeeded();
-}
-
-bool DirectoryBackingStore::SetVersion(int version) {
- sql::Statement s(db_->GetCachedStatement(
- SQL_FROM_HERE, "UPDATE share_version SET data = ?"));
- s.BindInt(0, version);
-
- return s.Run();
-}
-
-int DirectoryBackingStore::GetVersion() {
- if (!db_->DoesTableExist("share_version"))
- return 0;
-
- sql::Statement statement(db_->GetUniqueStatement(
- "SELECT data FROM share_version"));
- if (statement.Step()) {
- return statement.ColumnInt(0);
- } else {
- return 0;
- }
-}
-
-bool DirectoryBackingStore::MigrateVersion67To68() {
- // This change simply removed three columns:
- // string NAME
- // string UNSANITIZED_NAME
- // string SERVER_NAME
- // No data migration is necessary, but we should do a column refresh.
- SetVersion(68);
- needs_column_refresh_ = true;
- return true;
-}
-
-bool DirectoryBackingStore::MigrateVersion69To70() {
- // Added "unique_client_tag", renamed "singleton_tag" to unique_server_tag
- SetVersion(70);
- if (!db_->Execute(
- "ALTER TABLE metas ADD COLUMN unique_server_tag varchar"))
- return false;
- if (!db_->Execute(
- "ALTER TABLE metas ADD COLUMN unique_client_tag varchar"))
- return false;
- needs_column_refresh_ = true;
-
- if (!db_->Execute(
- "UPDATE metas SET unique_server_tag = singleton_tag"))
- return false;
-
- return true;
-}
-
-namespace {
-
-// Callback passed to MigrateToSpecifics for the v68->v69 migration. See
-// MigrateVersion68To69().
-void EncodeBookmarkURLAndFavicon(sql::Statement* old_value_query,
- int old_value_column,
- sync_pb::EntitySpecifics* mutable_new_value) {
- // Extract data from the column trio we expect.
- bool old_is_bookmark_object = old_value_query->ColumnBool(old_value_column);
- std::string old_url = old_value_query->ColumnString(old_value_column + 1);
- std::string old_favicon;
- old_value_query->ColumnBlobAsString(old_value_column + 2, &old_favicon);
- bool old_is_dir = old_value_query->ColumnBool(old_value_column + 3);
-
- if (old_is_bookmark_object) {
- sync_pb::BookmarkSpecifics* bookmark_data =
- mutable_new_value->mutable_bookmark();
- if (!old_is_dir) {
- bookmark_data->set_url(old_url);
- bookmark_data->set_favicon(old_favicon);
- }
- }
-}
-
-} // namespace
-
-bool DirectoryBackingStore::MigrateVersion68To69() {
- // In Version 68, there were columns on table 'metas':
- // string BOOKMARK_URL
- // string SERVER_BOOKMARK_URL
- // blob BOOKMARK_FAVICON
- // blob SERVER_BOOKMARK_FAVICON
- // In version 69, these columns went away in favor of storing
- // a serialized EntrySpecifics protobuf in the columns:
- // protobuf blob SPECIFICS
- // protobuf blob SERVER_SPECIFICS
- // For bookmarks, EntrySpecifics is extended as per
- // bookmark_specifics.proto. This migration converts bookmarks from the
- // former scheme to the latter scheme.
-
- // First, add the two new columns to the schema.
- if (!db_->Execute(
- "ALTER TABLE metas ADD COLUMN specifics blob"))
- return false;
- if (!db_->Execute(
- "ALTER TABLE metas ADD COLUMN server_specifics blob"))
- return false;
-
- // Next, fold data from the old columns into the new protobuf columns.
- if (!MigrateToSpecifics(("is_bookmark_object, bookmark_url, "
- "bookmark_favicon, is_dir"),
- "specifics",
- &EncodeBookmarkURLAndFavicon)) {
- return false;
- }
- if (!MigrateToSpecifics(("server_is_bookmark_object, "
- "server_bookmark_url, "
- "server_bookmark_favicon, "
- "server_is_dir"),
- "server_specifics",
- &EncodeBookmarkURLAndFavicon)) {
- return false;
- }
-
- // Lastly, fix up the "Google Chrome" folder, which is of the TOP_LEVEL_FOLDER
- // ModelType: it shouldn't have BookmarkSpecifics.
- if (!db_->Execute(
- "UPDATE metas SET specifics = NULL, server_specifics = NULL WHERE "
- "singleton_tag IN ('google_chrome')"))
- return false;
-
- SetVersion(69);
- needs_column_refresh_ = true; // Trigger deletion of old columns.
- return true;
-}
-
-// Version 71, the columns 'initial_sync_ended' and 'last_sync_timestamp'
-// were removed from the share_info table. They were replaced by
-// the 'models' table, which has these values on a per-datatype basis.
-bool DirectoryBackingStore::MigrateVersion70To71() {
- if (!CreateV71ModelsTable())
- return false;
-
- // Move data from the old share_info columns to the new models table.
- {
- sql::Statement fetch(db_->GetUniqueStatement(
- "SELECT last_sync_timestamp, initial_sync_ended FROM share_info"));
- if (!fetch.Step())
- return false;
-
- int64 last_sync_timestamp = fetch.ColumnInt64(0);
- bool initial_sync_ended = fetch.ColumnBool(1);
-
- // Verify there were no additional rows returned.
- DCHECK(!fetch.Step());
- DCHECK(fetch.Succeeded());
-
- sql::Statement update(db_->GetUniqueStatement(
- "INSERT INTO models (model_id, "
- "last_download_timestamp, initial_sync_ended) VALUES (?, ?, ?)"));
- string bookmark_model_id = ModelTypeEnumToModelId(BOOKMARKS);
- update.BindBlob(0, bookmark_model_id.data(), bookmark_model_id.size());
- update.BindInt64(1, last_sync_timestamp);
- update.BindBool(2, initial_sync_ended);
-
- if (!update.Run())
- return false;
- }
-
- // Drop the columns from the old share_info table via a temp table.
- const bool kCreateAsTempShareInfo = true;
-
- if (!CreateShareInfoTableVersion71(kCreateAsTempShareInfo))
- return false;
- if (!db_->Execute(
- "INSERT INTO temp_share_info (id, name, store_birthday, "
- "db_create_version, db_create_time, next_id, cache_guid) "
- "SELECT id, name, store_birthday, db_create_version, "
- "db_create_time, next_id, cache_guid FROM share_info"))
- return false;
- SafeDropTable("share_info");
- if (!db_->Execute(
- "ALTER TABLE temp_share_info RENAME TO share_info"))
- return false;
- SetVersion(71);
- return true;
-}
-
-bool DirectoryBackingStore::MigrateVersion71To72() {
- // Version 72 removed a table 'extended_attributes', whose
- // contents didn't matter.
- SafeDropTable("extended_attributes");
- SetVersion(72);
- return true;
-}
-
-bool DirectoryBackingStore::MigrateVersion72To73() {
- // Version 73 added one column to the table 'share_info': notification_state
- if (!db_->Execute(
- "ALTER TABLE share_info ADD COLUMN notification_state BLOB"))
- return false;
- SetVersion(73);
- return true;
-}
-
-bool DirectoryBackingStore::MigrateVersion73To74() {
- // Version 74 added the following columns to the table 'share_info':
- // autofill_migration_state
- // bookmarks_added_during_autofill_migration
- // autofill_migration_time
- // autofill_entries_added_during_migration
- // autofill_profiles_added_during_migration
-
- if (!db_->Execute(
- "ALTER TABLE share_info ADD COLUMN "
- "autofill_migration_state INT default 0"))
- return false;
-
- if (!db_->Execute(
- "ALTER TABLE share_info ADD COLUMN "
- "bookmarks_added_during_autofill_migration "
- "INT default 0"))
- return false;
-
- if (!db_->Execute(
- "ALTER TABLE share_info ADD COLUMN autofill_migration_time "
- "INT default 0"))
- return false;
-
- if (!db_->Execute(
- "ALTER TABLE share_info ADD COLUMN "
- "autofill_entries_added_during_migration "
- "INT default 0"))
- return false;
-
- if (!db_->Execute(
- "ALTER TABLE share_info ADD COLUMN "
- "autofill_profiles_added_during_migration "
- "INT default 0"))
- return false;
-
- SetVersion(74);
- return true;
-}
-
-bool DirectoryBackingStore::MigrateVersion74To75() {
- // In version 74, there was a table 'models':
- // blob model_id (entity specifics, primary key)
- // int last_download_timestamp
- // boolean initial_sync_ended
- // In version 75, we deprecated the integer-valued last_download_timestamp,
-  // using instead a protobuf-valued progress_marker field:
- // blob progress_marker
- // The progress_marker values are initialized from the value of
- // last_download_timestamp, thereby preserving the download state.
-
- // Move aside the old table and create a new empty one at the current schema.
- if (!db_->Execute("ALTER TABLE models RENAME TO temp_models"))
- return false;
- if (!CreateModelsTable())
- return false;
-
- sql::Statement query(db_->GetUniqueStatement(
- "SELECT model_id, last_download_timestamp, initial_sync_ended "
- "FROM temp_models"));
-
- sql::Statement update(db_->GetUniqueStatement(
- "INSERT INTO models (model_id, "
- "progress_marker, initial_sync_ended) VALUES (?, ?, ?)"));
-
- while (query.Step()) {
- ModelType type = ModelIdToModelTypeEnum(query.ColumnBlob(0),
- query.ColumnByteLength(0));
- if (type != UNSPECIFIED) {
- // Set the |timestamp_token_for_migration| on a new
- // DataTypeProgressMarker, using the old value of last_download_timestamp.
- // The server will turn this into a real token on our behalf the next
- // time we check for updates.
- sync_pb::DataTypeProgressMarker progress_marker;
- progress_marker.set_data_type_id(
- GetSpecificsFieldNumberFromModelType(type));
- progress_marker.set_timestamp_token_for_migration(query.ColumnInt64(1));
- std::string progress_blob;
- progress_marker.SerializeToString(&progress_blob);
-
- update.BindBlob(0, query.ColumnBlob(0), query.ColumnByteLength(0));
- update.BindBlob(1, progress_blob.data(), progress_blob.length());
- update.BindBool(2, query.ColumnBool(2));
- if (!update.Run())
- return false;
- update.Reset();
- }
- }
- if (!query.Succeeded())
- return false;
-
- // Drop the old table.
- SafeDropTable("temp_models");
-
- SetVersion(75);
- return true;
-}
-
-bool DirectoryBackingStore::MigrateVersion75To76() {
- // This change removed five columns:
- // autofill_migration_state
- // bookmarks_added_during_autofill_migration
- // autofill_migration_time
- // autofill_entries_added_during_migration
- // autofill_profiles_added_during_migration
- // No data migration is necessary, but we should do a column refresh.
- SetVersion(76);
- needs_column_refresh_ = true;
- return true;
-}
-
-bool DirectoryBackingStore::MigrateVersion76To77() {
- // This change changes the format of stored timestamps to ms since
- // the Unix epoch.
-#if defined(OS_WIN)
-// On Windows, we used to store timestamps in FILETIME format (100s of
-// ns since Jan 1, 1601). Magic numbers taken from
-// http://stackoverflow.com/questions/5398557/java-library-for-dealing-with-win32-filetime
-// .
-#define TO_UNIX_TIME_MS(x) #x " = " #x " / 10000 - 11644473600000"
-#else
-// On other platforms, we used to store timestamps in time_t format (s
-// since the Unix epoch).
-#define TO_UNIX_TIME_MS(x) #x " = " #x " * 1000"
-#endif
- sql::Statement update_timestamps(db_->GetUniqueStatement(
- "UPDATE metas SET "
- TO_UNIX_TIME_MS(mtime) ", "
- TO_UNIX_TIME_MS(server_mtime) ", "
- TO_UNIX_TIME_MS(ctime) ", "
- TO_UNIX_TIME_MS(server_ctime)));
-#undef TO_UNIX_TIME_MS
- if (!update_timestamps.Run())
- return false;
- SetVersion(77);
- return true;
-}
-
-bool DirectoryBackingStore::MigrateVersion77To78() {
- // Version 78 added one column to table 'metas': base_server_specifics.
- if (!db_->Execute(
- "ALTER TABLE metas ADD COLUMN base_server_specifics BLOB")) {
- return false;
- }
- SetVersion(78);
- return true;
-}
-
-bool DirectoryBackingStore::CreateTables() {
- DVLOG(1) << "First run, creating tables";
- // Create two little tables share_version and share_info
- if (!db_->Execute(
- "CREATE TABLE share_version ("
- "id VARCHAR(128) primary key, data INT)")) {
- return false;
- }
-
- {
- sql::Statement s(db_->GetUniqueStatement(
- "INSERT INTO share_version VALUES(?, ?)"));
- s.BindString(0, dir_name_);
- s.BindInt(1, kCurrentDBVersion);
-
- if (!s.Run())
- return false;
- }
-
- const bool kCreateAsTempShareInfo = false;
- if (!CreateShareInfoTable(kCreateAsTempShareInfo)) {
- return false;
- }
-
- {
- sql::Statement s(db_->GetUniqueStatement(
- "INSERT INTO share_info VALUES"
- "(?, " // id
- "?, " // name
- "?, " // store_birthday
- "?, " // db_create_version
- "?, " // db_create_time
- "-2, " // next_id
- "?, " // cache_guid
- "?);")); // notification_state
- s.BindString(0, dir_name_); // id
- s.BindString(1, dir_name_); // name
- s.BindString(2, ""); // store_birthday
- s.BindString(3, SYNC_ENGINE_VERSION_STRING); // db_create_version
- s.BindInt(4, static_cast<int32>(time(0))); // db_create_time
- s.BindString(5, GenerateCacheGUID()); // cache_guid
- s.BindBlob(6, NULL, 0); // notification_state
-
- if (!s.Run())
- return false;
- }
-
- if (!CreateModelsTable())
- return false;
-
- // Create the big metas table.
- if (!CreateMetasTable(false))
- return false;
-
- {
- // Insert the entry for the root into the metas table.
- const int64 now = browser_sync::TimeToProtoTime(base::Time::Now());
- sql::Statement s(db_->GetUniqueStatement(
- "INSERT INTO metas "
- "( id, metahandle, is_dir, ctime, mtime) "
- "VALUES ( \"r\", 1, 1, ?, ?)"));
- s.BindInt64(0, now);
- s.BindInt64(1, now);
-
- if (!s.Run())
- return false;
- }
-
- return true;
-}
-
-bool DirectoryBackingStore::CreateMetasTable(bool is_temporary) {
- const char* name = is_temporary ? "temp_metas" : "metas";
- string query = "CREATE TABLE ";
- query.append(name);
- query.append(ComposeCreateTableColumnSpecs());
- return db_->Execute(query.c_str());
-}
-
-bool DirectoryBackingStore::CreateV71ModelsTable() {
- // This is an old schema for the Models table, used from versions 71 to 74.
- return db_->Execute(
- "CREATE TABLE models ("
- "model_id BLOB primary key, "
- "last_download_timestamp INT, "
- // Gets set if the syncer ever gets updates from the
- // server and the server returns 0. Lets us detect the
- // end of the initial sync.
- "initial_sync_ended BOOLEAN default 0)");
-}
-
-bool DirectoryBackingStore::CreateModelsTable() {
- // This is the current schema for the Models table, from version 75
- // onward. If you change the schema, you'll probably want to double-check
- // the use of this function in the v74-v75 migration.
- return db_->Execute(
- "CREATE TABLE models ("
- "model_id BLOB primary key, "
- "progress_marker BLOB, "
- // Gets set if the syncer ever gets updates from the
- // server and the server returns 0. Lets us detect the
- // end of the initial sync.
- "initial_sync_ended BOOLEAN default 0)");
-}
-
-bool DirectoryBackingStore::CreateShareInfoTable(bool is_temporary) {
- const char* name = is_temporary ? "temp_share_info" : "share_info";
- string query = "CREATE TABLE ";
- query.append(name);
- // This is the current schema for the ShareInfo table, from version 76
- // onward.
- query.append(" ("
- "id TEXT primary key, "
- "name TEXT, "
- "store_birthday TEXT, "
- "db_create_version TEXT, "
- "db_create_time INT, "
- "next_id INT default -2, "
- "cache_guid TEXT ");
-
- query.append(", notification_state BLOB");
- query.append(")");
- return db_->Execute(query.c_str());
-}
-
-bool DirectoryBackingStore::CreateShareInfoTableVersion71(
- bool is_temporary) {
- const char* name = is_temporary ? "temp_share_info" : "share_info";
- string query = "CREATE TABLE ";
- query.append(name);
- // This is the schema for the ShareInfo table used from versions 71 to 72.
- query.append(" ("
- "id TEXT primary key, "
- "name TEXT, "
- "store_birthday TEXT, "
- "db_create_version TEXT, "
- "db_create_time INT, "
- "next_id INT default -2, "
- "cache_guid TEXT )");
- return db_->Execute(query.c_str());
-}
-
-} // namespace syncable
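The InitializeTables() function deleted above chains per-version migrations so that a database at any older schema version is walked forward one step at a time until it reaches kCurrentDBVersion, falling back to DropAllTables()/CreateTables() when that is not possible. A condensed, hypothetical sketch of that stepping pattern (simplified names, not the real Chromium API):

// Hypothetical illustration of the one-step-at-a-time migration chain.
#include <functional>
#include <map>

bool MigrateToCurrent(int version_on_disk,
                      int current_version,
                      const std::map<int, std::function<bool()>>& steps) {
  // steps[N] upgrades the schema from version N to N+1 (e.g. steps[67]
  // would correspond to MigrateVersion67To68() above).
  while (version_on_disk < current_version) {
    auto it = steps.find(version_on_disk);
    if (it == steps.end() || !it->second())
      return false;  // Unknown or failed step; the caller recreates the DB.
    ++version_on_disk;
  }
  return true;
}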
diff --git a/chrome/browser/sync/syncable/directory_backing_store.h b/chrome/browser/sync/syncable/directory_backing_store.h
deleted file mode 100644
index caafdf8..0000000
--- a/chrome/browser/sync/syncable/directory_backing_store.h
+++ /dev/null
@@ -1,166 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CHROME_BROWSER_SYNC_SYNCABLE_DIRECTORY_BACKING_STORE_H_
-#define CHROME_BROWSER_SYNC_SYNCABLE_DIRECTORY_BACKING_STORE_H_
-#pragma once
-
-#include <string>
-
-#include "base/memory/scoped_ptr.h"
-#include "base/threading/non_thread_safe.h"
-#include "chrome/browser/sync/syncable/dir_open_result.h"
-#include "chrome/browser/sync/syncable/model_type.h"
-#include "chrome/browser/sync/syncable/syncable.h"
-#include "sql/connection.h"
-#include "sql/statement.h"
-
-namespace sync_pb {
-class EntitySpecifics;
-}
-
-namespace syncable {
-
-struct ColumnSpec;
-typedef Directory::MetahandlesIndex MetahandlesIndex;
-
-// Interface that provides persistence for a syncable::Directory object. You can
-// load all the persisted data to prime a syncable::Directory on startup by
-// invoking Load. The only other thing you (or more correctly, a Directory) can
-// do here is save any changes that have occurred since calling Load, which can
-// be done periodically as often as desired.
-//
-// The DirectoryBackingStore will own an sqlite lock on its database for most of
-// its lifetime. You must not have two DirectoryBackingStore objects accessing
-// the database simultaneously. Because the lock exists at the database level,
-// not even two separate browser instances would be able to acquire it
-// simultaneously.
-//
-// This class is abstract so that we can extend it in interesting ways for use
-// in tests. The concrete class used in non-test scenarios is
-// OnDiskDirectoryBackingStore.
-class DirectoryBackingStore : public base::NonThreadSafe {
- public:
- explicit DirectoryBackingStore(const std::string& dir_name);
- virtual ~DirectoryBackingStore();
-
- // Loads and drops all currently persisted meta entries into |entry_bucket|
- // and loads appropriate persisted kernel info into |info_bucket|.
- //
- // This function can perform some cleanup tasks behind the scenes. It will
- // clean up unused entries from the database and migrate to the latest
- // database version. The caller can safely ignore these details.
- //
- // NOTE: On success (return value of OPENED), the buckets are populated with
- // newly allocated items, meaning ownership is bestowed upon the caller.
- virtual DirOpenResult Load(MetahandlesIndex* entry_bucket,
- Directory::KernelLoadInfo* kernel_load_info) = 0;
-
- // Updates the on-disk store with the input |snapshot| as a database
- // transaction. Does NOT open any syncable transactions as this would cause
- // opening transactions elsewhere to block on synchronous I/O.
- // DO NOT CALL THIS FROM MORE THAN ONE THREAD EVER. Also, whichever thread
- // calls SaveChanges *must* be the thread that owns/destroys |this|.
- virtual bool SaveChanges(const Directory::SaveChangesSnapshot& snapshot);
-
- protected:
- // For test classes.
- DirectoryBackingStore(const std::string& dir_name,
- sql::Connection* connection);
-
- // General Directory initialization and load helpers.
- bool InitializeTables();
- bool CreateTables();
-
- // Create 'share_info' or 'temp_share_info' depending on value of
-  // is_temporary. Returns true on success.
- bool CreateShareInfoTable(bool is_temporary);
-
- bool CreateShareInfoTableVersion71(bool is_temporary);
- // Create 'metas' or 'temp_metas' depending on value of is_temporary.
- bool CreateMetasTable(bool is_temporary);
- bool CreateModelsTable();
- bool CreateV71ModelsTable();
-
- // We don't need to load any synced and applied deleted entries, we can
- // in fact just purge them forever on startup.
- bool DropDeletedEntries();
- // Drops a table if it exists, harmless if the table did not already exist.
- bool SafeDropTable(const char* table_name);
-
- // Load helpers for entries and attributes.
- bool LoadEntries(MetahandlesIndex* entry_bucket);
- bool LoadInfo(Directory::KernelLoadInfo* info);
-
- // Save/update helpers for entries. Return false if sqlite commit fails.
- bool SaveEntryToDB(const EntryKernel& entry);
- bool SaveNewEntryToDB(const EntryKernel& entry);
- bool UpdateEntryToDB(const EntryKernel& entry);
-
- DirOpenResult DoLoad(MetahandlesIndex* entry_bucket,
- Directory::KernelLoadInfo* kernel_load_info);
-
- // Close save_dbhandle_. Broken out for testing.
- void EndSave();
-
- // Removes each entry whose metahandle is in |handles| from the database.
- // Does synchronous I/O. Returns false on error.
- bool DeleteEntries(const MetahandleSet& handles);
-
- // Drop all tables in preparation for reinitialization.
- void DropAllTables();
-
- // Serialization helpers for syncable::ModelType. These convert between
- // the ModelType enum and the values we persist in the database to identify
- // a model. We persist a default instance of the specifics protobuf as the
- // ID, rather than the enum value.
- static ModelType ModelIdToModelTypeEnum(const void* data, int length);
- static std::string ModelTypeEnumToModelId(ModelType model_type);
-
- static std::string GenerateCacheGUID();
-
- // Runs an integrity check on the current database. If the
- // integrity check fails, false is returned and error is populated
- // with an error message.
- bool CheckIntegrity(sqlite3* handle, std::string* error) const;
-
- // Migration utilities.
- bool RefreshColumns();
- bool SetVersion(int version);
- int GetVersion();
-
- bool MigrateToSpecifics(const char* old_columns,
- const char* specifics_column,
- void(*handler_function) (
- sql::Statement* old_value_query,
- int old_value_column,
- sync_pb::EntitySpecifics* mutable_new_value));
-
- // Individual version migrations.
- bool MigrateVersion67To68();
- bool MigrateVersion68To69();
- bool MigrateVersion69To70();
- bool MigrateVersion70To71();
- bool MigrateVersion71To72();
- bool MigrateVersion72To73();
- bool MigrateVersion73To74();
- bool MigrateVersion74To75();
- bool MigrateVersion75To76();
- bool MigrateVersion76To77();
- bool MigrateVersion77To78();
-
- scoped_ptr<sql::Connection> db_;
- sql::Statement save_entry_statement_;
- std::string dir_name_;
-
- // Set to true if migration left some old columns around that need to be
- // discarded.
- bool needs_column_refresh_;
-
- DISALLOW_COPY_AND_ASSIGN(DirectoryBackingStore);
-};
-
-} // namespace syncable
-
-#endif // CHROME_BROWSER_SYNC_SYNCABLE_DIRECTORY_BACKING_STORE_H_
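The header above deliberately keeps DirectoryBackingStore abstract and exposes a protected (dir_name, sql::Connection*) constructor so that tests can inject their own connection. A hypothetical sketch of a minimal test-only subclass, built only from the members declared above (the real TestDirectoryBackingStore used by the unittest below may differ):

// Hypothetical test double; not the actual TestDirectoryBackingStore.
class FakeDirectoryBackingStore : public DirectoryBackingStore {
 public:
  FakeDirectoryBackingStore(const std::string& dir_name, sql::Connection* db)
      : DirectoryBackingStore(dir_name, db) {}

  // Implements the pure-virtual Load() in terms of the protected helpers.
  virtual DirOpenResult Load(MetahandlesIndex* entry_bucket,
                             Directory::KernelLoadInfo* kernel_load_info) {
    if (!InitializeTables())
      return FAILED_OPEN_DATABASE;
    if (!DropDeletedEntries() ||
        !LoadEntries(entry_bucket) ||
        !LoadInfo(kernel_load_info))
      return FAILED_DATABASE_CORRUPT;
    return OPENED;
  }
};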
diff --git a/chrome/browser/sync/syncable/directory_backing_store_unittest.cc b/chrome/browser/sync/syncable/directory_backing_store_unittest.cc
deleted file mode 100644
index 0863ce0..0000000
--- a/chrome/browser/sync/syncable/directory_backing_store_unittest.cc
+++ /dev/null
@@ -1,2162 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "testing/gtest/include/gtest/gtest.h"
-
-#include <string>
-
-#include "base/file_path.h"
-#include "base/file_util.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/scoped_temp_dir.h"
-#include "base/stl_util.h"
-#include "base/string_number_conversions.h"
-#include "chrome/browser/sync/syncable/directory_backing_store.h"
-#include "chrome/browser/sync/syncable/on_disk_directory_backing_store.h"
-#include "chrome/browser/sync/syncable/syncable-inl.h"
-#include "chrome/browser/sync/syncable/syncable.h"
-#include "chrome/browser/sync/test/test_directory_backing_store.h"
-#include "chrome/browser/sync/util/time.h"
-#include "sql/connection.h"
-#include "sql/statement.h"
-#include "sync/protocol/bookmark_specifics.pb.h"
-#include "sync/protocol/sync.pb.h"
-#include "testing/gtest/include/gtest/gtest-param-test.h"
-
-namespace syncable {
-
-extern const int32 kCurrentDBVersion;
-
-class MigrationTest : public testing::TestWithParam<int> {
- public:
- virtual void SetUp() {
- ASSERT_TRUE(temp_dir_.CreateUniqueTempDir());
- }
-
- protected:
- std::string GetUsername() {
- return "nick@chromium.org";
- }
-
- FilePath GetDatabasePath() {
- return temp_dir_.path().Append(Directory::kSyncDatabaseFilename);
- }
-
- static bool LoadAndIgnoreReturnedData(DirectoryBackingStore *dbs) {
- MetahandlesIndex metas;
- STLElementDeleter<MetahandlesIndex> index_deleter(&metas);
- Directory::KernelLoadInfo kernel_load_info;
- return dbs->Load(&metas, &kernel_load_info) == OPENED;
- }
-
- void SetUpVersion67Database(sql::Connection* connection);
- void SetUpVersion68Database(sql::Connection* connection);
- void SetUpVersion69Database(sql::Connection* connection);
- void SetUpVersion70Database(sql::Connection* connection);
- void SetUpVersion71Database(sql::Connection* connection);
- void SetUpVersion72Database(sql::Connection* connection);
- void SetUpVersion73Database(sql::Connection* connection);
- void SetUpVersion74Database(sql::Connection* connection);
- void SetUpVersion75Database(sql::Connection* connection);
- void SetUpVersion76Database(sql::Connection* connection);
- void SetUpVersion77Database(sql::Connection* connection);
-
- void SetUpCurrentDatabaseAndCheckVersion(sql::Connection* connection) {
- SetUpVersion77Database(connection); // Prepopulates data.
- scoped_ptr<TestDirectoryBackingStore> dbs(
- new TestDirectoryBackingStore(GetUsername(), connection));
-
- ASSERT_TRUE(LoadAndIgnoreReturnedData(dbs.get()));
- ASSERT_FALSE(dbs->needs_column_refresh_);
- ASSERT_EQ(kCurrentDBVersion, dbs->GetVersion());
- }
-
- private:
- ScopedTempDir temp_dir_;
-};
-
-class DirectoryBackingStoreTest : public MigrationTest {};
-
-#if defined(OS_WIN)
-
-// On Windows, we used to store timestamps in FILETIME format.
-#define LEGACY_META_PROTO_TIMES_1 129079956640320000LL
-#define LEGACY_META_PROTO_TIMES_2 128976886618480000LL
-#define LEGACY_META_PROTO_TIMES_4 129002163642690000LL
-#define LEGACY_META_PROTO_TIMES_5 129001555500000000LL
-#define LEGACY_META_PROTO_TIMES_6 129053976170000000LL
-#define LEGACY_META_PROTO_TIMES_7 128976864758480000LL
-#define LEGACY_META_PROTO_TIMES_8 128976864758480000LL
-#define LEGACY_META_PROTO_TIMES_9 128976864758480000LL
-#define LEGACY_META_PROTO_TIMES_10 128976864758480000LL
-#define LEGACY_META_PROTO_TIMES_11 129079956948440000LL
-#define LEGACY_META_PROTO_TIMES_12 129079957513650000LL
-#define LEGACY_META_PROTO_TIMES_13 129079957985300000LL
-#define LEGACY_META_PROTO_TIMES_14 129079958383000000LL
-
-#define LEGACY_META_PROTO_TIMES_STR_1 "129079956640320000"
-#define LEGACY_META_PROTO_TIMES_STR_2 "128976886618480000"
-#define LEGACY_META_PROTO_TIMES_STR_4 "129002163642690000"
-#define LEGACY_META_PROTO_TIMES_STR_5 "129001555500000000"
-#define LEGACY_META_PROTO_TIMES_STR_6 "129053976170000000"
-#define LEGACY_META_PROTO_TIMES_STR_7 "128976864758480000"
-#define LEGACY_META_PROTO_TIMES_STR_8 "128976864758480000"
-#define LEGACY_META_PROTO_TIMES_STR_9 "128976864758480000"
-#define LEGACY_META_PROTO_TIMES_STR_10 "128976864758480000"
-#define LEGACY_META_PROTO_TIMES_STR_11 "129079956948440000"
-#define LEGACY_META_PROTO_TIMES_STR_12 "129079957513650000"
-#define LEGACY_META_PROTO_TIMES_STR_13 "129079957985300000"
-#define LEGACY_META_PROTO_TIMES_STR_14 "129079958383000000"
-
-// Generated via:
-//
-// ruby -ane '$F[1].sub!("LEGACY_", ""); $F[2] = Integer($F[2].sub!("LL", "")) / 10000 - 11644473600000; print "#{$F[0]} #{$F[1]} #{$F[2]}LL\n"'
-//
-// Magic numbers taken from
-// http://stackoverflow.com/questions/5398557/java-library-for-dealing-with-win32-filetime .
-
-// Now we store them in Java format (ms since the Unix epoch).
-#define META_PROTO_TIMES_1 1263522064032LL
-#define META_PROTO_TIMES_2 1253215061848LL
-#define META_PROTO_TIMES_4 1255742764269LL
-#define META_PROTO_TIMES_5 1255681950000LL
-#define META_PROTO_TIMES_6 1260924017000LL
-#define META_PROTO_TIMES_7 1253212875848LL
-#define META_PROTO_TIMES_8 1253212875848LL
-#define META_PROTO_TIMES_9 1253212875848LL
-#define META_PROTO_TIMES_10 1253212875848LL
-#define META_PROTO_TIMES_11 1263522094844LL
-#define META_PROTO_TIMES_12 1263522151365LL
-#define META_PROTO_TIMES_13 1263522198530LL
-#define META_PROTO_TIMES_14 1263522238300LL
-
-#define META_PROTO_TIMES_STR_1 "1263522064032"
-#define META_PROTO_TIMES_STR_2 "1253215061848"
-#define META_PROTO_TIMES_STR_4 "1255742764269"
-#define META_PROTO_TIMES_STR_5 "1255681950000"
-#define META_PROTO_TIMES_STR_6 "1260924017000"
-#define META_PROTO_TIMES_STR_7 "1253212875848"
-#define META_PROTO_TIMES_STR_8 "1253212875848"
-#define META_PROTO_TIMES_STR_9 "1253212875848"
-#define META_PROTO_TIMES_STR_10 "1253212875848"
-#define META_PROTO_TIMES_STR_11 "1263522094844"
-#define META_PROTO_TIMES_STR_12 "1263522151365"
-#define META_PROTO_TIMES_STR_13 "1263522198530"
-#define META_PROTO_TIMES_STR_14 "1263522238300"
-
-#else
-
-// On other platforms, we used to store timestamps in time_t format (s
-// since the Unix epoch).
-#define LEGACY_META_PROTO_TIMES_1 1263522064LL
-#define LEGACY_META_PROTO_TIMES_2 1253215061LL
-#define LEGACY_META_PROTO_TIMES_4 1255742764LL
-#define LEGACY_META_PROTO_TIMES_5 1255681950LL
-#define LEGACY_META_PROTO_TIMES_6 1260924017LL
-#define LEGACY_META_PROTO_TIMES_7 1253212875LL
-#define LEGACY_META_PROTO_TIMES_8 1253212875LL
-#define LEGACY_META_PROTO_TIMES_9 1253212875LL
-#define LEGACY_META_PROTO_TIMES_10 1253212875LL
-#define LEGACY_META_PROTO_TIMES_11 1263522094LL
-#define LEGACY_META_PROTO_TIMES_12 1263522151LL
-#define LEGACY_META_PROTO_TIMES_13 1263522198LL
-#define LEGACY_META_PROTO_TIMES_14 1263522238LL
-
-#define LEGACY_META_PROTO_TIMES_STR_1 "1263522064"
-#define LEGACY_META_PROTO_TIMES_STR_2 "1253215061"
-#define LEGACY_META_PROTO_TIMES_STR_4 "1255742764"
-#define LEGACY_META_PROTO_TIMES_STR_5 "1255681950"
-#define LEGACY_META_PROTO_TIMES_STR_6 "1260924017"
-#define LEGACY_META_PROTO_TIMES_STR_7 "1253212875"
-#define LEGACY_META_PROTO_TIMES_STR_8 "1253212875"
-#define LEGACY_META_PROTO_TIMES_STR_9 "1253212875"
-#define LEGACY_META_PROTO_TIMES_STR_10 "1253212875"
-#define LEGACY_META_PROTO_TIMES_STR_11 "1263522094"
-#define LEGACY_META_PROTO_TIMES_STR_12 "1263522151"
-#define LEGACY_META_PROTO_TIMES_STR_13 "1263522198"
-#define LEGACY_META_PROTO_TIMES_STR_14 "1263522238"
-
-// Now we store them in Java format (ms since the Unix epoch).
-#define META_PROTO_TIMES_1 1263522064000LL
-#define META_PROTO_TIMES_2 1253215061000LL
-#define META_PROTO_TIMES_4 1255742764000LL
-#define META_PROTO_TIMES_5 1255681950000LL
-#define META_PROTO_TIMES_6 1260924017000LL
-#define META_PROTO_TIMES_7 1253212875000LL
-#define META_PROTO_TIMES_8 1253212875000LL
-#define META_PROTO_TIMES_9 1253212875000LL
-#define META_PROTO_TIMES_10 1253212875000LL
-#define META_PROTO_TIMES_11 1263522094000LL
-#define META_PROTO_TIMES_12 1263522151000LL
-#define META_PROTO_TIMES_13 1263522198000LL
-#define META_PROTO_TIMES_14 1263522238000LL
-
-#define META_PROTO_TIMES_STR_1 "1263522064000"
-#define META_PROTO_TIMES_STR_2 "1253215061000"
-#define META_PROTO_TIMES_STR_4 "1255742764000"
-#define META_PROTO_TIMES_STR_5 "1255681950000"
-#define META_PROTO_TIMES_STR_6 "1260924017000"
-#define META_PROTO_TIMES_STR_7 "1253212875000"
-#define META_PROTO_TIMES_STR_8 "1253212875000"
-#define META_PROTO_TIMES_STR_9 "1253212875000"
-#define META_PROTO_TIMES_STR_10 "1253212875000"
-#define META_PROTO_TIMES_STR_11 "1263522094000"
-#define META_PROTO_TIMES_STR_12 "1263522151000"
-#define META_PROTO_TIMES_STR_13 "1263522198000"
-#define META_PROTO_TIMES_STR_14 "1263522238000"
-
-#endif
-
-// Helper macros for the database dumps in the SetUpVersion*Database
-// functions.
-#define LEGACY_META_PROTO_TIMES(x) LEGACY_META_PROTO_TIMES_##x
-#define LEGACY_META_PROTO_TIMES_STR(x) LEGACY_META_PROTO_TIMES_STR_##x
-#define LEGACY_PROTO_TIME_VALS(x) \
- LEGACY_META_PROTO_TIMES_STR(x) "," \
- LEGACY_META_PROTO_TIMES_STR(x) "," \
- LEGACY_META_PROTO_TIMES_STR(x) "," \
- LEGACY_META_PROTO_TIMES_STR(x)
-#define META_PROTO_TIMES(x) META_PROTO_TIMES_##x
-#define META_PROTO_TIMES_STR(x) META_PROTO_TIMES_STR_##x
-#define META_PROTO_TIMES_VALS(x) \
- META_PROTO_TIMES_STR(x) "," \
- META_PROTO_TIMES_STR(x) "," \
- META_PROTO_TIMES_STR(x) "," \
- META_PROTO_TIMES_STR(x)
-
-namespace {
-
-// Helper functions for testing.
-
-enum ShouldIncludeDeletedItems {
- INCLUDE_DELETED_ITEMS,
- DONT_INCLUDE_DELETED_ITEMS
-};
-
-// Returns a map from metahandle -> expected legacy time (in proto
-// format).
-std::map<int64, int64> GetExpectedLegacyMetaProtoTimes(
- enum ShouldIncludeDeletedItems include_deleted) {
- std::map<int64, int64> expected_legacy_meta_proto_times;
- expected_legacy_meta_proto_times[1] = LEGACY_META_PROTO_TIMES(1);
- if (include_deleted == INCLUDE_DELETED_ITEMS) {
- expected_legacy_meta_proto_times[2] = LEGACY_META_PROTO_TIMES(2);
- expected_legacy_meta_proto_times[4] = LEGACY_META_PROTO_TIMES(4);
- expected_legacy_meta_proto_times[5] = LEGACY_META_PROTO_TIMES(5);
- }
- expected_legacy_meta_proto_times[6] = LEGACY_META_PROTO_TIMES(6);
- expected_legacy_meta_proto_times[7] = LEGACY_META_PROTO_TIMES(7);
- expected_legacy_meta_proto_times[8] = LEGACY_META_PROTO_TIMES(8);
- expected_legacy_meta_proto_times[9] = LEGACY_META_PROTO_TIMES(9);
- expected_legacy_meta_proto_times[10] = LEGACY_META_PROTO_TIMES(10);
- expected_legacy_meta_proto_times[11] = LEGACY_META_PROTO_TIMES(11);
- expected_legacy_meta_proto_times[12] = LEGACY_META_PROTO_TIMES(12);
- expected_legacy_meta_proto_times[13] = LEGACY_META_PROTO_TIMES(13);
- expected_legacy_meta_proto_times[14] = LEGACY_META_PROTO_TIMES(14);
- return expected_legacy_meta_proto_times;
-}
-
-// Returns a map from metahandle -> expected time (in proto format).
-std::map<int64, int64> GetExpectedMetaProtoTimes(
- enum ShouldIncludeDeletedItems include_deleted) {
- std::map<int64, int64> expected_meta_proto_times;
- expected_meta_proto_times[1] = META_PROTO_TIMES(1);
- if (include_deleted == INCLUDE_DELETED_ITEMS) {
- expected_meta_proto_times[2] = META_PROTO_TIMES(2);
- expected_meta_proto_times[4] = META_PROTO_TIMES(4);
- expected_meta_proto_times[5] = META_PROTO_TIMES(5);
- }
- expected_meta_proto_times[6] = META_PROTO_TIMES(6);
- expected_meta_proto_times[7] = META_PROTO_TIMES(7);
- expected_meta_proto_times[8] = META_PROTO_TIMES(8);
- expected_meta_proto_times[9] = META_PROTO_TIMES(9);
- expected_meta_proto_times[10] = META_PROTO_TIMES(10);
- expected_meta_proto_times[11] = META_PROTO_TIMES(11);
- expected_meta_proto_times[12] = META_PROTO_TIMES(12);
- expected_meta_proto_times[13] = META_PROTO_TIMES(13);
- expected_meta_proto_times[14] = META_PROTO_TIMES(14);
- return expected_meta_proto_times;
-}
-
-// Returns a map from metahandle -> expected time (as a Time object).
-std::map<int64, base::Time> GetExpectedMetaTimes() {
- std::map<int64, base::Time> expected_meta_times;
- const std::map<int64, int64>& expected_meta_proto_times =
- GetExpectedMetaProtoTimes(INCLUDE_DELETED_ITEMS);
- for (std::map<int64, int64>::const_iterator it =
- expected_meta_proto_times.begin();
- it != expected_meta_proto_times.end(); ++it) {
- expected_meta_times[it->first] =
- browser_sync::ProtoTimeToTime(it->second);
- }
- return expected_meta_times;
-}
-
-// Extracts a map from metahandle -> time (in proto format) from the
-// given database.
-std::map<int64, int64> GetMetaProtoTimes(sql::Connection *db) {
- sql::Statement s(db->GetCachedStatement(
- SQL_FROM_HERE,
- "SELECT metahandle, mtime, server_mtime, ctime, server_ctime "
- "FROM metas"));
- EXPECT_EQ(5, s.ColumnCount());
- std::map<int64, int64> meta_times;
- while (s.Step()) {
- int64 metahandle = s.ColumnInt64(0);
- int64 mtime = s.ColumnInt64(1);
- int64 server_mtime = s.ColumnInt64(2);
- int64 ctime = s.ColumnInt64(3);
- int64 server_ctime = s.ColumnInt64(4);
- EXPECT_EQ(mtime, server_mtime);
- EXPECT_EQ(mtime, ctime);
- EXPECT_EQ(mtime, server_ctime);
- meta_times[metahandle] = mtime;
- }
- EXPECT_TRUE(s.Succeeded());
- return meta_times;
-}
-
-::testing::AssertionResult AssertTimesMatch(const char* t1_expr,
- const char* t2_expr,
- const base::Time& t1,
- const base::Time& t2) {
- if (t1 == t2)
- return ::testing::AssertionSuccess();
-
- return ::testing::AssertionFailure()
- << t1_expr << " and " << t2_expr
- << " (internal values: " << t1.ToInternalValue()
- << " and " << t2.ToInternalValue()
- << ") (proto time: " << browser_sync::TimeToProtoTime(t1)
- << " and " << browser_sync::TimeToProtoTime(t2)
- << ") do not match";
-}
-
-// Expect that all time fields of the given entry kernel match the
-// given time.
-void ExpectTime(const EntryKernel& entry_kernel,
- const base::Time& expected_time) {
- EXPECT_PRED_FORMAT2(AssertTimesMatch,
- expected_time, entry_kernel.ref(CTIME));
- EXPECT_PRED_FORMAT2(AssertTimesMatch,
- expected_time, entry_kernel.ref(SERVER_CTIME));
- EXPECT_PRED_FORMAT2(AssertTimesMatch,
- expected_time, entry_kernel.ref(MTIME));
- EXPECT_PRED_FORMAT2(AssertTimesMatch,
- expected_time, entry_kernel.ref(SERVER_MTIME));
-}
-
-// Expect that all the entries in |index| have times matching those in
-// the given map (from metahandle to expected time).
-void ExpectTimes(const MetahandlesIndex& index,
- const std::map<int64, base::Time>& expected_times) {
- for (MetahandlesIndex::const_iterator it = index.begin();
- it != index.end(); ++it) {
- int64 meta_handle = (*it)->ref(META_HANDLE);
- SCOPED_TRACE(meta_handle);
- std::map<int64, base::Time>::const_iterator it2 =
- expected_times.find(meta_handle);
- if (it2 == expected_times.end()) {
- ADD_FAILURE() << "Could not find expected time for " << meta_handle;
- continue;
- }
- ExpectTime(**it, it2->second);
- }
-}
-
-} // namespace
-
-void MigrationTest::SetUpVersion67Database(sql::Connection* connection) {
- // This is a version 67 database dump whose contents were backformed from
- // the contents of the version 68 database dump (the v68 migration was
- // actually written first).
- ASSERT_TRUE(connection->is_open());
- ASSERT_TRUE(connection->BeginTransaction());
- ASSERT_TRUE(connection->Execute(
- "CREATE TABLE extended_attributes(metahandle bigint, key varchar(127), "
- "value blob, PRIMARY KEY(metahandle, key) ON CONFLICT REPLACE);"
- "CREATE TABLE metas (metahandle bigint primary key ON CONFLICT FAIL,"
- "base_version bigint default -1,server_version bigint default 0,"
- "mtime bigint default 0,server_mtime bigint default 0,"
- "ctime bigint default 0,server_ctime bigint default 0,"
- "server_position_in_parent bigint default 0,"
- "local_external_id bigint default 0,id varchar(255) default 'r',"
- "parent_id varchar(255) default 'r',"
- "server_parent_id varchar(255) default 'r',"
- "prev_id varchar(255) default 'r',next_id varchar(255) default 'r',"
- "is_unsynced bit default 0,is_unapplied_update bit default 0,"
- "is_del bit default 0,is_dir bit default 0,"
- "is_bookmark_object bit default 0,server_is_dir bit default 0,"
- "server_is_del bit default 0,server_is_bookmark_object bit default 0,"
- "name varchar(255), " /* COLLATE PATHNAME, */
- "unsanitized_name varchar(255)," /* COLLATE PATHNAME, */
- "non_unique_name varchar,"
- "server_name varchar(255)," /* COLLATE PATHNAME */
- "server_non_unique_name varchar,"
- "bookmark_url varchar,server_bookmark_url varchar,"
- "singleton_tag varchar,bookmark_favicon blob,"
- "server_bookmark_favicon blob);"
- "INSERT INTO metas VALUES(1,-1,0," LEGACY_PROTO_TIME_VALS(1)
- ",0,0,'r','r','r','r','r',0,0,0,1,0,0,0,0,NULL,"
- "NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL);"
- "INSERT INTO metas VALUES(2,669,669," LEGACY_PROTO_TIME_VALS(2)
- ",-2097152,"
- "4,'s_ID_2','s_ID_9','s_ID_9','s_ID_2','s_ID_2',0,0,1,0,1,0,1,1,"
- "'Deleted Item',NULL,'Deleted Item','Deleted Item','Deleted Item',"
- "'http://www.google.com/','http://www.google.com/2',NULL,'AASGASGA',"
- "'ASADGADGADG');"
- "INSERT INTO metas VALUES(4,681,681," LEGACY_PROTO_TIME_VALS(4)
- ",-3145728,"
- "3,'s_ID_4','s_ID_9','s_ID_9','s_ID_4','s_ID_4',0,0,1,0,1,0,1,1,"
- "'Welcome to Chromium',NULL,'Welcome to Chromium',"
- "'Welcome to Chromium','Welcome to Chromium',"
- "'http://www.google.com/chrome/intl/en/welcome.html',"
- "'http://www.google.com/chrome/intl/en/welcome.html',NULL,NULL,"
- "NULL);"
- "INSERT INTO metas VALUES(5,677,677," LEGACY_PROTO_TIME_VALS(5)
- ",1048576,"
- "7,'s_ID_5','s_ID_9','s_ID_9','s_ID_5','s_ID_5',0,0,1,0,1,0,1,1,"
- "'Google',NULL,'Google','Google','Google','http://www.google.com/',"
- "'http://www.google.com/',NULL,'AGASGASG','AGFDGASG');"
- "INSERT INTO metas VALUES(6,694,694," LEGACY_PROTO_TIME_VALS(6)
- ",-4194304,"
- "6,'s_ID_6','s_ID_9','s_ID_9','r','r',0,0,0,1,1,1,0,1,"
- "'The Internet',NULL,'The Internet','The Internet',"
- "'The Internet',NULL,NULL,NULL,NULL,NULL);"
- "INSERT INTO metas VALUES(7,663,663," LEGACY_PROTO_TIME_VALS(7)
- ","
- "1048576,0,'s_ID_7','r','r','r','r',0,0,0,1,1,1,0,1,"
- "'Google Chrome',NULL,'Google Chrome','Google Chrome',"
- "'Google Chrome',NULL,NULL,'google_chrome',NULL,NULL);"
- "INSERT INTO metas VALUES(8,664,664," LEGACY_PROTO_TIME_VALS(8)
- ",1048576,"
- "0,'s_ID_8','s_ID_7','s_ID_7','r','r',0,0,0,1,1,1,0,1,'Bookmarks',"
- "NULL,'Bookmarks','Bookmarks','Bookmarks',NULL,NULL,"
- "'google_chrome_bookmarks',NULL,NULL);"
- "INSERT INTO metas VALUES(9,665,665," LEGACY_PROTO_TIME_VALS(9)
- ","
- "1048576,1,'s_ID_9','s_ID_8','s_ID_8','r','s_ID_10',0,0,0,1,1,1,0,"
- "1,'Bookmark Bar',NULL,'Bookmark Bar','Bookmark Bar','Bookmark Bar',"
- "NULL,NULL,'bookmark_bar',NULL,NULL);"
- "INSERT INTO metas VALUES(10,666,666," LEGACY_PROTO_TIME_VALS(10)
- ",2097152,"
- "2,'s_ID_10','s_ID_8','s_ID_8','s_ID_9','r',0,0,0,1,1,1,0,1,"
- "'Other Bookmarks',NULL,'Other Bookmarks','Other Bookmarks',"
- "'Other Bookmarks',NULL,NULL,'other_bookmarks',"
- "NULL,NULL);"
- "INSERT INTO metas VALUES(11,683,683," LEGACY_PROTO_TIME_VALS(11)
- ",-1048576,"
- "8,'s_ID_11','s_ID_6','s_ID_6','r','s_ID_13',0,0,0,0,1,0,0,1,"
- "'Home (The Chromium Projects)',NULL,'Home (The Chromium Projects)',"
- "'Home (The Chromium Projects)','Home (The Chromium Projects)',"
- "'http://dev.chromium.org/','http://dev.chromium.org/other',NULL,"
- "'AGATWA','AFAGVASF');"
- "INSERT INTO metas VALUES(12,685,685," LEGACY_PROTO_TIME_VALS(12)
- ",0,9,"
- "'s_ID_12','s_ID_6','s_ID_6','s_ID_13','s_ID_14',0,0,0,1,1,1,0,1,"
- "'Extra Bookmarks',NULL,'Extra Bookmarks','Extra Bookmarks',"
- "'Extra Bookmarks',NULL,NULL,NULL,NULL,NULL);"
- "INSERT INTO metas VALUES(13,687,687," LEGACY_PROTO_TIME_VALS(13)
- ",-917504,"
- "10,'s_ID_13','s_ID_6','s_ID_6','s_ID_11','s_ID_12',0,0,0,0,1,0,0,"
- "1,'ICANN | Internet Corporation for Assigned Names and Numbers',"
- "'ICANN Internet Corporation for Assigned Names and Numbers',"
- "'ICANN | Internet Corporation for Assigned Names and Numbers',"
- "'ICANN | Internet Corporation for Assigned Names and Numbers',"
- "'ICANN | Internet Corporation for Assigned Names and Numbers',"
- "'http://www.icann.com/','http://www.icann.com/',NULL,"
- "'PNGAXF0AAFF','DAAFASF');"
- "INSERT INTO metas VALUES(14,692,692," LEGACY_PROTO_TIME_VALS(14)
- ",1048576,"
- "11,'s_ID_14','s_ID_6','s_ID_6','s_ID_12','r',0,0,0,0,1,0,0,1,"
- "'The WebKit Open Source Project',NULL,"
- "'The WebKit Open Source Project','The WebKit Open Source Project',"
- "'The WebKit Open Source Project','http://webkit.org/',"
- "'http://webkit.org/x',NULL,'PNGX','PNG2Y');"
- "CREATE TABLE share_info (id VARCHAR(128) primary key, "
- "last_sync_timestamp INT, name VARCHAR(128), "
- "initial_sync_ended BIT default 0, store_birthday VARCHAR(256), "
- "db_create_version VARCHAR(128), db_create_time int, "
- "next_id bigint default -2, cache_guid VARCHAR(32));"
- "INSERT INTO share_info VALUES('nick@chromium.org',694,"
- "'nick@chromium.org',1,'c27e9f59-08ca-46f8-b0cc-f16a2ed778bb',"
- "'Unknown',1263522064,-65542,"
- "'9010788312004066376x-6609234393368420856x');"
- "CREATE TABLE share_version (id VARCHAR(128) primary key, data INT);"
- "INSERT INTO share_version VALUES('nick@chromium.org',68);"));
- ASSERT_TRUE(connection->CommitTransaction());
-}
-
-void MigrationTest::SetUpVersion68Database(sql::Connection* connection) {
- // This sets up an actual version 68 database dump. The IDs were
- // canonicalized to be less huge, and the favicons were overwritten
- // with random junk so that they didn't contain any unprintable
- // characters. A few server URLs were tweaked so that they'd be
- // different from the local URLs. Lastly, the custom collation on
- // the server_non_unique_name column was removed.
- ASSERT_TRUE(connection->is_open());
- ASSERT_TRUE(connection->BeginTransaction());
- ASSERT_TRUE(connection->Execute(
- "CREATE TABLE extended_attributes(metahandle bigint, key varchar(127), "
- "value blob, PRIMARY KEY(metahandle, key) ON CONFLICT REPLACE);"
- "CREATE TABLE metas (metahandle bigint primary key ON CONFLICT FAIL,"
- "base_version bigint default -1,server_version bigint default 0,"
- "mtime bigint default 0,server_mtime bigint default 0,"
- "ctime bigint default 0,server_ctime bigint default 0,"
- "server_position_in_parent bigint default 0,"
- "local_external_id bigint default 0,id varchar(255) default 'r',"
- "parent_id varchar(255) default 'r',"
- "server_parent_id varchar(255) default 'r',"
- "prev_id varchar(255) default 'r',next_id varchar(255) default 'r',"
- "is_unsynced bit default 0,is_unapplied_update bit default 0,"
- "is_del bit default 0,is_dir bit default 0,"
- "is_bookmark_object bit default 0,server_is_dir bit default 0,"
- "server_is_del bit default 0,"
- "server_is_bookmark_object bit default 0,"
- "non_unique_name varchar,server_non_unique_name varchar(255),"
- "bookmark_url varchar,server_bookmark_url varchar,"
- "singleton_tag varchar,bookmark_favicon blob,"
- "server_bookmark_favicon blob);"
- "INSERT INTO metas VALUES(1,-1,0," LEGACY_PROTO_TIME_VALS(1)
- ",0,0,'r','r','r','r','r',0,0,0,1,0,0,0,0,NULL,"
- "NULL,NULL,NULL,NULL,NULL,NULL);"
- "INSERT INTO metas VALUES(2,669,669," LEGACY_PROTO_TIME_VALS(2)
- ",-2097152,"
- "4,'s_ID_2','s_ID_9','s_ID_9','s_ID_2','s_ID_2',0,0,1,0,1,0,1,1,"
- "'Deleted Item','Deleted Item','http://www.google.com/',"
- "'http://www.google.com/2',NULL,'AASGASGA','ASADGADGADG');"
- "INSERT INTO metas VALUES(4,681,681," LEGACY_PROTO_TIME_VALS(4)
- ",-3145728,"
- "3,'s_ID_4','s_ID_9','s_ID_9','s_ID_4','s_ID_4',0,0,1,0,1,0,1,1,"
- "'Welcome to Chromium','Welcome to Chromium',"
- "'http://www.google.com/chrome/intl/en/welcome.html',"
- "'http://www.google.com/chrome/intl/en/welcome.html',NULL,NULL,"
- "NULL);"
- "INSERT INTO metas VALUES(5,677,677," LEGACY_PROTO_TIME_VALS(5)
- ",1048576,"
- "7,'s_ID_5','s_ID_9','s_ID_9','s_ID_5','s_ID_5',0,0,1,0,1,0,1,1,"
- "'Google','Google','http://www.google.com/',"
- "'http://www.google.com/',NULL,'AGASGASG','AGFDGASG');"
- "INSERT INTO metas VALUES(6,694,694," LEGACY_PROTO_TIME_VALS(6)
- ",-4194304,"
- "6,'s_ID_6','s_ID_9','s_ID_9','r','r',0,0,0,1,1,1,0,1,"
- "'The Internet','The Internet',NULL,NULL,NULL,NULL,NULL);"
- "INSERT INTO metas VALUES(7,663,663," LEGACY_PROTO_TIME_VALS(7)
- ","
- "1048576,0,'s_ID_7','r','r','r','r',0,0,0,1,1,1,0,1,"
- "'Google Chrome','Google Chrome',NULL,NULL,'google_chrome',NULL,"
- "NULL);"
- "INSERT INTO metas VALUES(8,664,664," LEGACY_PROTO_TIME_VALS(8)
- ",1048576,"
- "0,'s_ID_8','s_ID_7','s_ID_7','r','r',0,0,0,1,1,1,0,1,'Bookmarks',"
- "'Bookmarks',NULL,NULL,'google_chrome_bookmarks',NULL,NULL);"
- "INSERT INTO metas VALUES(9,665,665," LEGACY_PROTO_TIME_VALS(9)
- ","
- "1048576,1,'s_ID_9','s_ID_8','s_ID_8','r','s_ID_10',0,0,0,1,1,1,0,"
- "1,'Bookmark Bar','Bookmark Bar',NULL,NULL,'bookmark_bar',NULL,"
- "NULL);"
- "INSERT INTO metas VALUES(10,666,666," LEGACY_PROTO_TIME_VALS(10)
- ",2097152,"
- "2,'s_ID_10','s_ID_8','s_ID_8','s_ID_9','r',0,0,0,1,1,1,0,1,"
- "'Other Bookmarks','Other Bookmarks',NULL,NULL,'other_bookmarks',"
- "NULL,NULL);"
- "INSERT INTO metas VALUES(11,683,683," LEGACY_PROTO_TIME_VALS(11)
- ",-1048576,"
- "8,'s_ID_11','s_ID_6','s_ID_6','r','s_ID_13',0,0,0,0,1,0,0,1,"
- "'Home (The Chromium Projects)','Home (The Chromium Projects)',"
- "'http://dev.chromium.org/','http://dev.chromium.org/other',NULL,"
- "'AGATWA','AFAGVASF');"
- "INSERT INTO metas VALUES(12,685,685," LEGACY_PROTO_TIME_VALS(12)
- ",0,9,"
- "'s_ID_12','s_ID_6','s_ID_6','s_ID_13','s_ID_14',0,0,0,1,1,1,0,1,"
- "'Extra Bookmarks','Extra Bookmarks',NULL,NULL,NULL,NULL,NULL);"
- "INSERT INTO metas VALUES(13,687,687," LEGACY_PROTO_TIME_VALS(13)
- ",-917504,"
- "10,'s_ID_13','s_ID_6','s_ID_6','s_ID_11','s_ID_12',0,0,0,0,1,0,0,"
- "1,'ICANN | Internet Corporation for Assigned Names and Numbers',"
- "'ICANN | Internet Corporation for Assigned Names and Numbers',"
- "'http://www.icann.com/','http://www.icann.com/',NULL,"
- "'PNGAXF0AAFF','DAAFASF');"
- "INSERT INTO metas VALUES(14,692,692," LEGACY_PROTO_TIME_VALS(14)
- ",1048576,"
- "11,'s_ID_14','s_ID_6','s_ID_6','s_ID_12','r',0,0,0,0,1,0,0,1,"
- "'The WebKit Open Source Project','The WebKit Open Source Project',"
- "'http://webkit.org/','http://webkit.org/x',NULL,'PNGX','PNG2Y');"
- "CREATE TABLE share_info (id VARCHAR(128) primary key, "
- "last_sync_timestamp INT, name VARCHAR(128), "
- "initial_sync_ended BIT default 0, store_birthday VARCHAR(256), "
- "db_create_version VARCHAR(128), db_create_time int, "
- "next_id bigint default -2, cache_guid VARCHAR(32));"
- "INSERT INTO share_info VALUES('nick@chromium.org',694,"
- "'nick@chromium.org',1,'c27e9f59-08ca-46f8-b0cc-f16a2ed778bb',"
- "'Unknown',1263522064,-65542,"
- "'9010788312004066376x-6609234393368420856x');"
- "CREATE TABLE share_version (id VARCHAR(128) primary key, data INT);"
- "INSERT INTO share_version VALUES('nick@chromium.org',68);"));
- ASSERT_TRUE(connection->CommitTransaction());
-}
-
-void MigrationTest::SetUpVersion69Database(sql::Connection* connection) {
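-  // Relative to the version 68 dump above, the metas table gains specifics
-  // and server_specifics blob columns (the legacy bookmark columns are
-  // still present).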
- ASSERT_TRUE(connection->is_open());
- ASSERT_TRUE(connection->BeginTransaction());
- ASSERT_TRUE(connection->Execute(
- "CREATE TABLE extended_attributes(metahandle bigint, key varchar(127), "
- "value blob, PRIMARY KEY(metahandle, key) ON CONFLICT REPLACE);"
- "CREATE TABLE metas (metahandle bigint primary key ON CONFLICT FAIL,"
- "base_version bigint default -1,server_version bigint default 0,"
- "mtime bigint default 0,server_mtime bigint default 0,"
- "ctime bigint default 0,server_ctime bigint default 0,"
- "server_position_in_parent bigint default 0,"
- "local_external_id bigint default 0,id varchar(255) default 'r',"
- "parent_id varchar(255) default 'r',"
- "server_parent_id varchar(255) default 'r',"
- "prev_id varchar(255) default 'r',next_id varchar(255) default 'r',"
- "is_unsynced bit default 0,is_unapplied_update bit default 0,"
- "is_del bit default 0,is_dir bit default 0,"
- "is_bookmark_object bit default 0,server_is_dir bit default 0,"
- "server_is_del bit default 0,"
- "server_is_bookmark_object bit default 0,"
- "non_unique_name varchar,server_non_unique_name varchar(255),"
- "bookmark_url varchar,server_bookmark_url varchar,"
- "singleton_tag varchar,bookmark_favicon blob,"
- "server_bookmark_favicon blob, specifics blob, "
- "server_specifics blob);"
- "INSERT INTO metas VALUES(1,-1,0," LEGACY_PROTO_TIME_VALS(1)
- ",0,0,'r','r','r','r','r',0,0,0,1,0,0,0,0,NULL,NULL,NULL,NULL,NULL,"
- "NULL,NULL,X'',X'');"
- "INSERT INTO metas VALUES(2,669,669," LEGACY_PROTO_TIME_VALS(2)
- ",-2097152,"
- "4,'s_ID_2','s_ID_9','s_ID_9','s_ID_2','s_ID_2',0,0,1,0,1,0,1,1,"
- "'Deleted Item','Deleted Item','http://www.google.com/',"
- "'http://www.google.com/2',NULL,'AASGASGA','ASADGADGADG',"
- "X'C28810220A16687474703A2F2F7777772E676F6F676C652E636F6D2F120841415"
- "34741534741',X'C28810260A17687474703A2F2F7777772E676F6F676C652E636F"
- "6D2F32120B4153414447414447414447');"
- "INSERT INTO metas VALUES(4,681,681," LEGACY_PROTO_TIME_VALS(4)
- ",-3145728,"
- "3,'s_ID_4','s_ID_9','s_ID_9','s_ID_4','s_ID_4',0,0,1,0,1,0,1,1,"
- "'Welcome to Chromium','Welcome to Chromium',"
- "'http://www.google.com/chrome/intl/en/welcome.html',"
- "'http://www.google.com/chrome/intl/en/welcome.html',NULL,NULL,NULL,"
- "X'C28810350A31687474703A2F2F7777772E676F6F676C652E636F6D2F6368726F6"
- "D652F696E746C2F656E2F77656C636F6D652E68746D6C1200',X'C28810350A3168"
- "7474703A2F2F7777772E676F6F676C652E636F6D2F6368726F6D652F696E746C2F6"
- "56E2F77656C636F6D652E68746D6C1200');"
- "INSERT INTO metas VALUES(5,677,677," LEGACY_PROTO_TIME_VALS(5)
- ",1048576,7,"
- "'s_ID_5','s_ID_9','s_ID_9','s_ID_5','s_ID_5',0,0,1,0,1,0,1,1,"
- "'Google','Google','http://www.google.com/',"
- "'http://www.google.com/',NULL,'AGASGASG','AGFDGASG',X'C28810220A166"
- "87474703A2F2F7777772E676F6F676C652E636F6D2F12084147415347415347',X'"
- "C28810220A16687474703A2F2F7777772E676F6F676C652E636F6D2F12084147464"
- "447415347');"
- "INSERT INTO metas VALUES(6,694,694," LEGACY_PROTO_TIME_VALS(6)
- ",-4194304,6"
- ",'s_ID_6','s_ID_9','s_ID_9','r','r',0,0,0,1,1,1,0,1,'The Internet',"
- "'The Internet',NULL,NULL,NULL,NULL,NULL,X'C2881000',X'C2881000');"
- "INSERT INTO metas VALUES(7,663,663," LEGACY_PROTO_TIME_VALS(7)
- ",1048576,0,"
- "'s_ID_7','r','r','r','r',0,0,0,1,1,1,0,1,'Google Chrome',"
- "'Google Chrome',NULL,NULL,'google_chrome',NULL,NULL,NULL,NULL);"
- "INSERT INTO metas VALUES(8,664,664," LEGACY_PROTO_TIME_VALS(8)
- ",1048576,0,"
- "'s_ID_8','s_ID_7','s_ID_7','r','r',0,0,0,1,1,1,0,1,'Bookmarks',"
- "'Bookmarks',NULL,NULL,'google_chrome_bookmarks',NULL,NULL,"
- "X'C2881000',X'C2881000');"
- "INSERT INTO metas VALUES(9,665,665," LEGACY_PROTO_TIME_VALS(9)
- ",1048576,1,"
- "'s_ID_9','s_ID_8','s_ID_8','r','s_ID_10',0,0,0,1,1,1,0,1,"
- "'Bookmark Bar','Bookmark Bar',NULL,NULL,'bookmark_bar',NULL,NULL,"
- "X'C2881000',X'C2881000');"
- "INSERT INTO metas VALUES(10,666,666," LEGACY_PROTO_TIME_VALS(10)
- ",2097152,2,"
- "'s_ID_10','s_ID_8','s_ID_8','s_ID_9','r',0,0,0,1,1,1,0,1,"
- "'Other Bookmarks','Other Bookmarks',NULL,NULL,'other_bookmarks',"
- "NULL,NULL,X'C2881000',X'C2881000');"
- "INSERT INTO metas VALUES(11,683,683," LEGACY_PROTO_TIME_VALS(11)
- ",-1048576,"
- "8,'s_ID_11','s_ID_6','s_ID_6','r','s_ID_13',0,0,0,0,1,0,0,1,"
- "'Home (The Chromium Projects)','Home (The Chromium Projects)',"
- "'http://dev.chromium.org/','http://dev.chromium.org/other',NULL,"
- "'AGATWA','AFAGVASF',X'C28810220A18687474703A2F2F6465762E6368726F6D6"
- "9756D2E6F72672F1206414741545741',X'C28810290A1D687474703A2F2F646576"
- "2E6368726F6D69756D2E6F72672F6F7468657212084146414756415346');"
- "INSERT INTO metas VALUES(12,685,685," LEGACY_PROTO_TIME_VALS(12)
- ",0,9,"
- "'s_ID_12','s_ID_6','s_ID_6','s_ID_13','s_ID_14',0,0,0,1,1,1,0,1,"
- "'Extra Bookmarks','Extra Bookmarks',NULL,NULL,NULL,NULL,NULL,"
- "X'C2881000',X'C2881000');"
- "INSERT INTO metas VALUES(13,687,687," LEGACY_PROTO_TIME_VALS(13)
- ",-917504,"
- "10,'s_ID_13','s_ID_6','s_ID_6','s_ID_11','s_ID_12',0,0,0,0,1,0,0,"
- "1,'ICANN | Internet Corporation for Assigned Names and Numbers',"
- "'ICANN | Internet Corporation for Assigned Names and Numbers',"
- "'http://www.icann.com/','http://www.icann.com/',NULL,'PNGAXF0AAFF',"
- "'DAAFASF',X'C28810240A15687474703A2F2F7777772E6963616E6E2E636F6D2F1"
- "20B504E474158463041414646',X'C28810200A15687474703A2F2F7777772E6963"
- "616E6E2E636F6D2F120744414146415346');"
- "INSERT INTO metas VALUES(14,692,692," LEGACY_PROTO_TIME_VALS(14)
- ",1048576,11,"
- "'s_ID_14','s_ID_6','s_ID_6','s_ID_12','r',0,0,0,0,1,0,0,1,"
- "'The WebKit Open Source Project','The WebKit Open Source Project',"
- "'http://webkit.org/','http://webkit.org/x',NULL,'PNGX','PNG2Y',"
- "X'C288101A0A12687474703A2F2F7765626B69742E6F72672F1204504E4758',X'C2"
- "88101C0A13687474703A2F2F7765626B69742E6F72672F781205504E473259');"
- "CREATE TABLE share_info (id VARCHAR(128) primary key, "
- "last_sync_timestamp INT, name VARCHAR(128), "
- "initial_sync_ended BIT default 0, store_birthday VARCHAR(256), "
- "db_create_version VARCHAR(128), db_create_time int, "
- "next_id bigint default -2, cache_guid VARCHAR(32));"
- "INSERT INTO share_info VALUES('nick@chromium.org',694,"
- "'nick@chromium.org',1,'c27e9f59-08ca-46f8-b0cc-f16a2ed778bb',"
- "'Unknown',1263522064,-65542,"
- "'9010788312004066376x-6609234393368420856x');"
- "CREATE TABLE share_version (id VARCHAR(128) primary key, data INT);"
- "INSERT INTO share_version VALUES('nick@chromium.org',69);"
- ));
- ASSERT_TRUE(connection->CommitTransaction());
-}
-
-void MigrationTest::SetUpVersion70Database(sql::Connection* connection) {
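-  // Relative to the version 69 dump, the bookmark-specific metas columns
-  // (bookmark_url, favicons, singleton_tag, is_bookmark_object) are gone;
-  // unique_server_tag and unique_client_tag take their place.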
- ASSERT_TRUE(connection->is_open());
- ASSERT_TRUE(connection->BeginTransaction());
- ASSERT_TRUE(connection->Execute(
- "CREATE TABLE extended_attributes(metahandle bigint, key varchar(127), "
- "value blob, PRIMARY KEY(metahandle, key) ON CONFLICT REPLACE);"
- "CREATE TABLE share_info (id VARCHAR(128) primary key, "
- "last_sync_timestamp INT, name VARCHAR(128), "
- "initial_sync_ended BIT default 0, store_birthday VARCHAR(256), "
- "db_create_version VARCHAR(128), db_create_time int, "
- "next_id bigint default -2, cache_guid VARCHAR(32));"
- "INSERT INTO share_info VALUES('nick@chromium.org',694,"
- "'nick@chromium.org',1,'c27e9f59-08ca-46f8-b0cc-f16a2ed778bb',"
- "'Unknown',1263522064,-65542,"
- "'9010788312004066376x-6609234393368420856x');"
- "CREATE TABLE share_version (id VARCHAR(128) primary key, data INT);"
- "INSERT INTO share_version VALUES('nick@chromium.org',70);"
- "CREATE TABLE metas(metahandle bigint primary key ON CONFLICT FAIL,"
- "base_version bigint default -1,server_version bigint default 0,"
- "mtime bigint default 0,server_mtime bigint default 0,"
- "ctime bigint default 0,server_ctime bigint default 0,"
- "server_position_in_parent bigint default 0,"
- "local_external_id bigint default 0,id varchar(255) default 'r',"
- "parent_id varchar(255) default 'r',"
- "server_parent_id varchar(255) default 'r',"
- "prev_id varchar(255) default 'r',next_id varchar(255) default 'r',"
- "is_unsynced bit default 0,is_unapplied_update bit default 0,"
- "is_del bit default 0,is_dir bit default 0,"
- "server_is_dir bit default 0,server_is_del bit default 0,"
- "non_unique_name varchar,server_non_unique_name varchar(255),"
- "unique_server_tag varchar,unique_client_tag varchar,"
- "specifics blob,server_specifics blob);"
- "INSERT INTO metas VALUES(1,-1,0," LEGACY_PROTO_TIME_VALS(1)
- ",0,0,'r','r','r','r','r',0,0,0,1,0,0,NULL,NULL,NULL,NULL,X'',X'');"
- "INSERT INTO metas VALUES(2,669,669," LEGACY_PROTO_TIME_VALS(2) ","
- "-2097152,4,'s_ID_2','s_ID_9','s_ID_9','s_ID_2','s_ID_2',0,0,1,0,0,"
- "1,'Deleted Item','Deleted Item',NULL,NULL,X'C28810220A16687474703A"
- "2F2F7777772E676F6F676C652E636F6D2F12084141534741534741',X'C2881026"
- "0A17687474703A2F2F7777772E676F6F676C652E636F6D2F32120B415341444741"
- "4447414447');"
- "INSERT INTO metas VALUES(4,681,681," LEGACY_PROTO_TIME_VALS(4)
- ",-3145728,"
- "3,'s_ID_4','s_ID_9','s_ID_9','s_ID_4','s_ID_4',0,0,1,0,0,1,"
- "'Welcome to Chromium','Welcome to Chromium',NULL,NULL,X'C28810350A"
- "31687474703A2F2F7777772E676F6F676C652E636F6D2F6368726F6D652F696E74"
- "6C2F656E2F77656C636F6D652E68746D6C1200',X'C28810350A31687474703A2F"
- "2F7777772E676F6F676C652E636F6D2F6368726F6D652F696E746C2F656E2F7765"
- "6C636F6D652E68746D6C1200');"
- "INSERT INTO metas VALUES(5,677,677," LEGACY_PROTO_TIME_VALS(5)
- ",1048576,7,"
- "'s_ID_5','s_ID_9','s_ID_9','s_ID_5','s_ID_5',0,0,1,0,0,1,'Google',"
- "'Google',NULL,NULL,X'C28810220A16687474703A2F2F7777772E676F6F676C6"
- "52E636F6D2F12084147415347415347',X'C28810220A16687474703A2F2F77777"
- "72E676F6F676C652E636F6D2F12084147464447415347');"
- "INSERT INTO metas VALUES(6,694,694," LEGACY_PROTO_TIME_VALS(6)
- ",-4194304,"
- "6,'s_ID_6','s_ID_9','s_ID_9','r','r',0,0,0,1,1,0,'The Internet',"
- "'The Internet',NULL,NULL,X'C2881000',X'C2881000');"
- "INSERT INTO metas VALUES(7,663,663," LEGACY_PROTO_TIME_VALS(7)
- ",1048576,0,"
- "'s_ID_7','r','r','r','r',0,0,0,1,1,0,'Google Chrome',"
- "'Google Chrome','google_chrome',NULL,NULL,NULL);"
- "INSERT INTO metas VALUES(8,664,664," LEGACY_PROTO_TIME_VALS(8)
- ",1048576,0,"
- "'s_ID_8','s_ID_7','s_ID_7','r','r',0,0,0,1,1,0,'Bookmarks',"
- "'Bookmarks','google_chrome_bookmarks',NULL,X'C2881000',"
- "X'C2881000');"
- "INSERT INTO metas VALUES(9,665,665," LEGACY_PROTO_TIME_VALS(9)
- ",1048576,"
- "1,'s_ID_9','s_ID_8','s_ID_8','r','s_ID_10',0,0,0,1,1,0,"
- "'Bookmark Bar','Bookmark Bar','bookmark_bar',NULL,X'C2881000',"
- "X'C2881000');"
- "INSERT INTO metas VALUES(10,666,666," LEGACY_PROTO_TIME_VALS(10)
- ","
- "2097152,2,'s_ID_10','s_ID_8','s_ID_8','s_ID_9','r',0,0,0,1,1,0,"
- "'Other Bookmarks','Other Bookmarks','other_bookmarks',NULL,"
- "X'C2881000',X'C2881000');"
- "INSERT INTO metas VALUES(11,683,683," LEGACY_PROTO_TIME_VALS(11)
- ",-1048576,"
- "8,'s_ID_11','s_ID_6','s_ID_6','r','s_ID_13',0,0,0,0,0,0,"
- "'Home (The Chromium Projects)','Home (The Chromium Projects)',"
- "NULL,NULL,X'C28810220A18687474703A2F2F6465762E6368726F6D69756D2E6F"
- "72672F1206414741545741',X'C28810290A1D687474703A2F2F6465762E636872"
- "6F6D69756D2E6F72672F6F7468657212084146414756415346');"
- "INSERT INTO metas VALUES(12,685,685," LEGACY_PROTO_TIME_VALS(12)
- ",0,9,"
- "'s_ID_12','s_ID_6','s_ID_6','s_ID_13','s_ID_14',0,0,0,1,1,0,"
- "'Extra Bookmarks','Extra Bookmarks',NULL,NULL,X'C2881000',"
- "X'C2881000');"
- "INSERT INTO metas VALUES(13,687,687," LEGACY_PROTO_TIME_VALS(13)
- ",-917504,"
- "10,'s_ID_13','s_ID_6','s_ID_6','s_ID_11','s_ID_12',0,0,0,0,0,0,"
- "'ICANN | Internet Corporation for Assigned Names and Numbers',"
- "'ICANN | Internet Corporation for Assigned Names and Numbers',"
- "NULL,NULL,X'C28810240A15687474703A2F2F7777772E6963616E6E2E636F6D2F"
- "120B504E474158463041414646',X'C28810200A15687474703A2F2F7777772E69"
- "63616E6E2E636F6D2F120744414146415346');"
- "INSERT INTO metas VALUES(14,692,692," LEGACY_PROTO_TIME_VALS(14)
- ",1048576,"
- "11,'s_ID_14','s_ID_6','s_ID_6','s_ID_12','r',0,0,0,0,0,0,"
- "'The WebKit Open Source Project','The WebKit Open Source Project',"
- "NULL,NULL,X'C288101A0A12687474703A2F2F7765626B69742E6F72672F120450"
- "4E4758',X'C288101C0A13687474703A2F2F7765626B69742E6F72672F78120550"
- "4E473259');"
- ));
- ASSERT_TRUE(connection->CommitTransaction());
-}
-
-void MigrationTest::SetUpVersion71Database(sql::Connection* connection) {
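-  // Relative to the version 70 dump, per-type download progress moves out of
-  // share_info and into a new models table (last_download_timestamp,
-  // initial_sync_ended).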
- ASSERT_TRUE(connection->is_open());
- ASSERT_TRUE(connection->BeginTransaction());
- ASSERT_TRUE(connection->Execute(
- "CREATE TABLE extended_attributes(metahandle bigint, key varchar(127), "
- "value blob, PRIMARY KEY(metahandle, key) ON CONFLICT REPLACE);"
- "CREATE TABLE share_version (id VARCHAR(128) primary key, data INT);"
- "INSERT INTO 'share_version' VALUES('nick@chromium.org',71);"
- "CREATE TABLE metas(metahandle bigint primary key ON CONFLICT FAIL,"
- "base_version bigint default -1,server_version bigint default 0,"
- "mtime bigint default 0,server_mtime bigint default 0,ctime bigint "
- "default 0,server_ctime bigint default 0,server_position_in_parent "
- "bigint default 0,local_external_id bigint default 0,id varchar(255) "
- "default 'r',parent_id varchar(255) default 'r',server_parent_id "
- "varchar(255) default 'r',prev_id varchar(255) default 'r',next_id "
- "varchar(255) default 'r',is_unsynced bit default 0,"
- "is_unapplied_update bit default 0,is_del bit default 0,is_dir bit "
- "default 0,server_is_dir bit default 0,server_is_del bit default 0,"
- "non_unique_name varchar,server_non_unique_name varchar(255),"
- "unique_server_tag varchar,unique_client_tag varchar,specifics blob,"
- "server_specifics blob);"
- "INSERT INTO 'metas' VALUES(1,-1,0," LEGACY_PROTO_TIME_VALS(1)
- ",0,0,'r','r','r','r','r',0,0,0,1,0,0,NULL,NULL,"
- "NULL,NULL,X'',X'');"
- "INSERT INTO 'metas' VALUES(2,669,669," LEGACY_PROTO_TIME_VALS(2)
- ",-2097152,4,"
- "'s_ID_2','s_ID_9','s_ID_9','s_ID_2','s_ID_2',0,0,1,0,0,1,"
- "'Deleted Item','Deleted Item',NULL,NULL,X'C28810220A16687474703A2F2F"
- "7777772E676F6F676C652E636F6D2F12084141534741534741',X'C28810260A1768"
- "7474703A2F2F7777772E676F6F676C652E636F6D2F32120B41534144474144474144"
- "47');"
- "INSERT INTO 'metas' VALUES(4,681,681," LEGACY_PROTO_TIME_VALS(4)
- ",-3145728,3,"
- "'s_ID_4','s_ID_9','s_ID_9','s_ID_4','s_ID_4',0,0,1,0,0,1,"
- "'Welcome to Chromium','Welcome to Chromium',NULL,NULL,X'C28810350A31"
- "687474703A2F2F7777772E676F6F676C652E636F6D2F6368726F6D652F696E746C2F"
- "656E2F77656C636F6D652E68746D6C1200',X'C28810350A31687474703A2F2F7777"
- "772E676F6F676C652E636F6D2F6368726F6D652F696E746C2F656E2F77656C636F6D"
- "652E68746D6C1200');"
- "INSERT INTO 'metas' VALUES(5,677,677," LEGACY_PROTO_TIME_VALS(5)
- ",1048576,7,"
- "'s_ID_5','s_ID_9','s_ID_9','s_ID_5','s_ID_5',0,0,1,0,0,1,'Google',"
- "'Google',NULL,NULL,X'C28810220A16687474703A2F2F7777772E676F6F676C652"
- "E636F6D2F12084147415347415347',X'C28810220A16687474703A2F2F7777772E6"
- "76F6F676C652E636F6D2F12084147464447415347');"
- "INSERT INTO 'metas' VALUES(6,694,694," LEGACY_PROTO_TIME_VALS(6)
- ",-4194304,6,"
- "'s_ID_6','s_ID_9','s_ID_9','r','r',0,0,0,1,1,0,'The Internet',"
- "'The Internet',NULL,NULL,X'C2881000',X'C2881000');"
- "INSERT INTO 'metas' VALUES(7,663,663," LEGACY_PROTO_TIME_VALS(7)
- ",1048576,0,"
- "'s_ID_7','r','r','r','r',0,0,0,1,1,0,'Google Chrome','Google Chrome'"
- ",'google_chrome',NULL,NULL,NULL);"
- "INSERT INTO 'metas' VALUES(8,664,664," LEGACY_PROTO_TIME_VALS(8)
- ",1048576,0,"
- "'s_ID_8','s_ID_7','s_ID_7','r','r',0,0,0,1,1,0,'Bookmarks',"
- "'Bookmarks','google_chrome_bookmarks',NULL,X'C2881000',X'C2881000');"
- "INSERT INTO 'metas' VALUES(9,665,665," LEGACY_PROTO_TIME_VALS(9)
- ",1048576,1,"
- "'s_ID_9','s_ID_8','s_ID_8','r','s_ID_10',0,0,0,1,1,0,'Bookmark Bar',"
- "'Bookmark Bar','bookmark_bar',NULL,X'C2881000',X'C2881000');"
- "INSERT INTO 'metas' VALUES(10,666,666," LEGACY_PROTO_TIME_VALS(10)
- ",2097152,2,"
- "'s_ID_10','s_ID_8','s_ID_8','s_ID_9','r',0,0,0,1,1,0,"
- "'Other Bookmarks','Other Bookmarks','other_bookmarks',NULL,"
- "X'C2881000',X'C2881000');"
- "INSERT INTO 'metas' VALUES(11,683,683," LEGACY_PROTO_TIME_VALS(11)
- ",-1048576,8,"
- "'s_ID_11','s_ID_6','s_ID_6','r','s_ID_13',0,0,0,0,0,0,"
- "'Home (The Chromium Projects)','Home (The Chromium Projects)',NULL,"
- "NULL,X'C28810220A18687474703A2F2F6465762E6368726F6D69756D2E6F72672F1"
- "206414741545741',X'C28810290A1D687474703A2F2F6465762E6368726F6D69756"
- "D2E6F72672F6F7468657212084146414756415346');"
- "INSERT INTO 'metas' VALUES(12,685,685," LEGACY_PROTO_TIME_VALS(12)
- ",0,9,"
- "'s_ID_12','s_ID_6','s_ID_6','s_ID_13','s_ID_14',0,0,0,1,1,0,"
- "'Extra Bookmarks','Extra Bookmarks',NULL,NULL,X'C2881000',"
- "X'C2881000');"
- "INSERT INTO 'metas' VALUES(13,687,687," LEGACY_PROTO_TIME_VALS(13)
- ",-917504,10,"
- "'s_ID_13','s_ID_6','s_ID_6','s_ID_11','s_ID_12',0,0,0,0,0,0,"
- "'ICANN | Internet Corporation for Assigned Names and Numbers',"
- "'ICANN | Internet Corporation for Assigned Names and Numbers',NULL,"
- "NULL,X'C28810240A15687474703A2F2F7777772E6963616E6E2E636F6D2F120B504"
- "E474158463041414646',X'C28810200A15687474703A2F2F7777772E6963616E6E2"
- "E636F6D2F120744414146415346');"
- "INSERT INTO 'metas' VALUES(14,692,692," LEGACY_PROTO_TIME_VALS(14)
- ",1048576,11,"
- "'s_ID_14','s_ID_6','s_ID_6','s_ID_12','r',0,0,0,0,0,0,"
- "'The WebKit Open Source Project','The WebKit Open Source Project',"
- "NULL,NULL,""X'C288101A0A12687474703A2F2F7765626B69742E6F72672F120450"
- "4E4758',X'C288101C0A13687474703A2F2F7765626B69742E6F72672F781205504E"
- "473259');"
- "CREATE TABLE models (model_id BLOB primary key, "
- "last_download_timestamp INT, initial_sync_ended BOOLEAN default 0);"
- "INSERT INTO 'models' VALUES(X'C2881000',694,1);"
- "CREATE TABLE 'share_info' (id TEXT primary key, name TEXT, "
- "store_birthday TEXT, db_create_version TEXT, db_create_time INT, "
- "next_id INT default -2, cache_guid TEXT);"
- "INSERT INTO 'share_info' VALUES('nick@chromium.org','nick@chromium.org',"
- "'c27e9f59-08ca-46f8-b0cc-f16a2ed778bb','Unknown',1263522064,-65542,"
- "'9010788312004066376x-6609234393368420856x');"));
- ASSERT_TRUE(connection->CommitTransaction());
-}
-
-void MigrationTest::SetUpVersion72Database(sql::Connection* connection) {
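-  // Relative to the version 71 dump, the extended_attributes table is gone.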
- ASSERT_TRUE(connection->is_open());
- ASSERT_TRUE(connection->BeginTransaction());
- ASSERT_TRUE(connection->Execute(
- "CREATE TABLE share_version (id VARCHAR(128) primary key, data INT);"
- "INSERT INTO 'share_version' VALUES('nick@chromium.org',72);"
- "CREATE TABLE metas(metahandle bigint primary key ON CONFLICT FAIL,"
- "base_version bigint default -1,server_version bigint default 0,"
- "mtime bigint default 0,server_mtime bigint default 0,ctime bigint "
- "default 0,server_ctime bigint default 0,server_position_in_parent "
- "bigint default 0,local_external_id bigint default 0,id varchar(255) "
- "default 'r',parent_id varchar(255) default 'r',server_parent_id "
- "varchar(255) default 'r',prev_id varchar(255) default 'r',next_id "
- "varchar(255) default 'r',is_unsynced bit default 0,"
- "is_unapplied_update bit default 0,is_del bit default 0,is_dir bit "
- "default 0,server_is_dir bit default 0,server_is_del bit default 0,"
- "non_unique_name varchar,server_non_unique_name varchar(255),"
- "unique_server_tag varchar,unique_client_tag varchar,specifics blob,"
- "server_specifics blob);"
- "INSERT INTO 'metas' VALUES(1,-1,0," LEGACY_PROTO_TIME_VALS(1)
- ",0,0,'r','r','r','r','r',0,0,0,1,0,0,NULL,NULL,"
- "NULL,NULL,X'',X'');"
- "INSERT INTO 'metas' VALUES(2,669,669," LEGACY_PROTO_TIME_VALS(2)
- ",-2097152,4,"
- "'s_ID_2','s_ID_9','s_ID_9','s_ID_2','s_ID_2',0,0,1,0,0,1,"
- "'Deleted Item','Deleted Item',NULL,NULL,X'C28810220A16687474703A2F2F"
- "7777772E676F6F676C652E636F6D2F12084141534741534741',X'C28810260A1768"
- "7474703A2F2F7777772E676F6F676C652E636F6D2F32120B41534144474144474144"
- "47');"
- "INSERT INTO 'metas' VALUES(4,681,681," LEGACY_PROTO_TIME_VALS(4)
- ",-3145728,3,"
- "'s_ID_4','s_ID_9','s_ID_9','s_ID_4','s_ID_4',0,0,1,0,0,1,"
- "'Welcome to Chromium','Welcome to Chromium',NULL,NULL,X'C28810350A31"
- "687474703A2F2F7777772E676F6F676C652E636F6D2F6368726F6D652F696E746C2F"
- "656E2F77656C636F6D652E68746D6C1200',X'C28810350A31687474703A2F2F7777"
- "772E676F6F676C652E636F6D2F6368726F6D652F696E746C2F656E2F77656C636F6D"
- "652E68746D6C1200');"
- "INSERT INTO 'metas' VALUES(5,677,677," LEGACY_PROTO_TIME_VALS(5)
- ",1048576,7,"
- "'s_ID_5','s_ID_9','s_ID_9','s_ID_5','s_ID_5',0,0,1,0,0,1,'Google',"
- "'Google',NULL,NULL,X'C28810220A16687474703A2F2F7777772E676F6F676C652"
- "E636F6D2F12084147415347415347',X'C28810220A16687474703A2F2F7777772E6"
- "76F6F676C652E636F6D2F12084147464447415347');"
- "INSERT INTO 'metas' VALUES(6,694,694," LEGACY_PROTO_TIME_VALS(6)
- ",-4194304,6,"
- "'s_ID_6','s_ID_9','s_ID_9','r','r',0,0,0,1,1,0,'The Internet',"
- "'The Internet',NULL,NULL,X'C2881000',X'C2881000');"
- "INSERT INTO 'metas' VALUES(7,663,663," LEGACY_PROTO_TIME_VALS(7)
- ",1048576,0,"
- "'s_ID_7','r','r','r','r',0,0,0,1,1,0,'Google Chrome','Google Chrome'"
- ",'google_chrome',NULL,NULL,NULL);"
- "INSERT INTO 'metas' VALUES(8,664,664," LEGACY_PROTO_TIME_VALS(8)
- ",1048576,0,"
- "'s_ID_8','s_ID_7','s_ID_7','r','r',0,0,0,1,1,0,'Bookmarks',"
- "'Bookmarks','google_chrome_bookmarks',NULL,X'C2881000',X'C2881000');"
- "INSERT INTO 'metas' VALUES(9,665,665," LEGACY_PROTO_TIME_VALS(9)
- ",1048576,1,"
- "'s_ID_9','s_ID_8','s_ID_8','r','s_ID_10',0,0,0,1,1,0,'Bookmark Bar',"
- "'Bookmark Bar','bookmark_bar',NULL,X'C2881000',X'C2881000');"
- "INSERT INTO 'metas' VALUES(10,666,666," LEGACY_PROTO_TIME_VALS(10)
- ",2097152,2,"
- "'s_ID_10','s_ID_8','s_ID_8','s_ID_9','r',0,0,0,1,1,0,"
- "'Other Bookmarks','Other Bookmarks','other_bookmarks',NULL,"
- "X'C2881000',X'C2881000');"
- "INSERT INTO 'metas' VALUES(11,683,683," LEGACY_PROTO_TIME_VALS(11)
- ",-1048576,8,"
- "'s_ID_11','s_ID_6','s_ID_6','r','s_ID_13',0,0,0,0,0,0,"
- "'Home (The Chromium Projects)','Home (The Chromium Projects)',NULL,"
- "NULL,X'C28810220A18687474703A2F2F6465762E6368726F6D69756D2E6F72672F1"
- "206414741545741',X'C28810290A1D687474703A2F2F6465762E6368726F6D69756"
- "D2E6F72672F6F7468657212084146414756415346');"
- "INSERT INTO 'metas' VALUES(12,685,685," LEGACY_PROTO_TIME_VALS(12)
- ",0,9,"
- "'s_ID_12','s_ID_6','s_ID_6','s_ID_13','s_ID_14',0,0,0,1,1,0,"
- "'Extra Bookmarks','Extra Bookmarks',NULL,NULL,X'C2881000',"
- "X'C2881000');"
- "INSERT INTO 'metas' VALUES(13,687,687," LEGACY_PROTO_TIME_VALS(13)
- ",-917504,10,"
- "'s_ID_13','s_ID_6','s_ID_6','s_ID_11','s_ID_12',0,0,0,0,0,0,"
- "'ICANN | Internet Corporation for Assigned Names and Numbers',"
- "'ICANN | Internet Corporation for Assigned Names and Numbers',NULL,"
- "NULL,X'C28810240A15687474703A2F2F7777772E6963616E6E2E636F6D2F120B504"
- "E474158463041414646',X'C28810200A15687474703A2F2F7777772E6963616E6E2"
- "E636F6D2F120744414146415346');"
- "INSERT INTO 'metas' VALUES(14,692,692," LEGACY_PROTO_TIME_VALS(14)
- ",1048576,11,"
- "'s_ID_14','s_ID_6','s_ID_6','s_ID_12','r',0,0,0,0,0,0,"
- "'The WebKit Open Source Project','The WebKit Open Source Project',"
- "NULL,NULL,""X'C288101A0A12687474703A2F2F7765626B69742E6F72672F120450"
- "4E4758',X'C288101C0A13687474703A2F2F7765626B69742E6F72672F781205504E"
- "473259');"
- "CREATE TABLE models (model_id BLOB primary key, "
- "last_download_timestamp INT, initial_sync_ended BOOLEAN default 0);"
- "INSERT INTO 'models' VALUES(X'C2881000',694,1);"
- "CREATE TABLE 'share_info' (id TEXT primary key, name TEXT, "
- "store_birthday TEXT, db_create_version TEXT, db_create_time INT, "
- "next_id INT default -2, cache_guid TEXT);"
- "INSERT INTO 'share_info' VALUES('nick@chromium.org','nick@chromium.org',"
- "'c27e9f59-08ca-46f8-b0cc-f16a2ed778bb','Unknown',1263522064,-65542,"
- "'9010788312004066376x-6609234393368420856x');"));
- ASSERT_TRUE(connection->CommitTransaction());
-}
-
-void MigrationTest::SetUpVersion73Database(sql::Connection* connection) {
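-  // Relative to the version 72 dump, share_info gains a notification_state
-  // BLOB column.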
- ASSERT_TRUE(connection->is_open());
- ASSERT_TRUE(connection->BeginTransaction());
- ASSERT_TRUE(connection->Execute(
- "CREATE TABLE share_version (id VARCHAR(128) primary key, data INT);"
- "INSERT INTO 'share_version' VALUES('nick@chromium.org',73);"
- "CREATE TABLE metas(metahandle bigint primary key ON CONFLICT FAIL,"
- "base_version bigint default -1,server_version bigint default 0,"
- "mtime bigint default 0,server_mtime bigint default 0,ctime bigint "
- "default 0,server_ctime bigint default 0,server_position_in_parent "
- "bigint default 0,local_external_id bigint default 0,id varchar(255) "
- "default 'r',parent_id varchar(255) default 'r',server_parent_id "
- "varchar(255) default 'r',prev_id varchar(255) default 'r',next_id "
- "varchar(255) default 'r',is_unsynced bit default 0,"
- "is_unapplied_update bit default 0,is_del bit default 0,is_dir bit "
- "default 0,server_is_dir bit default 0,server_is_del bit default 0,"
- "non_unique_name varchar,server_non_unique_name varchar(255),"
- "unique_server_tag varchar,unique_client_tag varchar,specifics blob,"
- "server_specifics blob);"
- "INSERT INTO 'metas' VALUES(1,-1,0," LEGACY_PROTO_TIME_VALS(1)
- ",0,0,'r','r','r','r','r',0,0,0,1,0,0,NULL,NULL,"
- "NULL,NULL,X'',X'');"
- "INSERT INTO 'metas' VALUES(2,669,669," LEGACY_PROTO_TIME_VALS(2)
- ",-2097152,4,"
- "'s_ID_2','s_ID_9','s_ID_9','s_ID_2','s_ID_2',0,0,1,0,0,1,"
- "'Deleted Item','Deleted Item',NULL,NULL,X'C28810220A16687474703A2F2F"
- "7777772E676F6F676C652E636F6D2F12084141534741534741',X'C28810260A1768"
- "7474703A2F2F7777772E676F6F676C652E636F6D2F32120B41534144474144474144"
- "47');"
- "INSERT INTO 'metas' VALUES(4,681,681," LEGACY_PROTO_TIME_VALS(4)
- ",-3145728,3,"
- "'s_ID_4','s_ID_9','s_ID_9','s_ID_4','s_ID_4',0,0,1,0,0,1,"
- "'Welcome to Chromium','Welcome to Chromium',NULL,NULL,X'C28810350A31"
- "687474703A2F2F7777772E676F6F676C652E636F6D2F6368726F6D652F696E746C2F"
- "656E2F77656C636F6D652E68746D6C1200',X'C28810350A31687474703A2F2F7777"
- "772E676F6F676C652E636F6D2F6368726F6D652F696E746C2F656E2F77656C636F6D"
- "652E68746D6C1200');"
- "INSERT INTO 'metas' VALUES(5,677,677," LEGACY_PROTO_TIME_VALS(5)
- ",1048576,7,"
- "'s_ID_5','s_ID_9','s_ID_9','s_ID_5','s_ID_5',0,0,1,0,0,1,'Google',"
- "'Google',NULL,NULL,X'C28810220A16687474703A2F2F7777772E676F6F676C652"
- "E636F6D2F12084147415347415347',X'C28810220A16687474703A2F2F7777772E6"
- "76F6F676C652E636F6D2F12084147464447415347');"
- "INSERT INTO 'metas' VALUES(6,694,694," LEGACY_PROTO_TIME_VALS(6)
- ",-4194304,6,"
- "'s_ID_6','s_ID_9','s_ID_9','r','r',0,0,0,1,1,0,'The Internet',"
- "'The Internet',NULL,NULL,X'C2881000',X'C2881000');"
- "INSERT INTO 'metas' VALUES(7,663,663," LEGACY_PROTO_TIME_VALS(7)
- ",1048576,0,"
- "'s_ID_7','r','r','r','r',0,0,0,1,1,0,'Google Chrome','Google Chrome'"
- ",'google_chrome',NULL,NULL,NULL);"
- "INSERT INTO 'metas' VALUES(8,664,664," LEGACY_PROTO_TIME_VALS(8)
- ",1048576,0,"
- "'s_ID_8','s_ID_7','s_ID_7','r','r',0,0,0,1,1,0,'Bookmarks',"
- "'Bookmarks','google_chrome_bookmarks',NULL,X'C2881000',X'C2881000');"
- "INSERT INTO 'metas' VALUES(9,665,665," LEGACY_PROTO_TIME_VALS(9)
- ",1048576,1,"
- "'s_ID_9','s_ID_8','s_ID_8','r','s_ID_10',0,0,0,1,1,0,'Bookmark Bar',"
- "'Bookmark Bar','bookmark_bar',NULL,X'C2881000',X'C2881000');"
- "INSERT INTO 'metas' VALUES(10,666,666," LEGACY_PROTO_TIME_VALS(10)
- ",2097152,2,"
- "'s_ID_10','s_ID_8','s_ID_8','s_ID_9','r',0,0,0,1,1,0,"
- "'Other Bookmarks','Other Bookmarks','other_bookmarks',NULL,"
- "X'C2881000',X'C2881000');"
- "INSERT INTO 'metas' VALUES(11,683,683," LEGACY_PROTO_TIME_VALS(11)
- ",-1048576,8,"
- "'s_ID_11','s_ID_6','s_ID_6','r','s_ID_13',0,0,0,0,0,0,"
- "'Home (The Chromium Projects)','Home (The Chromium Projects)',NULL,"
- "NULL,X'C28810220A18687474703A2F2F6465762E6368726F6D69756D2E6F72672F1"
- "206414741545741',X'C28810290A1D687474703A2F2F6465762E6368726F6D69756"
- "D2E6F72672F6F7468657212084146414756415346');"
- "INSERT INTO 'metas' VALUES(12,685,685," LEGACY_PROTO_TIME_VALS(12)
- ",0,9,"
- "'s_ID_12','s_ID_6','s_ID_6','s_ID_13','s_ID_14',0,0,0,1,1,0,"
- "'Extra Bookmarks','Extra Bookmarks',NULL,NULL,X'C2881000',"
- "X'C2881000');"
- "INSERT INTO 'metas' VALUES(13,687,687," LEGACY_PROTO_TIME_VALS(13)
- ",-917504,10,"
- "'s_ID_13','s_ID_6','s_ID_6','s_ID_11','s_ID_12',0,0,0,0,0,0,"
- "'ICANN | Internet Corporation for Assigned Names and Numbers',"
- "'ICANN | Internet Corporation for Assigned Names and Numbers',NULL,"
- "NULL,X'C28810240A15687474703A2F2F7777772E6963616E6E2E636F6D2F120B504"
- "E474158463041414646',X'C28810200A15687474703A2F2F7777772E6963616E6E2"
- "E636F6D2F120744414146415346');"
- "INSERT INTO 'metas' VALUES(14,692,692," LEGACY_PROTO_TIME_VALS(14)
- ",1048576,11,"
- "'s_ID_14','s_ID_6','s_ID_6','s_ID_12','r',0,0,0,0,0,0,"
- "'The WebKit Open Source Project','The WebKit Open Source Project',"
- "NULL,NULL,""X'C288101A0A12687474703A2F2F7765626B69742E6F72672F120450"
- "4E4758',X'C288101C0A13687474703A2F2F7765626B69742E6F72672F781205504E"
- "473259');"
- "CREATE TABLE models (model_id BLOB primary key, "
- "last_download_timestamp INT, initial_sync_ended BOOLEAN default 0);"
- "INSERT INTO 'models' VALUES(X'C2881000',694,1);"
- "CREATE TABLE 'share_info' (id TEXT primary key, name TEXT, "
- "store_birthday TEXT, db_create_version TEXT, db_create_time INT, "
- "next_id INT default -2, cache_guid TEXT, "
- "notification_state BLOB);"
- "INSERT INTO 'share_info' VALUES('nick@chromium.org','nick@chromium.org',"
- "'c27e9f59-08ca-46f8-b0cc-f16a2ed778bb','Unknown',1263522064,-65542,"
- "'9010788312004066376x-6609234393368420856x',X'C2881000');"));
- ASSERT_TRUE(connection->CommitTransaction());
-}
-
-void MigrationTest::SetUpVersion74Database(sql::Connection* connection) {
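-  // Relative to the version 73 dump, share_info gains the autofill migration
-  // bookkeeping columns (autofill_migration_state and friends).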
- ASSERT_TRUE(connection->is_open());
- ASSERT_TRUE(connection->BeginTransaction());
- ASSERT_TRUE(connection->Execute(
- "CREATE TABLE share_version (id VARCHAR(128) primary key, data INT);"
- "INSERT INTO 'share_version' VALUES('nick@chromium.org',74);"
- "CREATE TABLE models (model_id BLOB primary key, last_download_timestamp"
- " INT, initial_sync_ended BOOLEAN default 0);"
- "INSERT INTO 'models' VALUES(X'C2881000',694,1);"
- "CREATE TABLE 'share_info' (id TEXT primary key, name TEXT, store_birthd"
- "ay TEXT, db_create_version TEXT, db_create_time INT, next_id INT de"
- "fault -2, cache_guid TEXT , notification_state BLOB, autofill_migra"
- "tion_state INT default 0, bookmarks_added_during_autofill_migration"
- " INT default 0, autofill_migration_time INT default 0, autofill_ent"
- "ries_added_during_migration INT default 0, autofill_profiles_added_"
- "during_migration INT default 0);"
- "INSERT INTO 'share_info' VALUES('nick@chromium.org','nick@chromium.org'"
- ",'c27e9f59-08ca-46f8-b0cc-f16a2ed778bb','Unknown',1263522064,-65542"
- ",'9010788312004066376x-6609234393368420856x',NULL,0,0,0,0,0);"
- "CREATE TABLE 'metas'(metahandle bigint primary key ON CONFLICT FAIL,bas"
- "e_version bigint default -1,server_version bigint default 0,mtime b"
- "igint default 0,server_mtime bigint default 0,ctime bigint default "
- "0,server_ctime bigint default 0,server_position_in_parent bigint de"
- "fault 0,local_external_id bigint default 0,id varchar(255) default "
- "'r',parent_id varchar(255) default 'r',server_parent_id varchar(255"
- ") default 'r',prev_id varchar(255) default 'r',next_id varchar(255)"
- " default 'r',is_unsynced bit default 0,is_unapplied_update bit defa"
- "ult 0,is_del bit default 0,is_dir bit default 0,server_is_dir bit d"
- "efault 0,server_is_del bit default 0,non_unique_name varchar,server"
- "_non_unique_name varchar(255),unique_server_tag varchar,unique_clie"
- "nt_tag varchar,specifics blob,server_specifics blob);"
- "INSERT INTO 'metas' VALUES(1,-1,0," LEGACY_PROTO_TIME_VALS(1)
- ",0,0,'r','r','r','r','r',0,0,0,1,0,0,NULL,NULL,NULL,NULL,X'',X'"
- "');"
- "INSERT INTO 'metas' VALUES(2,669,669," LEGACY_PROTO_TIME_VALS(2)
- ",-2097152,4,'s_ID_2','s_ID"
- "_9','s_ID_9','s_ID_2','s_ID_2',0,0,1,0,0,1,'Deleted Item','Deleted "
- "Item',NULL,NULL,X'C28810220A16687474703A2F2F7777772E676F6F676C652E6"
- "36F6D2F12084141534741534741',X'C28810260A17687474703A2F2F7777772E67"
- "6F6F676C652E636F6D2F32120B4153414447414447414447');"
- "INSERT INTO 'metas' VALUES(4,681,681," LEGACY_PROTO_TIME_VALS(4)
- ",-3145728,3,'s_ID_4','s_ID"
- "_9','s_ID_9','s_ID_4','s_ID_4',0,0,1,0,0,1,'Welcome to Chromium','W"
- "elcome to Chromium',NULL,NULL,X'C28810350A31687474703A2F2F7777772E6"
- "76F6F676C652E636F6D2F6368726F6D652F696E746C2F656E2F77656C636F6D652E"
- "68746D6C1200',X'C28810350A31687474703A2F2F7777772E676F6F676C652E636"
- "F6D2F6368726F6D652F696E746C2F656E2F77656C636F6D652E68746D6C1200');"
- "INSERT INTO 'metas' VALUES(5,677,677," LEGACY_PROTO_TIME_VALS(5)
- ",1048576,7,'s_ID_5','s_ID_"
- "9','s_ID_9','s_ID_5','s_ID_5',0,0,1,0,0,1,'Google','Google',NULL,NU"
- "LL,X'C28810220A16687474703A2F2F7777772E676F6F676C652E636F6D2F120841"
- "47415347415347',X'C28810220A16687474703A2F2F7777772E676F6F676C652E6"
- "36F6D2F12084147464447415347');"
- "INSERT INTO 'metas' VALUES(6,694,694," LEGACY_PROTO_TIME_VALS(6)
- ",-4194304,6,'s_ID_6','s_ID"
- "_9','s_ID_9','r','r',0,0,0,1,1,0,'The Internet','The Internet',NULL"
- ",NULL,X'C2881000',X'C2881000');"
- "INSERT INTO 'metas' VALUES(7,663,663," LEGACY_PROTO_TIME_VALS(7)
- ",1048576,0,'s_ID_7','r','r"
- "','r','r',0,0,0,1,1,0,'Google Chrome','Google Chrome','google_chrom"
- "e',NULL,NULL,NULL);"
- "INSERT INTO 'metas' VALUES(8,664,664," LEGACY_PROTO_TIME_VALS(8)
- ",1048576,0,'s_ID_8','s_ID_"
- "7','s_ID_7','r','r',0,0,0,1,1,0,'Bookmarks','Bookmarks','google_chr"
- "ome_bookmarks',NULL,X'C2881000',X'C2881000');"
- "INSERT INTO 'metas' VALUES(9,665,665," LEGACY_PROTO_TIME_VALS(9)
- ",1048576,1,'s_ID_9','s_ID_"
- "8','s_ID_8','r','s_ID_10',0,0,0,1,1,0,'Bookmark Bar','Bookmark Bar'"
- ",'bookmark_bar',NULL,X'C2881000',X'C2881000');"
- "INSERT INTO 'metas' VALUES(10,666,666," LEGACY_PROTO_TIME_VALS(10)
- ",2097152,2,'s_ID_10','s_I"
- "D_8','s_ID_8','s_ID_9','r',0,0,0,1,1,0,'Other Bookmarks','Other Boo"
- "kmarks','other_bookmarks',NULL,X'C2881000',X'C2881000');"
- "INSERT INTO 'metas' VALUES(11,683,683," LEGACY_PROTO_TIME_VALS(11)
- ",-1048576,8,'s_ID_11','s_"
- "ID_6','s_ID_6','r','s_ID_13',0,0,0,0,0,0,'Home (The Chromium Projec"
- "ts)','Home (The Chromium Projects)',NULL,NULL,X'C28810220A186874747"
- "03A2F2F6465762E6368726F6D69756D2E6F72672F1206414741545741',X'C28810"
- "290A1D687474703A2F2F6465762E6368726F6D69756D2E6F72672F6F74686572120"
- "84146414756415346');"
- "INSERT INTO 'metas' VALUES(12,685,685," LEGACY_PROTO_TIME_VALS(12)
- ",0,9,'s_ID_12','s_ID_6','"
- "s_ID_6','s_ID_13','s_ID_14',0,0,0,1,1,0,'Extra Bookmarks','Extra Bo"
- "okmarks',NULL,NULL,X'C2881000',X'C2881000');"
- "INSERT INTO 'metas' VALUES(13,687,687," LEGACY_PROTO_TIME_VALS(13)
- ",-917504,10,'s_ID_13','s_"
- "ID_6','s_ID_6','s_ID_11','s_ID_12',0,0,0,0,0,0,'ICANN | Internet Co"
- "rporation for Assigned Names and Numbers','ICANN | Internet Corpora"
- "tion for Assigned Names and Numbers',NULL,NULL,X'C28810240A15687474"
- "703A2F2F7777772E6963616E6E2E636F6D2F120B504E474158463041414646',X'C"
- "28810200A15687474703A2F2F7777772E6963616E6E2E636F6D2F12074441414641"
- "5346');"
- "INSERT INTO 'metas' VALUES(14,692,692," LEGACY_PROTO_TIME_VALS(14)
- ",1048576,11,'s_ID_14','s_"
- "ID_6','s_ID_6','s_ID_12','r',0,0,0,0,0,0,'The WebKit Open Source Pr"
- "oject','The WebKit Open Source Project',NULL,NULL,X'C288101A0A12687"
- "474703A2F2F7765626B69742E6F72672F1204504E4758',X'C288101C0A13687474"
- "703A2F2F7765626B69742E6F72672F781205504E473259');"
- ));
- ASSERT_TRUE(connection->CommitTransaction());
-}
-
-void MigrationTest::SetUpVersion75Database(sql::Connection* connection) {
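-  // Relative to the version 74 dump, the models table's
-  // last_download_timestamp column is replaced by a progress_marker blob.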
- ASSERT_TRUE(connection->is_open());
- ASSERT_TRUE(connection->BeginTransaction());
- ASSERT_TRUE(connection->Execute(
- "CREATE TABLE share_version (id VARCHAR(128) primary key, data INT);"
- "INSERT INTO 'share_version' VALUES('nick@chromium.org',75);"
- "CREATE TABLE 'share_info' (id TEXT primary key, name TEXT, store_birthd"
- "ay TEXT, db_create_version TEXT, db_create_time INT, next_id INT de"
- "fault -2, cache_guid TEXT , notification_state BLOB, autofill_migra"
- "tion_state INT default 0,bookmarks_added_during_autofill_migration "
- "INT default 0, autofill_migration_time INT default 0, autofill_entr"
- "ies_added_during_migration INT default 0, autofill_profiles_added_d"
- "uring_migration INT default 0);"
- "INSERT INTO 'share_info' VALUES('nick@chromium.org','nick@chromium.org"
- "','c27e9f59-08ca-46f8-b0cc-f16a2ed778bb','Unknown',1263522064,-655"
- "42,'9010788312004066376x-6609234393368420856x',NULL,0,0,0,0,0);"
- "CREATE TABLE models (model_id BLOB primary key, progress_marker BLOB, "
- "initial_sync_ended BOOLEAN default 0);"
- "INSERT INTO 'models' VALUES(X'C2881000',X'0888810218B605',1);"
- "CREATE TABLE 'metas'(metahandle bigint primary key ON CONFLICT FAIL,ba"
- "se_version bigint default -1,server_version bigint default 0,mtime"
- " bigint default 0,server_mtime bigint default 0,ctime bigint defau"
- "lt 0,server_ctime bigint default 0,server_position_in_parent bigin"
- "t default 0,local_external_id bigint default 0,id varchar(255) def"
- "ault 'r',parent_id varchar(255) default 'r',server_parent_id varch"
- "ar(255) default 'r',prev_id varchar(255) default 'r',next_id varch"
- "ar(255) default 'r',is_unsynced bit default 0,is_unapplied_update "
- "bit default 0,is_del bit default 0,is_dir bit default 0,server_is_"
- "dir bit default 0,server_is_del bit default 0,non_unique_name varc"
- "har,server_non_unique_name varchar(255),unique_server_tag varchar,"
- "unique_client_tag varchar,specifics blob,server_specifics blob);"
- "INSERT INTO 'metas' VALUES(1,-1,0," LEGACY_PROTO_TIME_VALS(1)
- ",0,0,'r','r','r','r','r',0,0,0,1,0,0,NULL,NULL,NULL,NUL"
- "L,X'',X'');"
- "INSERT INTO 'metas' VALUES(2,669,669," LEGACY_PROTO_TIME_VALS(2)
- ",-2097152,4,'s_ID_"
- "2','s_ID_9','s_ID_9','s_ID_2','s_ID_2',0,0,1,0,0,1,'Deleted Ite"
- "m','Deleted Item',NULL,NULL,X'C28810220A16687474703A2F2F7777772"
- "E676F6F676C652E636F6D2F12084141534741534741',X'C28810260A176874"
- "74703A2F2F7777772E676F6F676C652E636F6D2F32120B41534144474144474"
- "14447');"
- "INSERT INTO 'metas' VALUES(4,681,681," LEGACY_PROTO_TIME_VALS(4)
- ",-3145728,3,'s_ID_"
- "4','s_ID_9','s_ID_9','s_ID_4','s_ID_4',0,0,1,0,0,1,'Welcome to "
- "Chromium','Welcome to Chromium',NULL,NULL,X'C28810350A316874747"
- "03A2F2F7777772E676F6F676C652E636F6D2F6368726F6D652F696E746C2F65"
- "6E2F77656C636F6D652E68746D6C1200',X'C28810350A31687474703A2F2F7"
- "777772E676F6F676C652E636F6D2F6368726F6D652F696E746C2F656E2F7765"
- "6C636F6D652E68746D6C1200');"
- "INSERT INTO 'metas' VALUES(5,677,677," LEGACY_PROTO_TIME_VALS(5)
- ",1048576,7,'s_ID_5"
- "','s_ID_9','s_ID_9','s_ID_5','s_ID_5',0,0,1,0,0,1,'Google','Goo"
- "gle',NULL,NULL,X'C28810220A16687474703A2F2F7777772E676F6F676C65"
- "2E636F6D2F12084147415347415347',X'C28810220A16687474703A2F2F777"
- "7772E676F6F676C652E636F6D2F12084147464447415347');"
- "INSERT INTO 'metas' VALUES(6,694,694," LEGACY_PROTO_TIME_VALS(6)
- ",-4194304,6,'s_ID_"
- "6','s_ID_9','s_ID_9','r','r',0,0,0,1,1,0,'The Internet','The In"
- "ternet',NULL,NULL,X'C2881000',X'C2881000');"
- "INSERT INTO 'metas' VALUES(7,663,663," LEGACY_PROTO_TIME_VALS(7)
- ",1048576,0,'s_ID_7"
- "','r','r','r','r',0,0,0,1,1,0,'Google Chrome','Google Chrome','"
- "google_chrome',NULL,NULL,NULL);"
- "INSERT INTO 'metas' VALUES(8,664,664," LEGACY_PROTO_TIME_VALS(8)
- ",1048576,0,'s_ID_8"
- "','s_ID_7','s_ID_7','r','r',0,0,0,1,1,0,'Bookmarks','Bookmarks'"
- ",'google_chrome_bookmarks',NULL,X'C2881000',X'C2881000');"
- "INSERT INTO 'metas' VALUES(9,665,665," LEGACY_PROTO_TIME_VALS(9)
- ",1048576,1,'s_ID_9"
- "','s_ID_8','s_ID_8','r','s_ID_10',0,0,0,1,1,0,'Bookmark Bar','B"
- "ookmark Bar','bookmark_bar',NULL,X'C2881000',X'C2881000');"
- "INSERT INTO 'metas' VALUES(10,666,666," LEGACY_PROTO_TIME_VALS(10)
- ",2097152,2,'s_ID_"
- "10','s_ID_8','s_ID_8','s_ID_9','r',0,0,0,1,1,0,'Other Bookmarks"
- "','Other Bookmarks','other_bookmarks',NULL,X'C2881000',X'C28810"
- "00');"
- "INSERT INTO 'metas' VALUES(11,683,683," LEGACY_PROTO_TIME_VALS(11)
- ",-1048576,8,'s_ID"
- "_11','s_ID_6','s_ID_6','r','s_ID_13',0,0,0,0,0,0,'Home (The Chr"
- "omium Projects)','Home (The Chromium Projects)',NULL,NULL,X'C28"
- "810220A18687474703A2F2F6465762E6368726F6D69756D2E6F72672F120641"
- "4741545741',X'C28810290A1D687474703A2F2F6465762E6368726F6D69756"
- "D2E6F72672F6F7468657212084146414756415346');"
- "INSERT INTO 'metas' VALUES(12,685,685," LEGACY_PROTO_TIME_VALS(12)
- ",0,9,'s_ID_12','s"
- "_ID_6','s_ID_6','s_ID_13','s_ID_14',0,0,0,1,1,0,'Extra Bookmark"
- "s','Extra Bookmarks',NULL,NULL,X'C2881000',X'C2881000');"
- "INSERT INTO 'metas' VALUES(13,687,687," LEGACY_PROTO_TIME_VALS(13)
- ",-917504,10,'s_ID"
- "_13','s_ID_6','s_ID_6','s_ID_11','s_ID_12',0,0,0,0,0,0,'ICANN |"
- " Internet Corporation for Assigned Names and Numbers','ICANN | "
- "Internet Corporation for Assigned Names and Numbers',NULL,NULL,"
- "X'C28810240A15687474703A2F2F7777772E6963616E6E2E636F6D2F120B504"
- "E474158463041414646',X'C28810200A15687474703A2F2F7777772E696361"
- "6E6E2E636F6D2F120744414146415346');"
- "INSERT INTO 'metas' VALUES(14,692,692," LEGACY_PROTO_TIME_VALS(14)
- ",1048576,11,'s_ID"
- "_14','s_ID_6','s_ID_6','s_ID_12','r',0,0,0,0,0,0,'The WebKit Op"
- "en Source Project','The WebKit Open Source Project',NULL,NULL,X"
- "'C288101A0A12687474703A2F2F7765626B69742E6F72672F1204504E4758',"
- "X'C288101C0A13687474703A2F2F7765626B69742E6F72672F781205504E473"
- "259');"
- ));
- ASSERT_TRUE(connection->CommitTransaction());
-}
-
-void MigrationTest::SetUpVersion76Database(sql::Connection* connection) {
- ASSERT_TRUE(connection->is_open());
- ASSERT_TRUE(connection->BeginTransaction());
- ASSERT_TRUE(connection->Execute(
- "CREATE TABLE share_version (id VARCHAR(128) primary key, data INT);"
- "INSERT INTO 'share_version' VALUES('nick@chromium.org',76);"
- "CREATE TABLE models (model_id BLOB primary key, progress_marker BLOB, in"
- "itial_sync_ended BOOLEAN default 0);"
- "INSERT INTO 'models' VALUES(X'C2881000',X'0888810218B605',1);"
- "CREATE TABLE 'metas'(metahandle bigint primary key ON CONFLICT FAIL,base"
- "_version bigint default -1,server_version bigint default 0,mtime big"
- "int default 0,server_mtime bigint default 0,ctime bigint default 0,s"
- "erver_ctime bigint default 0,server_position_in_parent bigint defaul"
- "t 0,local_external_id bigint default 0,id varchar(255) default 'r',p"
- "arent_id varchar(255) default 'r',server_parent_id varchar(255) defa"
- "ult 'r',prev_id varchar(255) default 'r',next_id varchar(255) defaul"
- "t 'r',is_unsynced bit default 0,is_unapplied_update bit default 0,is"
- "_del bit default 0,is_dir bit default 0,server_is_dir bit default 0,"
- "server_is_del bit default 0,non_unique_name varchar,server_non_uniqu"
- "e_name varchar(255),unique_server_tag varchar,unique_client_tag varc"
- "har,specifics blob,server_specifics blob);"
- "INSERT INTO 'metas' VALUES(1,-1,0," LEGACY_PROTO_TIME_VALS(1)
- ",0,0,'r','r','r','r','r',0,0,0,1,0,0,NULL,NULL,NULL,NULL,X'',X'')"
- ";"
- "INSERT INTO 'metas' VALUES(2,669,669," LEGACY_PROTO_TIME_VALS(2)
- ",-2097152,4,'s_ID_2','s_ID_9"
- "','s_ID_9','s_ID_2','s_ID_2',0,0,1,0,0,1,'Deleted Item','Deleted Ite"
- "m',NULL,NULL,X'C28810220A16687474703A2F2F7777772E676F6F676C652E636F6"
- "D2F12084141534741534741',X'C28810260A17687474703A2F2F7777772E676F6F6"
- "76C652E636F6D2F32120B4153414447414447414447');"
- "INSERT INTO 'metas' VALUES(4,681,681," LEGACY_PROTO_TIME_VALS(4)
- ",-3145728,3,'s_ID_4','s_ID_9"
- "','s_ID_9','s_ID_4','s_ID_4',0,0,1,0,0,1,'Welcome to Chromium','Welc"
- "ome to Chromium',NULL,NULL,X'C28810350A31687474703A2F2F7777772E676F6"
- "F676C652E636F6D2F6368726F6D652F696E746C2F656E2F77656C636F6D652E68746"
- "D6C1200',X'C28810350A31687474703A2F2F7777772E676F6F676C652E636F6D2F6"
- "368726F6D652F696E746C2F656E2F77656C636F6D652E68746D6C1200');"
- "INSERT INTO 'metas' VALUES(5,677,677," LEGACY_PROTO_TIME_VALS(5)
- ",1048576,7,'s_ID_5','s_ID_9'"
- ",'s_ID_9','s_ID_5','s_ID_5',0,0,1,0,0,1,'Google','Google',NULL,NULL,"
- "X'C28810220A16687474703A2F2F7777772E676F6F676C652E636F6D2F1208414741"
- "5347415347',X'C28810220A16687474703A2F2F7777772E676F6F676C652E636F6D"
- "2F12084147464447415347');"
- "INSERT INTO 'metas' VALUES(6,694,694," LEGACY_PROTO_TIME_VALS(6)
- ",-4194304,6,'s_ID_6','s_ID_9"
- "','s_ID_9','r','r',0,0,0,1,1,0,'The Internet','The Internet',NULL,NU"
- "LL,X'C2881000',X'C2881000');"
- "INSERT INTO 'metas' VALUES(7,663,663," LEGACY_PROTO_TIME_VALS(7)
- ",1048576,0,'s_ID_7','r','r',"
- "'r','r',0,0,0,1,1,0,'Google Chrome','Google Chrome','google_chrome',"
- "NULL,NULL,NULL);"
- "INSERT INTO 'metas' VALUES(8,664,664," LEGACY_PROTO_TIME_VALS(8)
- ",1048576,0,'s_ID_8','s_ID_7'"
- ",'s_ID_7','r','r',0,0,0,1,1,0,'Bookmarks','Bookmarks','google_chrome"
- "_bookmarks',NULL,X'C2881000',X'C2881000');"
- "INSERT INTO 'metas' VALUES(9,665,665," LEGACY_PROTO_TIME_VALS(9)
- ",1048576,1,'s_ID_9','s_ID_8'"
- ",'s_ID_8','r','s_ID_10',0,0,0,1,1,0,'Bookmark Bar','Bookmark Bar','b"
- "ookmark_bar',NULL,X'C2881000',X'C2881000');"
- "INSERT INTO 'metas' VALUES(10,666,666," LEGACY_PROTO_TIME_VALS(10)
- ",2097152,2,'s_ID_10','s_ID_"
- "8','s_ID_8','s_ID_9','r',0,0,0,1,1,0,'Other Bookmarks','Other Bookma"
- "rks','other_bookmarks',NULL,X'C2881000',X'C2881000');"
- "INSERT INTO 'metas' VALUES(11,683,683," LEGACY_PROTO_TIME_VALS(11)
- ",-1048576,8,'s_ID_11','s_ID"
- "_6','s_ID_6','r','s_ID_13',0,0,0,0,0,0,'Home (The Chromium Projects)"
- "','Home (The Chromium Projects)',NULL,NULL,X'C28810220A18687474703A2"
- "F2F6465762E6368726F6D69756D2E6F72672F1206414741545741',X'C28810290A1"
- "D687474703A2F2F6465762E6368726F6D69756D2E6F72672F6F74686572120841464"
- "14756415346');"
- "INSERT INTO 'metas' VALUES(12,685,685," LEGACY_PROTO_TIME_VALS(12)
- ",0,9,'s_ID_12','s_ID_6','s_"
- "ID_6','s_ID_13','s_ID_14',0,0,0,1,1,0,'Extra Bookmarks','Extra Bookm"
- "arks',NULL,NULL,X'C2881000',X'C2881000');"
- "INSERT INTO 'metas' VALUES(13,687,687," LEGACY_PROTO_TIME_VALS(13)
- ",-917504,10,'s_ID_13','s_ID"
- "_6','s_ID_6','s_ID_11','s_ID_12',0,0,0,0,0,0,'ICANN | Internet Corpo"
- "ration for Assigned Names and Numbers','ICANN | Internet Corporation"
- " for Assigned Names and Numbers',NULL,NULL,X'C28810240A15687474703A2"
- "F2F7777772E6963616E6E2E636F6D2F120B504E474158463041414646',X'C288102"
- "00A15687474703A2F2F7777772E6963616E6E2E636F6D2F120744414146415346');"
- "INSERT INTO 'metas' VALUES(14,692,692," LEGACY_PROTO_TIME_VALS(14)
- ",1048576,11,'s_ID_14','s_ID"
- "_6','s_ID_6','s_ID_12','r',0,0,0,0,0,0,'The WebKit Open Source Proje"
- "ct','The WebKit Open Source Project',NULL,NULL,X'C288101A0A126874747"
- "03A2F2F7765626B69742E6F72672F1204504E4758',X'C288101C0A13687474703A2"
- "F2F7765626B69742E6F72672F781205504E473259');"
- "CREATE TABLE 'share_info' (id TEXT primary key, name TEXT, store_birthda"
- "y TEXT, db_create_version TEXT, db_create_time INT, next_id INT defa"
- "ult -2, cache_guid TEXT , notification_state BLOB);"
- "INSERT INTO 'share_info' VALUES('nick@chromium.org','nick@chromium.org',"
- "'c27e9f59-08ca-46f8-b0cc-f16a2ed778bb','Unknown',1263522064,-65542,'"
- "9010788312004066376x-6609234393368420856x',NULL);"
- ));
- ASSERT_TRUE(connection->CommitTransaction());
-}
-
-void MigrationTest::SetUpVersion77Database(sql::Connection* connection) {
- ASSERT_TRUE(connection->is_open());
- ASSERT_TRUE(connection->BeginTransaction());
- ASSERT_TRUE(connection->Execute(
- "CREATE TABLE share_version (id VARCHAR(128) primary key, data INT);"
- "INSERT INTO 'share_version' VALUES('nick@chromium.org',77);"
- "CREATE TABLE models (model_id BLOB primary key, progress_marker BLOB, in"
- "itial_sync_ended BOOLEAN default 0);"
- "INSERT INTO 'models' VALUES(X'C2881000',X'0888810218B605',1);"
- "CREATE TABLE 'metas'(metahandle bigint primary key ON CONFLICT FAIL,base"
- "_version bigint default -1,server_version bigint default 0,server_po"
- "sition_in_parent bigint default 0,local_external_id bigint default 0"
- ",mtime bigint default 0,server_mtime bigint default 0,ctime bigint d"
- "efault 0,server_ctime bigint default 0,id varchar(255) default 'r',p"
- "arent_id varchar(255) default 'r',server_parent_id varchar(255) defa"
- "ult 'r',prev_id varchar(255) default 'r',next_id varchar(255) defaul"
- "t 'r',is_unsynced bit default 0,is_unapplied_update bit default 0,is"
- "_del bit default 0,is_dir bit default 0,server_is_dir bit default 0,"
- "server_is_del bit default 0,non_unique_name varchar,server_non_uniqu"
- "e_name varchar(255),unique_server_tag varchar,unique_client_tag varc"
- "har,specifics blob,server_specifics blob);"
- "INSERT INTO 'metas' VALUES(1,-1,0,0,0," META_PROTO_TIMES_VALS(1)
- ",'r','r','r','r','r',0,0,0,1,0,0,NULL,NULL,NULL,NULL,X'',X'');"
- "INSERT INTO 'metas' VALUES(2,669,669,-2097152,4,"
- META_PROTO_TIMES_VALS(2) ",'s_ID_2','s_ID_9','s_ID_9','s_ID_2','s_ID_"
- "2',0,0,1,0,0,1,'Deleted Item','Deleted Item',NULL,NULL,X'C28810220A1"
- "6687474703A2F2F7777772E676F6F676C652E636F6D2F12084141534741534741',X"
- "'C28810260A17687474703A2F2F7777772E676F6F676C652E636F6D2F32120B41534"
- "14447414447414447');"
- "INSERT INTO 'metas' VALUES(4,681,681,-3145728,3,"
- META_PROTO_TIMES_VALS(4) ",'s_ID_4','s_ID_9','s_ID_9','s_ID_4','s_ID_"
- "4',0,0,1,0,0,1,'Welcome to Chromium','Welcome to Chromium',NULL,NULL"
- ",X'C28810350A31687474703A2F2F7777772E676F6F676C652E636F6D2F6368726F6"
- "D652F696E746C2F656E2F77656C636F6D652E68746D6C1200',X'C28810350A31687"
- "474703A2F2F7777772E676F6F676C652E636F6D2F6368726F6D652F696E746C2F656"
- "E2F77656C636F6D652E68746D6C1200');"
- "INSERT INTO 'metas' VALUES(5,677,677,1048576,7," META_PROTO_TIMES_VALS(5)
- ",'s_ID_5','s_ID_9','s_ID_9','s_ID_5','s_ID_5',0,0,1,0,0,1,'Google','"
- "Google',NULL,NULL,X'C28810220A16687474703A2F2F7777772E676F6F676C652E"
- "636F6D2F12084147415347415347',X'C28810220A16687474703A2F2F7777772E67"
- "6F6F676C652E636F6D2F12084147464447415347');"
- "INSERT INTO 'metas' VALUES(6,694,694,-4194304,6,"
- META_PROTO_TIMES_VALS(6) ",'s_ID_6','s_ID_9','s_ID_9','r','r',0,0,0,1"
- ",1,0,'The Internet','The Internet',NULL,NULL,X'C2881000',X'C2881000'"
- ");"
- "INSERT INTO 'metas' VALUES(7,663,663,1048576,0," META_PROTO_TIMES_VALS(7)
- ",'s_ID_7','r','r','r','r',0,0,0,1,1,0,'Google Chrome','Goo"
- "gle Chrome','google_chrome',NULL,NULL,NULL);"
- "INSERT INTO 'metas' VALUES(8,664,664,1048576,0," META_PROTO_TIMES_VALS(8)
- ",'s_ID_8','s_ID_7','s_ID_7','r','r',0,0,0,1,1,0,'Bookmarks','Bookmar"
- "ks','google_chrome_bookmarks',NULL,X'C2881000',X'C2881000');"
- "INSERT INTO 'metas' VALUES(9,665,665,1048576,1," META_PROTO_TIMES_VALS(9)
- ",'s_ID_9','s_ID_8','s_ID_8','r','s_ID_10',0,0,0,1,1,0,'Bookmark Bar'"
- ",'Bookmark Bar','bookmark_bar',NULL,X'C2881000',X'C2881000');"
- "INSERT INTO 'metas' VALUES(10,666,666,2097152,2,"
- META_PROTO_TIMES_VALS(10) ",'s_ID_10','s_ID_8','s_ID_8','s_ID_9','r',"
- "0,0,0,1,1,0,'Other Bookmarks','Other Bookmarks','other_bookmarks',NU"
- "LL,X'C2881000',X'C2881000');"
- "INSERT INTO 'metas' VALUES(11,683,683,-1048576,8,"
- META_PROTO_TIMES_VALS(11) ",'s_ID_11','s_ID_6','s_ID_6','r','s_ID_13'"
- ",0,0,0,0,0,0,'Home (The Chromium Projects)','Home (The Chromium Proj"
- "ects)',NULL,NULL,X'C28810220A18687474703A2F2F6465762E6368726F6D69756"
- "D2E6F72672F1206414741545741',X'C28810290A1D687474703A2F2F6465762E636"
- "8726F6D69756D2E6F72672F6F7468657212084146414756415346');"
- "INSERT INTO 'metas' VALUES(12,685,685,0,9," META_PROTO_TIMES_VALS(12)
- ",'s_ID_12','s_ID_6','s_"
- "ID_6','s_ID_13','s_ID_14',0,0,0,1,1,0,'Extra Bookmarks','Extra Bookm"
- "arks',NULL,NULL,X'C2881000',X'C2881000');"
- "INSERT INTO 'metas' VALUES(13,687,687,-917504,10,"
- META_PROTO_TIMES_VALS(13) ",'s_ID_13','s_ID_6','s_ID_6','s_ID_11','s_"
- "ID_12',0,0,0,0,0,0,'ICANN | Internet Corporation for Assigned Names "
- "and Numbers','ICANN | Internet Corporation for Assigned Names and Nu"
- "mbers',NULL,NULL,X'C28810240A15687474703A2F2F7777772E6963616E6E2E636"
- "F6D2F120B504E474158463041414646',X'C28810200A15687474703A2F2F7777772"
- "E6963616E6E2E636F6D2F120744414146415346');"
- "INSERT INTO 'metas' VALUES(14,692,692,1048576,11,"
- META_PROTO_TIMES_VALS(14) ",'s_ID_14','s_ID_6','s_ID_6','s_ID_12','r'"
- ",0,0,0,0,0,0,'The WebKit Open Source Project','The WebKit Open Sourc"
- "e Project',NULL,NULL,X'C288101A0A12687474703A2F2F7765626B69742E6F726"
- "72F1204504E4758',X'C288101C0A13687474703A2F2F7765626B69742E6F72672F7"
- "81205504E473259');"
- "CREATE TABLE 'share_info' (id TEXT primary key, name TEXT, store_birthda"
- "y TEXT, db_create_version TEXT, db_create_time INT, next_id INT defa"
- "ult -2, cache_guid TEXT , notification_state BLOB);"
- "INSERT INTO 'share_info' VALUES('nick@chromium.org','nick@chromium.org',"
- "'c27e9f59-08ca-46f8-b0cc-f16a2ed778bb','Unknown',1263522064,-65542,'"
- "9010788312004066376x-6609234393368420856x',NULL);"
- ));
- ASSERT_TRUE(connection->CommitTransaction());
-}
-
-TEST_F(DirectoryBackingStoreTest, MigrateVersion67To68) {
- sql::Connection connection;
- ASSERT_TRUE(connection.OpenInMemory());
-
- SetUpVersion67Database(&connection);
-
- // Columns existing before version 67.
- ASSERT_TRUE(connection.DoesColumnExist("metas", "name"));
- ASSERT_TRUE(connection.DoesColumnExist("metas", "unsanitized_name"));
- ASSERT_TRUE(connection.DoesColumnExist("metas", "server_name"));
-
- scoped_ptr<TestDirectoryBackingStore> dbs(
- new TestDirectoryBackingStore(GetUsername(), &connection));
-
- ASSERT_FALSE(dbs->needs_column_refresh_);
- ASSERT_TRUE(dbs->MigrateVersion67To68());
- ASSERT_EQ(68, dbs->GetVersion());
- ASSERT_TRUE(dbs->needs_column_refresh_);
-}
-
-TEST_F(DirectoryBackingStoreTest, MigrateVersion68To69) {
- sql::Connection connection;
- ASSERT_TRUE(connection.OpenInMemory());
- SetUpVersion68Database(&connection);
-
- {
- scoped_ptr<TestDirectoryBackingStore> dbs(
- new TestDirectoryBackingStore(GetUsername(), &connection));
-
- ASSERT_FALSE(dbs->needs_column_refresh_);
- ASSERT_TRUE(dbs->MigrateVersion68To69());
- ASSERT_EQ(69, dbs->GetVersion());
- ASSERT_TRUE(dbs->needs_column_refresh_);
- }
-
- ASSERT_TRUE(connection.DoesColumnExist("metas", "specifics"));
- ASSERT_TRUE(connection.DoesColumnExist("metas", "server_specifics"));
- sql::Statement s(connection.GetUniqueStatement("SELECT non_unique_name,"
- "is_del, is_dir, id, specifics, server_specifics FROM metas "
- "WHERE metahandle = 2"));
- ASSERT_TRUE(s.Step());
- ASSERT_EQ("Deleted Item", s.ColumnString(0));
- ASSERT_TRUE(s.ColumnBool(1));
- ASSERT_FALSE(s.ColumnBool(2));
- ASSERT_EQ("s_ID_2", s.ColumnString(3));
- sync_pb::EntitySpecifics specifics;
- specifics.ParseFromArray(s.ColumnBlob(4), s.ColumnByteLength(4));
- ASSERT_TRUE(specifics.has_bookmark());
- ASSERT_EQ("http://www.google.com/", specifics.bookmark().url());
- ASSERT_EQ("AASGASGA", specifics.bookmark().favicon());
- specifics.ParseFromArray(s.ColumnBlob(5), s.ColumnByteLength(5));
- ASSERT_TRUE(specifics.has_bookmark());
- ASSERT_EQ("http://www.google.com/2", specifics.bookmark().url());
- ASSERT_EQ("ASADGADGADG", specifics.bookmark().favicon());
- ASSERT_FALSE(s.Step());
-}
-
-TEST_F(DirectoryBackingStoreTest, MigrateVersion69To70) {
- sql::Connection connection;
- ASSERT_TRUE(connection.OpenInMemory());
- SetUpVersion69Database(&connection);
-
- ASSERT_TRUE(connection.DoesColumnExist("metas", "singleton_tag"));
- ASSERT_FALSE(connection.DoesColumnExist("metas", "unique_server_tag"));
- ASSERT_FALSE(connection.DoesColumnExist("metas", "unique_client_tag"));
-
- {
- scoped_ptr<TestDirectoryBackingStore> dbs(
- new TestDirectoryBackingStore(GetUsername(), &connection));
-
- ASSERT_FALSE(dbs->needs_column_refresh_);
- ASSERT_TRUE(dbs->MigrateVersion69To70());
- ASSERT_EQ(70, dbs->GetVersion());
- ASSERT_TRUE(dbs->needs_column_refresh_);
- }
-
- EXPECT_TRUE(connection.DoesColumnExist("metas", "unique_server_tag"));
- EXPECT_TRUE(connection.DoesColumnExist("metas", "unique_client_tag"));
- sql::Statement s(connection.GetUniqueStatement("SELECT id"
- " FROM metas WHERE unique_server_tag = 'google_chrome'"));
- ASSERT_TRUE(s.Step());
- EXPECT_EQ("s_ID_7", s.ColumnString(0));
-}
-
-TEST_F(DirectoryBackingStoreTest, MigrateVersion70To71) {
- sql::Connection connection;
- ASSERT_TRUE(connection.OpenInMemory());
- SetUpVersion70Database(&connection);
-
- ASSERT_TRUE(connection.DoesColumnExist("share_info", "last_sync_timestamp"));
- ASSERT_TRUE(connection.DoesColumnExist("share_info", "initial_sync_ended"));
- ASSERT_FALSE(connection.DoesTableExist("models"));
-
- {
- scoped_ptr<TestDirectoryBackingStore> dbs(
- new TestDirectoryBackingStore(GetUsername(), &connection));
-
- ASSERT_FALSE(dbs->needs_column_refresh_);
- ASSERT_TRUE(dbs->MigrateVersion70To71());
- ASSERT_EQ(71, dbs->GetVersion());
- ASSERT_FALSE(dbs->needs_column_refresh_);
- }
-
- ASSERT_FALSE(connection.DoesColumnExist("share_info", "last_sync_timestamp"));
- ASSERT_FALSE(connection.DoesColumnExist("share_info", "initial_sync_ended"));
- ASSERT_TRUE(connection.DoesTableExist("models"));
- ASSERT_TRUE(connection.DoesColumnExist("models", "initial_sync_ended"));
- ASSERT_TRUE(connection.DoesColumnExist("models", "last_download_timestamp"));
- ASSERT_TRUE(connection.DoesColumnExist("models", "model_id"));
-
- sql::Statement s(connection.GetUniqueStatement("SELECT model_id, "
- "initial_sync_ended, last_download_timestamp FROM models"));
- ASSERT_TRUE(s.Step());
- std::string model_id = s.ColumnString(0);
- EXPECT_EQ("C2881000", base::HexEncode(model_id.data(), model_id.size()))
- << "Model ID is expected to be the empty BookmarkSpecifics proto.";
- EXPECT_TRUE(s.ColumnBool(1));
- EXPECT_EQ(694, s.ColumnInt64(2));
- ASSERT_FALSE(s.Step());
-}
-
-TEST_F(DirectoryBackingStoreTest, MigrateVersion71To72) {
- sql::Connection connection;
- ASSERT_TRUE(connection.OpenInMemory());
- SetUpVersion71Database(&connection);
-
- ASSERT_TRUE(connection.DoesTableExist("extended_attributes"));
-
- {
- scoped_ptr<TestDirectoryBackingStore> dbs(
- new TestDirectoryBackingStore(GetUsername(), &connection));
-
- ASSERT_FALSE(dbs->needs_column_refresh_);
- ASSERT_TRUE(dbs->MigrateVersion71To72());
- ASSERT_EQ(72, dbs->GetVersion());
- ASSERT_FALSE(dbs->needs_column_refresh_);
- }
-
- ASSERT_FALSE(connection.DoesTableExist("extended_attributes"));
-}
-
-TEST_F(DirectoryBackingStoreTest, MigrateVersion72To73) {
- sql::Connection connection;
- ASSERT_TRUE(connection.OpenInMemory());
- SetUpVersion72Database(&connection);
-
- ASSERT_FALSE(connection.DoesColumnExist("share_info", "notification_state"));
-
- {
- scoped_ptr<TestDirectoryBackingStore> dbs(
- new TestDirectoryBackingStore(GetUsername(), &connection));
-
- ASSERT_FALSE(dbs->needs_column_refresh_);
- ASSERT_TRUE(dbs->MigrateVersion72To73());
- ASSERT_EQ(73, dbs->GetVersion());
- ASSERT_FALSE(dbs->needs_column_refresh_);
- }
-
- ASSERT_TRUE(connection.DoesColumnExist("share_info", "notification_state"));
-}
-
-TEST_F(DirectoryBackingStoreTest, MigrateVersion73To74) {
- sql::Connection connection;
- ASSERT_TRUE(connection.OpenInMemory());
- SetUpVersion73Database(&connection);
-
- ASSERT_FALSE(
- connection.DoesColumnExist("share_info", "autofill_migration_state"));
- ASSERT_FALSE(
- connection.DoesColumnExist("share_info",
- "bookmarks_added_during_autofill_migration"));
- ASSERT_FALSE(
- connection.DoesColumnExist("share_info", "autofill_migration_time"));
- ASSERT_FALSE(
- connection.DoesColumnExist("share_info",
- "autofill_entries_added_during_migration"));
-
- ASSERT_FALSE(
- connection.DoesColumnExist("share_info",
- "autofill_profiles_added_during_migration"));
-
- {
- scoped_ptr<TestDirectoryBackingStore> dbs(
- new TestDirectoryBackingStore(GetUsername(), &connection));
-
- ASSERT_FALSE(dbs->needs_column_refresh_);
- ASSERT_TRUE(dbs->MigrateVersion73To74());
- ASSERT_EQ(74, dbs->GetVersion());
- ASSERT_FALSE(dbs->needs_column_refresh_);
- }
-
- ASSERT_TRUE(
- connection.DoesColumnExist("share_info", "autofill_migration_state"));
- ASSERT_TRUE(
- connection.DoesColumnExist("share_info",
- "bookmarks_added_during_autofill_migration"));
- ASSERT_TRUE(
- connection.DoesColumnExist("share_info", "autofill_migration_time"));
- ASSERT_TRUE(
- connection.DoesColumnExist("share_info",
- "autofill_entries_added_during_migration"));
-
- ASSERT_TRUE(
- connection.DoesColumnExist("share_info",
- "autofill_profiles_added_during_migration"));
-}
-
-TEST_F(DirectoryBackingStoreTest, MigrateVersion74To75) {
- sql::Connection connection;
- ASSERT_TRUE(connection.OpenInMemory());
- SetUpVersion74Database(&connection);
-
- ASSERT_FALSE(connection.DoesColumnExist("models", "progress_marker"));
- ASSERT_TRUE(connection.DoesColumnExist("models", "last_download_timestamp"));
-
- {
- scoped_ptr<TestDirectoryBackingStore> dbs(
- new TestDirectoryBackingStore(GetUsername(), &connection));
-
- ASSERT_FALSE(dbs->needs_column_refresh_);
- ASSERT_TRUE(dbs->MigrateVersion74To75());
- ASSERT_EQ(75, dbs->GetVersion());
- ASSERT_FALSE(dbs->needs_column_refresh_);
- }
-
- ASSERT_TRUE(connection.DoesColumnExist("models", "progress_marker"));
- ASSERT_FALSE(connection.DoesColumnExist("models", "last_download_timestamp"));
-}
-
-TEST_F(DirectoryBackingStoreTest, MigrateVersion75To76) {
- sql::Connection connection;
- ASSERT_TRUE(connection.OpenInMemory());
- SetUpVersion75Database(&connection);
-
- ASSERT_TRUE(
- connection.DoesColumnExist("share_info", "autofill_migration_state"));
- ASSERT_TRUE(connection.DoesColumnExist("share_info",
- "bookmarks_added_during_autofill_migration"));
- ASSERT_TRUE(
- connection.DoesColumnExist("share_info", "autofill_migration_time"));
- ASSERT_TRUE(connection.DoesColumnExist("share_info",
- "autofill_entries_added_during_migration"));
- ASSERT_TRUE(connection.DoesColumnExist("share_info",
- "autofill_profiles_added_during_migration"));
-
- scoped_ptr<TestDirectoryBackingStore> dbs(
- new TestDirectoryBackingStore(GetUsername(), &connection));
- ASSERT_FALSE(dbs->needs_column_refresh_);
- ASSERT_TRUE(dbs->MigrateVersion75To76());
- ASSERT_EQ(76, dbs->GetVersion());
- ASSERT_TRUE(dbs->needs_column_refresh_);
-  // Cannot actually refresh columns because version 76 does not contain all
-  // the necessary columns.
-}
-
-TEST_F(DirectoryBackingStoreTest, MigrateVersion76To77) {
- sql::Connection connection;
- ASSERT_TRUE(connection.OpenInMemory());
- SetUpVersion76Database(&connection);
-
- scoped_ptr<TestDirectoryBackingStore> dbs(
- new TestDirectoryBackingStore(GetUsername(), &connection));
- ASSERT_FALSE(dbs->needs_column_refresh_);
-
- EXPECT_EQ(GetExpectedLegacyMetaProtoTimes(INCLUDE_DELETED_ITEMS),
- GetMetaProtoTimes(dbs->db_.get()));
- // Since the proto times are expected to be in a legacy format, they may not
- // be compatible with ProtoTimeToTime, so we don't call ExpectTimes().
-
- ASSERT_TRUE(dbs->MigrateVersion76To77());
- ASSERT_EQ(77, dbs->GetVersion());
-
- EXPECT_EQ(GetExpectedMetaProtoTimes(INCLUDE_DELETED_ITEMS),
- GetMetaProtoTimes(dbs->db_.get()));
-  // Cannot actually load entries because version 77 does not have all the
-  // required columns.
- ASSERT_FALSE(dbs->needs_column_refresh_);
-}
-
-TEST_F(DirectoryBackingStoreTest, MigrateVersion77To78) {
- sql::Connection connection;
- ASSERT_TRUE(connection.OpenInMemory());
- SetUpVersion77Database(&connection);
-
- ASSERT_FALSE(connection.DoesColumnExist("metas", "BASE_SERVER_SPECIFICS"));
-
- {
- scoped_ptr<TestDirectoryBackingStore> dbs(
- new TestDirectoryBackingStore(GetUsername(), &connection));
- ASSERT_FALSE(dbs->needs_column_refresh_);
- ASSERT_TRUE(dbs->MigrateVersion77To78());
- ASSERT_EQ(78, dbs->GetVersion());
-
- ASSERT_FALSE(dbs->needs_column_refresh_);
- }
-
- ASSERT_TRUE(connection.DoesColumnExist("metas", "base_server_specifics"));
-}
-
-TEST_P(MigrationTest, ToCurrentVersion) {
- sql::Connection connection;
- ASSERT_TRUE(connection.OpenInMemory());
- switch (GetParam()) {
- case 67:
- SetUpVersion67Database(&connection);
- break;
- case 68:
- SetUpVersion68Database(&connection);
- break;
- case 69:
- SetUpVersion69Database(&connection);
- break;
- case 70:
- SetUpVersion70Database(&connection);
- break;
- case 71:
- SetUpVersion71Database(&connection);
- break;
- case 72:
- SetUpVersion72Database(&connection);
- break;
- case 73:
- SetUpVersion73Database(&connection);
- break;
- case 74:
- SetUpVersion74Database(&connection);
- break;
- case 75:
- SetUpVersion75Database(&connection);
- break;
- case 76:
- SetUpVersion76Database(&connection);
- break;
- case 77:
- SetUpVersion77Database(&connection);
- break;
- default:
- // If you see this error, it may mean that you've increased the
- // database version number but you haven't finished adding unit tests
-      // for the database migration code. You need to supply a
- // SetUpVersionXXDatabase function with a dump of the test database
- // at the old schema. Here's one way to do that:
- // 1. Start on a clean tree (with none of your pending schema changes).
- // 2. Set a breakpoint in this function and run the unit test.
- // 3. Allow this test to run to completion (step out of the call),
- // without allowing ~MigrationTest to execute.
- // 4. Examine this->temp_dir_ to determine the location of the
- // test database (it is currently of the version you need).
- // 5. Dump this using the sqlite3 command line tool:
- // > .output foo_dump.sql
- // > .dump
- // 6. Replace the timestamp columns with META_PROTO_TIMES(x) (or
- // LEGACY_META_PROTO_TIMES(x) if before Version 77).
- FAIL() << "Need to supply database dump for version " << GetParam();
- }
-
- syncable::Directory::KernelLoadInfo dir_info;
- MetahandlesIndex index;
- STLElementDeleter<MetahandlesIndex> index_deleter(&index);
-
- {
- scoped_ptr<TestDirectoryBackingStore> dbs(
- new TestDirectoryBackingStore(GetUsername(), &connection));
- ASSERT_EQ(OPENED, dbs->Load(&index, &dir_info));
- ASSERT_FALSE(dbs->needs_column_refresh_);
- ASSERT_EQ(kCurrentDBVersion, dbs->GetVersion());
- }
-
- // Columns deleted in Version 67.
- ASSERT_FALSE(connection.DoesColumnExist("metas", "name"));
- ASSERT_FALSE(connection.DoesColumnExist("metas", "unsanitized_name"));
- ASSERT_FALSE(connection.DoesColumnExist("metas", "server_name"));
-
- // Columns added in Version 68.
- ASSERT_TRUE(connection.DoesColumnExist("metas", "specifics"));
- ASSERT_TRUE(connection.DoesColumnExist("metas", "server_specifics"));
-
- // Columns deleted in Version 68.
- ASSERT_FALSE(connection.DoesColumnExist("metas", "is_bookmark_object"));
- ASSERT_FALSE(connection.DoesColumnExist("metas",
- "server_is_bookmark_object"));
- ASSERT_FALSE(connection.DoesColumnExist("metas", "bookmark_favicon"));
- ASSERT_FALSE(connection.DoesColumnExist("metas", "bookmark_url"));
- ASSERT_FALSE(connection.DoesColumnExist("metas", "server_bookmark_url"));
-
- // Renamed a column in Version 70
- ASSERT_FALSE(connection.DoesColumnExist("metas", "singleton_tag"));
- ASSERT_TRUE(connection.DoesColumnExist("metas", "unique_server_tag"));
- ASSERT_TRUE(connection.DoesColumnExist("metas", "unique_client_tag"));
-
- // Removed extended attributes in Version 72.
- ASSERT_FALSE(connection.DoesTableExist("extended_attributes"));
-
- // Columns added in Version 73.
- ASSERT_TRUE(connection.DoesColumnExist("share_info", "notification_state"));
-
- // Column replaced in version 75.
- ASSERT_TRUE(connection.DoesColumnExist("models", "progress_marker"));
- ASSERT_FALSE(connection.DoesColumnExist("models", "last_download_timestamp"));
-
- // Columns removed in version 76.
- ASSERT_FALSE(
- connection.DoesColumnExist("share_info", "autofill_migration_state"));
- ASSERT_FALSE(connection.DoesColumnExist("share_info",
- "bookmarks_added_during_autofill_migration"));
- ASSERT_FALSE(
- connection.DoesColumnExist("share_info", "autofill_migration_time"));
- ASSERT_FALSE(connection.DoesColumnExist("share_info",
- "autofill_entries_added_during_migration"));
- ASSERT_FALSE(connection.DoesColumnExist("share_info",
- "autofill_profiles_added_during_migration"));
-
- // Column added in version 78.
- ASSERT_TRUE(connection.DoesColumnExist("metas", "base_server_specifics"));
-
- // Check download_progress state (v75 migration)
- ASSERT_EQ(694,
- dir_info.kernel_info.download_progress[syncable::BOOKMARKS]
- .timestamp_token_for_migration());
- ASSERT_FALSE(
- dir_info.kernel_info.download_progress[syncable::BOOKMARKS]
- .has_token());
- ASSERT_EQ(32904,
- dir_info.kernel_info.download_progress[syncable::BOOKMARKS]
- .data_type_id());
- ASSERT_FALSE(
- dir_info.kernel_info.download_progress[syncable::THEMES]
- .has_timestamp_token_for_migration());
- ASSERT_TRUE(
- dir_info.kernel_info.download_progress[syncable::THEMES]
- .has_token());
- ASSERT_TRUE(
- dir_info.kernel_info.download_progress[syncable::THEMES]
- .token().empty());
- ASSERT_EQ(41210,
- dir_info.kernel_info.download_progress[syncable::THEMES]
- .data_type_id());
-
- // Check metas
- EXPECT_EQ(GetExpectedMetaProtoTimes(DONT_INCLUDE_DELETED_ITEMS),
- GetMetaProtoTimes(&connection));
- ExpectTimes(index, GetExpectedMetaTimes());
-
- MetahandlesIndex::iterator it = index.begin();
- ASSERT_TRUE(it != index.end());
- ASSERT_EQ(1, (*it)->ref(META_HANDLE));
- EXPECT_TRUE((*it)->ref(ID).IsRoot());
-
- ASSERT_TRUE(++it != index.end());
- ASSERT_EQ(6, (*it)->ref(META_HANDLE));
- EXPECT_TRUE((*it)->ref(IS_DIR));
- EXPECT_TRUE((*it)->ref(SERVER_IS_DIR));
- EXPECT_FALSE(
- (*it)->ref(SPECIFICS).bookmark().has_url());
- EXPECT_FALSE(
- (*it)->ref(SERVER_SPECIFICS).bookmark().has_url());
- EXPECT_FALSE(
- (*it)->ref(SPECIFICS).bookmark().has_favicon());
- EXPECT_FALSE((*it)->ref(SERVER_SPECIFICS).bookmark().has_favicon());
-
- ASSERT_TRUE(++it != index.end());
- ASSERT_EQ(7, (*it)->ref(META_HANDLE));
- EXPECT_EQ("google_chrome", (*it)->ref(UNIQUE_SERVER_TAG));
- EXPECT_FALSE((*it)->ref(SPECIFICS).has_bookmark());
- EXPECT_FALSE((*it)->ref(SERVER_SPECIFICS).has_bookmark());
-
- ASSERT_TRUE(++it != index.end());
- ASSERT_EQ(8, (*it)->ref(META_HANDLE));
- EXPECT_EQ("google_chrome_bookmarks", (*it)->ref(UNIQUE_SERVER_TAG));
- EXPECT_TRUE((*it)->ref(SPECIFICS).has_bookmark());
- EXPECT_TRUE((*it)->ref(SERVER_SPECIFICS).has_bookmark());
-
- ASSERT_TRUE(++it != index.end());
- ASSERT_EQ(9, (*it)->ref(META_HANDLE));
- EXPECT_EQ("bookmark_bar", (*it)->ref(UNIQUE_SERVER_TAG));
- EXPECT_TRUE((*it)->ref(SPECIFICS).has_bookmark());
- EXPECT_TRUE((*it)->ref(SERVER_SPECIFICS).has_bookmark());
-
- ASSERT_TRUE(++it != index.end());
- ASSERT_EQ(10, (*it)->ref(META_HANDLE));
- EXPECT_FALSE((*it)->ref(IS_DEL));
- EXPECT_TRUE((*it)->ref(SPECIFICS).has_bookmark());
- EXPECT_TRUE((*it)->ref(SERVER_SPECIFICS).has_bookmark());
- EXPECT_FALSE((*it)->ref(SPECIFICS).bookmark().has_url());
- EXPECT_FALSE(
- (*it)->ref(SPECIFICS).bookmark().has_favicon());
- EXPECT_FALSE(
- (*it)->ref(SERVER_SPECIFICS).bookmark().has_url());
- EXPECT_FALSE((*it)->ref(SERVER_SPECIFICS).bookmark().has_favicon());
- EXPECT_EQ("other_bookmarks", (*it)->ref(UNIQUE_SERVER_TAG));
- EXPECT_EQ("Other Bookmarks", (*it)->ref(NON_UNIQUE_NAME));
- EXPECT_EQ("Other Bookmarks", (*it)->ref(SERVER_NON_UNIQUE_NAME));
-
- ASSERT_TRUE(++it != index.end());
- ASSERT_EQ(11, (*it)->ref(META_HANDLE));
- EXPECT_FALSE((*it)->ref(IS_DEL));
- EXPECT_FALSE((*it)->ref(IS_DIR));
- EXPECT_TRUE((*it)->ref(SPECIFICS).has_bookmark());
- EXPECT_TRUE((*it)->ref(SERVER_SPECIFICS).has_bookmark());
- EXPECT_EQ("http://dev.chromium.org/",
- (*it)->ref(SPECIFICS).bookmark().url());
- EXPECT_EQ("AGATWA",
- (*it)->ref(SPECIFICS).bookmark().favicon());
- EXPECT_EQ("http://dev.chromium.org/other",
- (*it)->ref(SERVER_SPECIFICS).bookmark().url());
- EXPECT_EQ("AFAGVASF",
- (*it)->ref(SERVER_SPECIFICS).bookmark().favicon());
- EXPECT_EQ("", (*it)->ref(UNIQUE_SERVER_TAG));
- EXPECT_EQ("Home (The Chromium Projects)", (*it)->ref(NON_UNIQUE_NAME));
- EXPECT_EQ("Home (The Chromium Projects)", (*it)->ref(SERVER_NON_UNIQUE_NAME));
-
- ASSERT_TRUE(++it != index.end());
- ASSERT_EQ(12, (*it)->ref(META_HANDLE));
- EXPECT_FALSE((*it)->ref(IS_DEL));
- EXPECT_TRUE((*it)->ref(IS_DIR));
- EXPECT_EQ("Extra Bookmarks", (*it)->ref(NON_UNIQUE_NAME));
- EXPECT_EQ("Extra Bookmarks", (*it)->ref(SERVER_NON_UNIQUE_NAME));
- EXPECT_TRUE((*it)->ref(SPECIFICS).has_bookmark());
- EXPECT_TRUE((*it)->ref(SERVER_SPECIFICS).has_bookmark());
- EXPECT_FALSE(
- (*it)->ref(SPECIFICS).bookmark().has_url());
- EXPECT_FALSE(
- (*it)->ref(SERVER_SPECIFICS).bookmark().has_url());
- EXPECT_FALSE(
- (*it)->ref(SPECIFICS).bookmark().has_favicon());
- EXPECT_FALSE((*it)->ref(SERVER_SPECIFICS).bookmark().has_favicon());
-
- ASSERT_TRUE(++it != index.end());
- ASSERT_EQ(13, (*it)->ref(META_HANDLE));
-
- ASSERT_TRUE(++it != index.end());
- ASSERT_EQ(14, (*it)->ref(META_HANDLE));
-
- ASSERT_TRUE(++it == index.end());
-}
-
-INSTANTIATE_TEST_CASE_P(DirectoryBackingStore, MigrationTest,
- testing::Range(67, kCurrentDBVersion));
-
-TEST_F(DirectoryBackingStoreTest, ModelTypeIds) {
- for (int i = FIRST_REAL_MODEL_TYPE; i < MODEL_TYPE_COUNT; ++i) {
- std::string model_id =
- TestDirectoryBackingStore::ModelTypeEnumToModelId(ModelTypeFromInt(i));
- EXPECT_EQ(i,
- TestDirectoryBackingStore::ModelIdToModelTypeEnum(model_id.data(),
- model_id.size()));
- }
-}
-
-// TODO(109668): This had to be disabled because the latest API will
-// intentionally crash when a database is this badly corrupted.
-TEST_F(DirectoryBackingStoreTest, DISABLED_Corruption) {
- {
- scoped_ptr<OnDiskDirectoryBackingStore> dbs(
- new OnDiskDirectoryBackingStore(GetUsername(), GetDatabasePath()));
- EXPECT_TRUE(LoadAndIgnoreReturnedData(dbs.get()));
- }
- std::string bad_data("BAD DATA");
- EXPECT_TRUE(file_util::WriteFile(GetDatabasePath(), bad_data.data(),
- bad_data.size()));
- {
- scoped_ptr<OnDiskDirectoryBackingStore> dbs(
- new OnDiskDirectoryBackingStore(GetUsername(), GetDatabasePath()));
-
- EXPECT_FALSE(LoadAndIgnoreReturnedData(dbs.get()));
- }
-}
-
-TEST_F(DirectoryBackingStoreTest, DeleteEntries) {
- sql::Connection connection;
- ASSERT_TRUE(connection.OpenInMemory());
-
- SetUpCurrentDatabaseAndCheckVersion(&connection);
- scoped_ptr<TestDirectoryBackingStore> dbs(
- new TestDirectoryBackingStore(GetUsername(), &connection));
- MetahandlesIndex index;
- Directory::KernelLoadInfo kernel_load_info;
- STLElementDeleter<MetahandlesIndex> index_deleter(&index);
-
- dbs->Load(&index, &kernel_load_info);
- size_t initial_size = index.size();
- ASSERT_LT(0U, initial_size) << "Test requires entries to delete.";
- int64 first_to_die = (*index.begin())->ref(META_HANDLE);
- MetahandleSet to_delete;
- to_delete.insert(first_to_die);
- EXPECT_TRUE(dbs->DeleteEntries(to_delete));
-
- STLDeleteElements(&index);
- dbs->LoadEntries(&index);
-
- EXPECT_EQ(initial_size - 1, index.size());
- bool delete_failed = false;
- for (MetahandlesIndex::iterator it = index.begin(); it != index.end();
- ++it) {
- if ((*it)->ref(META_HANDLE) == first_to_die) {
- delete_failed = true;
- break;
- }
- }
- EXPECT_FALSE(delete_failed);
-
- to_delete.clear();
- for (MetahandlesIndex::iterator it = index.begin(); it != index.end();
- ++it) {
- to_delete.insert((*it)->ref(META_HANDLE));
- }
-
- EXPECT_TRUE(dbs->DeleteEntries(to_delete));
-
- STLDeleteElements(&index);
- dbs->LoadEntries(&index);
- EXPECT_EQ(0U, index.size());
-}
-
-TEST_F(DirectoryBackingStoreTest, GenerateCacheGUID) {
- const std::string& guid1 = TestDirectoryBackingStore::GenerateCacheGUID();
- const std::string& guid2 = TestDirectoryBackingStore::GenerateCacheGUID();
- EXPECT_EQ(24U, guid1.size());
- EXPECT_EQ(24U, guid2.size());
- // In theory this test can fail, but it won't before the universe
- // dies of heat death.
- EXPECT_NE(guid1, guid2);
-}
-
-} // namespace syncable
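For illustration, a skeleton of the kind of SetUpVersionXXDatabase helper that the default case of MigrationTest.ToCurrentVersion above asks for when the schema version is bumped; it would sit alongside the other helpers inside namespace syncable. The version number and the elided statements are hypothetical — a real helper pastes the full sqlite3 .dump output, with the timestamp values replaced by the proto-time macros.

void MigrationTest::SetUpVersion78Database(sql::Connection* connection) {
  ASSERT_TRUE(connection->is_open());
  ASSERT_TRUE(connection->BeginTransaction());
  ASSERT_TRUE(connection->Execute(
      "CREATE TABLE share_version (id VARCHAR(128) primary key, data INT);"
      "INSERT INTO 'share_version' VALUES('nick@chromium.org',78);"
      // Hypothetical: the remaining CREATE TABLE / INSERT statements from the
      // sqlite3 dump go here, with timestamp values replaced by
      // META_PROTO_TIMES_VALS(x) so the expected-time helpers keep working.
      ));
  ASSERT_TRUE(connection->CommitTransaction());
}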
diff --git a/chrome/browser/sync/syncable/directory_change_delegate.h b/chrome/browser/sync/syncable/directory_change_delegate.h
deleted file mode 100644
index c6552cd..0000000
--- a/chrome/browser/sync/syncable/directory_change_delegate.h
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CHROME_BROWSER_SYNC_SYNCABLE_DIRECTORY_CHANGE_DELEGATE_H_
-#define CHROME_BROWSER_SYNC_SYNCABLE_DIRECTORY_CHANGE_DELEGATE_H_
-#pragma once
-
-#include "chrome/browser/sync/syncable/model_type.h"
-#include "chrome/browser/sync/syncable/syncable.h"
-
-namespace syncable {
-
-// This is an interface for listening to directory change events, triggered by
-// the releasing of the syncable transaction. The delegate performs work to
-// 1. Calculate changes, depending on the source of the transaction
-// (HandleCalculateChangesChangeEventFromSyncer/Syncapi).
-// 2. Perform final work while the transaction is held
-// (HandleTransactionEndingChangeEvent).
-// 3. Perform any work that should be done after the transaction is released.
-// (HandleTransactionCompleteChangeEvent).
-//
-// Note that these methods may be called on *any* thread.
-class DirectoryChangeDelegate {
- public:
- virtual void HandleCalculateChangesChangeEventFromSyncApi(
- const ImmutableWriteTransactionInfo& write_transaction_info,
- BaseTransaction* trans) = 0;
- virtual void HandleCalculateChangesChangeEventFromSyncer(
- const ImmutableWriteTransactionInfo& write_transaction_info,
- BaseTransaction* trans) = 0;
- // Must return the set of all ModelTypes that were modified in the
- // transaction.
- virtual ModelTypeSet HandleTransactionEndingChangeEvent(
- const ImmutableWriteTransactionInfo& write_transaction_info,
- BaseTransaction* trans) = 0;
- virtual void HandleTransactionCompleteChangeEvent(
- ModelTypeSet models_with_changes) = 0;
- protected:
- virtual ~DirectoryChangeDelegate() {}
-};
-
-} // namespace syncable
-
-#endif // CHROME_BROWSER_SYNC_SYNCABLE_DIRECTORY_CHANGE_DELEGATE_H_
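For illustration, a minimal sketch of a delegate that satisfies the three-phase contract described in the header above; the LoggingChangeDelegate name and its trivial bodies are hypothetical and not part of the sync codebase.

#include "chrome/browser/sync/syncable/directory_change_delegate.h"

namespace syncable {

// Hypothetical delegate: phase 1 inspects the transaction, phase 2 reports
// the types it touched, phase 3 runs after the transaction is released.
class LoggingChangeDelegate : public DirectoryChangeDelegate {
 public:
  virtual void HandleCalculateChangesChangeEventFromSyncApi(
      const ImmutableWriteTransactionInfo& write_transaction_info,
      BaseTransaction* trans) {
    // Phase 1 (sync API source): examine write_transaction_info here.
  }
  virtual void HandleCalculateChangesChangeEventFromSyncer(
      const ImmutableWriteTransactionInfo& write_transaction_info,
      BaseTransaction* trans) {
    // Phase 1 (syncer source).
  }
  virtual ModelTypeSet HandleTransactionEndingChangeEvent(
      const ImmutableWriteTransactionInfo& write_transaction_info,
      BaseTransaction* trans) {
    // Phase 2: still holding the transaction; report every type modified.
    return ModelTypeSet();
  }
  virtual void HandleTransactionCompleteChangeEvent(
      ModelTypeSet models_with_changes) {
    // Phase 3: transaction released; safe to do slower follow-up work.
  }
};

}  // namespace syncable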
diff --git a/chrome/browser/sync/syncable/in_memory_directory_backing_store.cc b/chrome/browser/sync/syncable/in_memory_directory_backing_store.cc
deleted file mode 100644
index 536f1a9..0000000
--- a/chrome/browser/sync/syncable/in_memory_directory_backing_store.cc
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "chrome/browser/sync/syncable/in_memory_directory_backing_store.h"
-
-namespace syncable {
-
-InMemoryDirectoryBackingStore::InMemoryDirectoryBackingStore(
- const std::string& dir_name) : DirectoryBackingStore(dir_name) {
-}
-
-DirOpenResult InMemoryDirectoryBackingStore::Load(
- MetahandlesIndex* entry_bucket,
- Directory::KernelLoadInfo* kernel_load_info) {
- if (!db_->is_open()) {
- if (!db_->OpenInMemory())
- return FAILED_OPEN_DATABASE;
- }
-
- if (!InitializeTables())
- return FAILED_OPEN_DATABASE;
-
- if (!LoadEntries(entry_bucket))
- return FAILED_DATABASE_CORRUPT;
- if (!LoadInfo(kernel_load_info))
- return FAILED_DATABASE_CORRUPT;
-
- return OPENED;
-}
-
-} // namespace syncable
diff --git a/chrome/browser/sync/syncable/in_memory_directory_backing_store.h b/chrome/browser/sync/syncable/in_memory_directory_backing_store.h
deleted file mode 100644
index b8c91e4..0000000
--- a/chrome/browser/sync/syncable/in_memory_directory_backing_store.h
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CHROME_BROWSER_SYNC_SYNCABLE_IN_MEMORY_DIRECTORY_BACKING_STORE_H_
-#define CHROME_BROWSER_SYNC_SYNCABLE_IN_MEMORY_DIRECTORY_BACKING_STORE_H_
-#pragma once
-
-#include "chrome/browser/sync/syncable/directory_backing_store.h"
-
-namespace syncable {
-
-// This implementation of DirectoryBackingStore is used in tests that do not
-// require us to write to a file. An in-memory sqlite database is much faster
-// than an on-disk database, so this can result in significant speedups in our
-// unit tests.
-//
-// An InMemoryDirectoryBackingStore cannot load data from existing databases.
-// When an InMemoryDirectoryBackingStore is destroyed, all data stored in this
-// database is lost. If these limitations are a problem for you, consider using
-// TestDirectoryBackingStore.
-class InMemoryDirectoryBackingStore : public DirectoryBackingStore {
- public:
- explicit InMemoryDirectoryBackingStore(const std::string& dir_name);
- virtual DirOpenResult Load(
- MetahandlesIndex* entry_bucket,
- Directory::KernelLoadInfo* kernel_load_info) OVERRIDE;
-};
-
-} // namespace syncable
-
-#endif // CHROME_BROWSER_SYNC_SYNCABLE_IN_MEMORY_DIRECTORY_BACKING_STORE_H_
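For illustration, a hedged sketch of the usage pattern the header comment above describes — loading an InMemoryDirectoryBackingStore in a test; the ExampleInMemoryLoad function name is hypothetical.

#include "base/logging.h"
#include "base/stl_util.h"
#include "chrome/browser/sync/syncable/in_memory_directory_backing_store.h"

namespace syncable {

// Hypothetical test helper: nothing is read from or written to disk, and all
// data vanishes when |store| goes out of scope.
void ExampleInMemoryLoad() {
  InMemoryDirectoryBackingStore store("test_user");
  MetahandlesIndex index;
  Directory::KernelLoadInfo info;
  DirOpenResult result = store.Load(&index, &info);
  DCHECK_EQ(OPENED, result);
  // Load() heap-allocates an EntryKernel per row; callers own them.
  STLDeleteElements(&index);
}

}  // namespace syncable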
diff --git a/chrome/browser/sync/syncable/model_type.cc b/chrome/browser/sync/syncable/model_type.cc
deleted file mode 100644
index 58385d3..0000000
--- a/chrome/browser/sync/syncable/model_type.cc
+++ /dev/null
@@ -1,542 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "chrome/browser/sync/syncable/model_type.h"
-
-#include "base/string_split.h"
-#include "base/values.h"
-#include "chrome/browser/sync/engine/syncproto.h"
-#include "sync/protocol/app_notification_specifics.pb.h"
-#include "sync/protocol/app_setting_specifics.pb.h"
-#include "sync/protocol/app_specifics.pb.h"
-#include "sync/protocol/autofill_specifics.pb.h"
-#include "sync/protocol/bookmark_specifics.pb.h"
-#include "sync/protocol/extension_setting_specifics.pb.h"
-#include "sync/protocol/extension_specifics.pb.h"
-#include "sync/protocol/nigori_specifics.pb.h"
-#include "sync/protocol/password_specifics.pb.h"
-#include "sync/protocol/preference_specifics.pb.h"
-#include "sync/protocol/search_engine_specifics.pb.h"
-#include "sync/protocol/session_specifics.pb.h"
-#include "sync/protocol/sync.pb.h"
-#include "sync/protocol/theme_specifics.pb.h"
-#include "sync/protocol/typed_url_specifics.pb.h"
-
-namespace syncable {
-
-void AddDefaultFieldValue(syncable::ModelType datatype,
- sync_pb::EntitySpecifics* specifics) {
- switch (datatype) {
- case BOOKMARKS:
- specifics->mutable_bookmark();
- break;
- case PASSWORDS:
- specifics->mutable_password();
- break;
- case PREFERENCES:
- specifics->mutable_preference();
- break;
- case AUTOFILL:
- specifics->mutable_autofill();
- break;
- case AUTOFILL_PROFILE:
- specifics->mutable_autofill_profile();
- break;
- case THEMES:
- specifics->mutable_theme();
- break;
- case TYPED_URLS:
- specifics->mutable_typed_url();
- break;
- case EXTENSIONS:
- specifics->mutable_extension();
- break;
- case NIGORI:
- specifics->mutable_nigori();
- break;
- case SEARCH_ENGINES:
- specifics->mutable_search_engine();
- break;
- case SESSIONS:
- specifics->mutable_session();
- break;
- case APPS:
- specifics->mutable_app();
- break;
- case APP_SETTINGS:
- specifics->mutable_app_setting();
- break;
- case EXTENSION_SETTINGS:
- specifics->mutable_extension_setting();
- break;
- case APP_NOTIFICATIONS:
- specifics->mutable_app_notification();
- break;
- default:
- NOTREACHED() << "No known extension for model type.";
- }
-}
-
-ModelType GetModelTypeFromSpecificsFieldNumber(int field_number) {
- for (int i = FIRST_REAL_MODEL_TYPE; i < MODEL_TYPE_COUNT; ++i) {
- ModelType model_type = ModelTypeFromInt(i);
- if (GetSpecificsFieldNumberFromModelType(model_type) == field_number)
- return model_type;
- }
- NOTREACHED();
- return UNSPECIFIED;
-}
-
-int GetSpecificsFieldNumberFromModelType(ModelType model_type) {
- switch (model_type) {
- case BOOKMARKS:
- return sync_pb::EntitySpecifics::kBookmarkFieldNumber;
- break;
- case PASSWORDS:
- return sync_pb::EntitySpecifics::kPasswordFieldNumber;
- break;
- case PREFERENCES:
- return sync_pb::EntitySpecifics::kPreferenceFieldNumber;
- break;
- case AUTOFILL:
- return sync_pb::EntitySpecifics::kAutofillFieldNumber;
- break;
- case AUTOFILL_PROFILE:
- return sync_pb::EntitySpecifics::kAutofillProfileFieldNumber;
- break;
- case THEMES:
- return sync_pb::EntitySpecifics::kThemeFieldNumber;
- break;
- case TYPED_URLS:
- return sync_pb::EntitySpecifics::kTypedUrlFieldNumber;
- break;
- case EXTENSIONS:
- return sync_pb::EntitySpecifics::kExtensionFieldNumber;
- break;
- case NIGORI:
- return sync_pb::EntitySpecifics::kNigoriFieldNumber;
- break;
- case SEARCH_ENGINES:
- return sync_pb::EntitySpecifics::kSearchEngineFieldNumber;
- break;
- case SESSIONS:
- return sync_pb::EntitySpecifics::kSessionFieldNumber;
- break;
- case APPS:
- return sync_pb::EntitySpecifics::kAppFieldNumber;
- break;
- case APP_SETTINGS:
- return sync_pb::EntitySpecifics::kAppSettingFieldNumber;
- break;
- case EXTENSION_SETTINGS:
- return sync_pb::EntitySpecifics::kExtensionSettingFieldNumber;
- break;
- case APP_NOTIFICATIONS:
- return sync_pb::EntitySpecifics::kAppNotificationFieldNumber;
- break;
- default:
- NOTREACHED() << "No known extension for model type.";
- return 0;
- }
- NOTREACHED() << "Needed for linux_keep_shadow_stacks because of "
- << "http://gcc.gnu.org/bugzilla/show_bug.cgi?id=20681";
- return 0;
-}
-
-// Note: keep this consistent with GetModelType in syncable.cc!
-ModelType GetModelType(const sync_pb::SyncEntity& sync_pb_entity) {
- const browser_sync::SyncEntity& sync_entity =
- static_cast<const browser_sync::SyncEntity&>(sync_pb_entity);
- DCHECK(!sync_entity.id().IsRoot()); // Root shouldn't ever go over the wire.
-
- if (sync_entity.deleted())
- return UNSPECIFIED;
-
- // Backwards compatibility with old (pre-specifics) protocol.
- if (sync_entity.has_bookmarkdata())
- return BOOKMARKS;
-
- ModelType specifics_type = GetModelTypeFromSpecifics(sync_entity.specifics());
- if (specifics_type != UNSPECIFIED)
- return specifics_type;
-
- // Loose check for server-created top-level folders that aren't
- // bound to a particular model type.
- if (!sync_entity.server_defined_unique_tag().empty() &&
- sync_entity.IsFolder()) {
- return TOP_LEVEL_FOLDER;
- }
-
- // This is an item of a datatype we can't understand. Maybe it's
- // from the future? Either we mis-encoded the object, or the
- // server sent us entries it shouldn't have.
- NOTREACHED() << "Unknown datatype in sync proto.";
- return UNSPECIFIED;
-}
-
-ModelType GetModelTypeFromSpecifics(const sync_pb::EntitySpecifics& specifics) {
- if (specifics.has_bookmark())
- return BOOKMARKS;
-
- if (specifics.has_password())
- return PASSWORDS;
-
- if (specifics.has_preference())
- return PREFERENCES;
-
- if (specifics.has_autofill())
- return AUTOFILL;
-
- if (specifics.has_autofill_profile())
- return AUTOFILL_PROFILE;
-
- if (specifics.has_theme())
- return THEMES;
-
- if (specifics.has_typed_url())
- return TYPED_URLS;
-
- if (specifics.has_extension())
- return EXTENSIONS;
-
- if (specifics.has_nigori())
- return NIGORI;
-
- if (specifics.has_app())
- return APPS;
-
- if (specifics.has_search_engine())
- return SEARCH_ENGINES;
-
- if (specifics.has_session())
- return SESSIONS;
-
- if (specifics.has_app_setting())
- return APP_SETTINGS;
-
- if (specifics.has_extension_setting())
- return EXTENSION_SETTINGS;
-
- if (specifics.has_app_notification())
- return APP_NOTIFICATIONS;
-
- return UNSPECIFIED;
-}
-
-bool ShouldMaintainPosition(ModelType model_type) {
- return model_type == BOOKMARKS;
-}
-
-const char* ModelTypeToString(ModelType model_type) {
- // This is used in serialization routines as well as for displaying debug
- // information. Do not attempt to change these string values unless you know
- // what you're doing.
- switch (model_type) {
- case TOP_LEVEL_FOLDER:
- return "Top Level Folder";
- case UNSPECIFIED:
- return "Unspecified";
- case BOOKMARKS:
- return "Bookmarks";
- case PREFERENCES:
- return "Preferences";
- case PASSWORDS:
- return "Passwords";
- case AUTOFILL:
- return "Autofill";
- case THEMES:
- return "Themes";
- case TYPED_URLS:
- return "Typed URLs";
- case EXTENSIONS:
- return "Extensions";
- case NIGORI:
- return "Encryption keys";
- case SEARCH_ENGINES:
- return "Search Engines";
- case SESSIONS:
- return "Sessions";
- case APPS:
- return "Apps";
- case AUTOFILL_PROFILE:
- return "Autofill Profiles";
- case APP_SETTINGS:
- return "App settings";
- case EXTENSION_SETTINGS:
- return "Extension settings";
- case APP_NOTIFICATIONS:
- return "App Notifications";
- default:
- break;
- }
- NOTREACHED() << "No known extension for model type.";
- return "INVALID";
-}
-
-StringValue* ModelTypeToValue(ModelType model_type) {
- if (model_type >= syncable::FIRST_REAL_MODEL_TYPE) {
- return Value::CreateStringValue(ModelTypeToString(model_type));
- } else if (model_type == syncable::TOP_LEVEL_FOLDER) {
- return Value::CreateStringValue("Top-level folder");
- } else if (model_type == syncable::UNSPECIFIED) {
- return Value::CreateStringValue("Unspecified");
- }
- NOTREACHED();
- return Value::CreateStringValue("");
-}
-
-ModelType ModelTypeFromValue(const Value& value) {
- if (value.IsType(Value::TYPE_STRING)) {
- std::string result;
- CHECK(value.GetAsString(&result));
- return ModelTypeFromString(result);
- } else if (value.IsType(Value::TYPE_INTEGER)) {
- int result;
- CHECK(value.GetAsInteger(&result));
- return ModelTypeFromInt(result);
- } else {
- NOTREACHED() << "Unsupported value type: " << value.GetType();
- return UNSPECIFIED;
- }
-}
-
-ModelType ModelTypeFromString(const std::string& model_type_string) {
- if (model_type_string == "Bookmarks")
- return BOOKMARKS;
- else if (model_type_string == "Preferences")
- return PREFERENCES;
- else if (model_type_string == "Passwords")
- return PASSWORDS;
- else if (model_type_string == "Autofill")
- return AUTOFILL;
- else if (model_type_string == "Autofill Profiles")
- return AUTOFILL_PROFILE;
- else if (model_type_string == "Themes")
- return THEMES;
- else if (model_type_string == "Typed URLs")
- return TYPED_URLS;
- else if (model_type_string == "Extensions")
- return EXTENSIONS;
- else if (model_type_string == "Encryption keys")
- return NIGORI;
- else if (model_type_string == "Search Engines")
- return SEARCH_ENGINES;
- else if (model_type_string == "Sessions")
- return SESSIONS;
- else if (model_type_string == "Apps")
- return APPS;
- else if (model_type_string == "App settings")
- return APP_SETTINGS;
- else if (model_type_string == "Extension settings")
- return EXTENSION_SETTINGS;
- else if (model_type_string == "App Notifications")
- return APP_NOTIFICATIONS;
- else
- NOTREACHED() << "No known model type corresponding to "
- << model_type_string << ".";
- return UNSPECIFIED;
-}
-
-std::string ModelTypeSetToString(ModelTypeSet model_types) {
- std::string result;
- for (ModelTypeSet::Iterator it = model_types.First(); it.Good(); it.Inc()) {
- if (!result.empty()) {
- result += ", ";
- }
- result += ModelTypeToString(it.Get());
- }
- return result;
-}
-
-base::ListValue* ModelTypeSetToValue(ModelTypeSet model_types) {
- ListValue* value = new ListValue();
- for (ModelTypeSet::Iterator it = model_types.First(); it.Good(); it.Inc()) {
- value->Append(
- Value::CreateStringValue(ModelTypeToString(it.Get())));
- }
- return value;
-}
-
-ModelTypeSet ModelTypeSetFromValue(const base::ListValue& value) {
- ModelTypeSet result;
- for (ListValue::const_iterator i = value.begin(); i != value.end(); ++i) {
- result.Put(ModelTypeFromValue(**i));
- }
- return result;
-}
-
-// TODO(zea): remove all hardcoded tags in model associators and have them use
-// this instead.
-std::string ModelTypeToRootTag(ModelType type) {
- switch (type) {
- case BOOKMARKS:
- return "google_chrome_bookmarks";
- case PREFERENCES:
- return "google_chrome_preferences";
- case PASSWORDS:
- return "google_chrome_passwords";
- case AUTOFILL:
- return "google_chrome_autofill";
- case THEMES:
- return "google_chrome_themes";
- case TYPED_URLS:
- return "google_chrome_typed_urls";
- case EXTENSIONS:
- return "google_chrome_extensions";
- case NIGORI:
- return "google_chrome_nigori";
- case SEARCH_ENGINES:
- return "google_chrome_search_engines";
- case SESSIONS:
- return "google_chrome_sessions";
- case APPS:
- return "google_chrome_apps";
- case AUTOFILL_PROFILE:
- return "google_chrome_autofill_profiles";
- case APP_SETTINGS:
- return "google_chrome_app_settings";
- case EXTENSION_SETTINGS:
- return "google_chrome_extension_settings";
- case APP_NOTIFICATIONS:
- return "google_chrome_app_notifications";
- default:
- break;
- }
- NOTREACHED() << "No known extension for model type.";
- return "INVALID";
-}
-
-// TODO(akalin): Figure out a better way to do these mappings.
-
-namespace {
-const char kBookmarkNotificationType[] = "BOOKMARK";
-const char kPreferenceNotificationType[] = "PREFERENCE";
-const char kPasswordNotificationType[] = "PASSWORD";
-const char kAutofillNotificationType[] = "AUTOFILL";
-const char kThemeNotificationType[] = "THEME";
-const char kTypedUrlNotificationType[] = "TYPED_URL";
-const char kExtensionNotificationType[] = "EXTENSION";
-const char kExtensionSettingNotificationType[] = "EXTENSION_SETTING";
-const char kNigoriNotificationType[] = "NIGORI";
-const char kAppSettingNotificationType[] = "APP_SETTING";
-const char kAppNotificationType[] = "APP";
-const char kSearchEngineNotificationType[] = "SEARCH_ENGINE";
-const char kSessionNotificationType[] = "SESSION";
-const char kAutofillProfileNotificationType[] = "AUTOFILL_PROFILE";
-const char kAppNotificationNotificationType[] = "APP_NOTIFICATION";
-} // namespace
-
-bool RealModelTypeToNotificationType(ModelType model_type,
- std::string* notification_type) {
- switch (model_type) {
- case BOOKMARKS:
- *notification_type = kBookmarkNotificationType;
- return true;
- case PREFERENCES:
- *notification_type = kPreferenceNotificationType;
- return true;
- case PASSWORDS:
- *notification_type = kPasswordNotificationType;
- return true;
- case AUTOFILL:
- *notification_type = kAutofillNotificationType;
- return true;
- case THEMES:
- *notification_type = kThemeNotificationType;
- return true;
- case TYPED_URLS:
- *notification_type = kTypedUrlNotificationType;
- return true;
- case EXTENSIONS:
- *notification_type = kExtensionNotificationType;
- return true;
- case NIGORI:
- *notification_type = kNigoriNotificationType;
- return true;
- case APP_SETTINGS:
-      *notification_type = kAppSettingNotificationType;
- return true;
- case APPS:
- *notification_type = kAppNotificationType;
- return true;
- case SEARCH_ENGINES:
- *notification_type = kSearchEngineNotificationType;
- return true;
- case SESSIONS:
- *notification_type = kSessionNotificationType;
- return true;
- case AUTOFILL_PROFILE:
- *notification_type = kAutofillProfileNotificationType;
- return true;
- case EXTENSION_SETTINGS:
- *notification_type = kExtensionSettingNotificationType;
- return true;
- case APP_NOTIFICATIONS:
- *notification_type = kAppNotificationNotificationType;
- return true;
- default:
- break;
- }
- notification_type->clear();
- return false;
-}
-
-bool NotificationTypeToRealModelType(const std::string& notification_type,
- ModelType* model_type) {
- if (notification_type == kBookmarkNotificationType) {
- *model_type = BOOKMARKS;
- return true;
- } else if (notification_type == kPreferenceNotificationType) {
- *model_type = PREFERENCES;
- return true;
- } else if (notification_type == kPasswordNotificationType) {
- *model_type = PASSWORDS;
- return true;
- } else if (notification_type == kAutofillNotificationType) {
- *model_type = AUTOFILL;
- return true;
- } else if (notification_type == kThemeNotificationType) {
- *model_type = THEMES;
- return true;
- } else if (notification_type == kTypedUrlNotificationType) {
- *model_type = TYPED_URLS;
- return true;
- } else if (notification_type == kExtensionNotificationType) {
- *model_type = EXTENSIONS;
- return true;
- } else if (notification_type == kNigoriNotificationType) {
- *model_type = NIGORI;
- return true;
- } else if (notification_type == kAppNotificationType) {
- *model_type = APPS;
- return true;
- } else if (notification_type == kSearchEngineNotificationType) {
- *model_type = SEARCH_ENGINES;
- return true;
- } else if (notification_type == kSessionNotificationType) {
- *model_type = SESSIONS;
- return true;
- } else if (notification_type == kAutofillProfileNotificationType) {
- *model_type = AUTOFILL_PROFILE;
- return true;
- } else if (notification_type == kAppSettingNotificationType) {
- *model_type = APP_SETTINGS;
- return true;
- } else if (notification_type == kExtensionSettingNotificationType) {
- *model_type = EXTENSION_SETTINGS;
- return true;
- } else if (notification_type == kAppNotificationNotificationType) {
- *model_type = APP_NOTIFICATIONS;
- return true;
- } else {
- *model_type = UNSPECIFIED;
- return false;
- }
-}
-
-bool IsRealDataType(ModelType model_type) {
- return model_type >= FIRST_REAL_MODEL_TYPE && model_type < MODEL_TYPE_COUNT;
-}
-
-} // namespace syncable
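For illustration, a small sketch tying together the mappings implemented above — building specifics with AddDefaultFieldValue, recovering the type with GetModelTypeFromSpecifics, and round-tripping the notification string; the ExampleModelTypeRoundTrip name is hypothetical.

#include "base/logging.h"
#include "chrome/browser/sync/syncable/model_type.h"
#include "sync/protocol/sync.pb.h"

namespace syncable {

// Hypothetical example: the specifics field set for a type is the same field
// used to infer that type back, and the notification strings round-trip too.
void ExampleModelTypeRoundTrip() {
  sync_pb::EntitySpecifics specifics;
  AddDefaultFieldValue(BOOKMARKS, &specifics);  // sets the bookmark field
  CHECK_EQ(BOOKMARKS, GetModelTypeFromSpecifics(specifics));

  std::string notification_type;
  CHECK(RealModelTypeToNotificationType(BOOKMARKS, &notification_type));
  ModelType round_tripped = UNSPECIFIED;
  CHECK(NotificationTypeToRealModelType(notification_type, &round_tripped));
  CHECK_EQ(BOOKMARKS, round_tripped);  // "BOOKMARK" maps back to BOOKMARKS.
}

}  // namespace syncable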
diff --git a/chrome/browser/sync/syncable/model_type.h b/chrome/browser/sync/syncable/model_type.h
deleted file mode 100644
index 115d5a0..0000000
--- a/chrome/browser/sync/syncable/model_type.h
+++ /dev/null
@@ -1,175 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Enumerate the various item subtypes that are supported by sync.
-// Each sync object is expected to have an immutable object type.
-// An object's type is inferred from the type of data it holds.
-
-#ifndef CHROME_BROWSER_SYNC_SYNCABLE_MODEL_TYPE_H_
-#define CHROME_BROWSER_SYNC_SYNCABLE_MODEL_TYPE_H_
-#pragma once
-
-#include <set>
-#include <string>
-
-#include "base/logging.h"
-#include "base/time.h"
-#include "chrome/browser/sync/util/enum_set.h"
-
-namespace base {
-class ListValue;
-class StringValue;
-class Value;
-}
-
-namespace sync_pb {
-class EntitySpecifics;
-class SyncEntity;
-}
-
-namespace syncable {
-
-enum ModelType {
- // Object type unknown. Objects may transition through
- // the unknown state during their initial creation, before
- // their properties are set. After deletion, object types
- // are generally preserved.
- UNSPECIFIED,
- // A permanent folder whose children may be of mixed
- // datatypes (e.g. the "Google Chrome" folder).
- TOP_LEVEL_FOLDER,
-
- // ------------------------------------ Start of "real" model types.
- // The model types declared before here are somewhat special, as they
- // do not correspond to any browser data model. The remaining types
- // are bona fide model types; all have a related browser data model and
- // can be represented in the protocol using a specific Message type in the
- // EntitySpecifics protocol buffer.
- //
- // A bookmark folder or a bookmark URL object.
- BOOKMARKS,
- FIRST_REAL_MODEL_TYPE = BOOKMARKS, // Declared 2nd, for debugger prettiness.
-
- // A preference folder or a preference object.
- PREFERENCES,
- // A password folder or password object.
- PASSWORDS,
- // An AutofillProfile object.
- AUTOFILL_PROFILE,
- // An autofill folder or an autofill object.
- AUTOFILL,
-
- // A themes folder or a themes object.
- THEMES,
- // A typed_url folder or a typed_url object.
- TYPED_URLS,
- // An extension folder or an extension object.
- EXTENSIONS,
- // An object representing a set of Nigori keys.
- NIGORI,
- // An object representing a custom search engine.
- SEARCH_ENGINES,
- // An object representing a browser session.
- SESSIONS,
- // An app folder or an app object.
- APPS,
- // An app setting from the extension settings API.
- APP_SETTINGS,
- // An extension setting from the extension settings API.
- EXTENSION_SETTINGS,
- // App notifications.
- APP_NOTIFICATIONS,
- LAST_REAL_MODEL_TYPE = APP_NOTIFICATIONS,
-
- // If you are adding a new sync datatype that is exposed to the user via the
- // sync preferences UI, be sure to update the list in
- // chrome/browser/sync/user_selectable_sync_type.h so that the UMA histograms
- // for sync include your new type.
-
- MODEL_TYPE_COUNT,
-};
-
-typedef browser_sync::EnumSet<
- ModelType, FIRST_REAL_MODEL_TYPE, LAST_REAL_MODEL_TYPE> ModelTypeSet;
-typedef browser_sync::EnumSet<
- ModelType, UNSPECIFIED, LAST_REAL_MODEL_TYPE> FullModelTypeSet;
-
-inline ModelType ModelTypeFromInt(int i) {
- DCHECK_GE(i, 0);
- DCHECK_LT(i, MODEL_TYPE_COUNT);
- return static_cast<ModelType>(i);
-}
-
-void AddDefaultFieldValue(syncable::ModelType datatype,
- sync_pb::EntitySpecifics* specifics);
-
-// Extract the model type of a SyncEntity protocol buffer. ModelType is a
-// local concept: the enum is not in the protocol. The SyncEntity's ModelType
-// is inferred from the presence of particular datatype field in the
-// entity specifics.
-ModelType GetModelType(const sync_pb::SyncEntity& sync_entity);
-
-// Extract the model type from an EntitySpecifics field. Note that there
-// are some ModelTypes (like TOP_LEVEL_FOLDER) that can't be inferred this way;
-// prefer using GetModelType where possible.
-ModelType GetModelTypeFromSpecifics(const sync_pb::EntitySpecifics& specifics);
-
-// If this returns false, we shouldn't bother maintaining a position
-// value (sibling ordering) for this item.
-bool ShouldMaintainPosition(ModelType model_type);
-
-// Determine a model type from the field number of its associated
-// EntitySpecifics field.
-ModelType GetModelTypeFromSpecificsFieldNumber(int field_number);
-
-// Return the field number of the EntitySpecifics field associated with
-// a model type.
-int GetSpecificsFieldNumberFromModelType(ModelType model_type);
-
-// TODO(sync): The functions below badly need some cleanup.
-
-// Returns a pointer to a string with application lifetime that represents
-// the name of |model_type|.
-const char* ModelTypeToString(ModelType model_type);
-
-// Handles all model types, and not just real ones.
-//
-// Caller takes ownership of returned value.
-base::StringValue* ModelTypeToValue(ModelType model_type);
-
-// Converts a Value into a ModelType - complement to ModelTypeToValue().
-ModelType ModelTypeFromValue(const base::Value& value);
-
-// Returns the ModelType corresponding to the name |model_type_string|.
-ModelType ModelTypeFromString(const std::string& model_type_string);
-
-std::string ModelTypeSetToString(ModelTypeSet model_types);
-
-// Caller takes ownership of returned list.
-base::ListValue* ModelTypeSetToValue(ModelTypeSet model_types);
-
-ModelTypeSet ModelTypeSetFromValue(const base::ListValue& value);
-
-// Returns a string corresponding to the syncable tag for this datatype.
-std::string ModelTypeToRootTag(ModelType type);
-
-// Convert a real model type to a notification type (used for
-// subscribing to server-issued notifications). Returns true iff
-// |model_type| was a real model type and |notification_type| was
-// filled in.
-bool RealModelTypeToNotificationType(ModelType model_type,
- std::string* notification_type);
-
-// Converts a notification type to a real model type. Returns true
-// iff |notification_type| was the notification type of a real model
-// type and |model_type| was filled in.
-bool NotificationTypeToRealModelType(const std::string& notification_type,
- ModelType* model_type);
-
-// Returns true if |model_type| is a real datatype
-bool IsRealDataType(ModelType model_type);
-
-} // namespace syncable
-
-#endif // CHROME_BROWSER_SYNC_SYNCABLE_MODEL_TYPE_H_
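A short sketch of typical ModelTypeSet usage, following the Put()/Has() and First()/Good()/Inc() iteration pattern used elsewhere in this change; the function name is illustrative only.

  #include "base/logging.h"
  #include "chrome/browser/sync/syncable/model_type.h"

  void EnumerateTypesSketch() {
    // EnumSet-based set of real model types.
    syncable::ModelTypeSet types(syncable::BOOKMARKS, syncable::PREFERENCES);
    types.Put(syncable::APPS);
    if (types.Has(syncable::PREFERENCES)) {
      for (syncable::ModelTypeSet::Iterator it = types.First();
           it.Good(); it.Inc()) {
        // ModelTypeToString() returns a string with application lifetime.
        VLOG(1) << syncable::ModelTypeToString(it.Get());
      }
    }
  }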
diff --git a/chrome/browser/sync/syncable/model_type_payload_map.cc b/chrome/browser/sync/syncable/model_type_payload_map.cc
deleted file mode 100644
index 7c73ba9..0000000
--- a/chrome/browser/sync/syncable/model_type_payload_map.cc
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "chrome/browser/sync/syncable/model_type_payload_map.h"
-
-#include <vector>
-
-#include "base/json/json_writer.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/values.h"
-
-using browser_sync::ModelSafeRoutingInfo;
-namespace syncable {
-
-ModelTypePayloadMap ModelTypePayloadMapFromEnumSet(
- syncable::ModelTypeSet types,
- const std::string& payload) {
- ModelTypePayloadMap types_with_payloads;
- for (syncable::ModelTypeSet::Iterator it = types.First();
- it.Good(); it.Inc()) {
- types_with_payloads[it.Get()] = payload;
- }
- return types_with_payloads;
-}
-
-ModelTypeSet ModelTypePayloadMapToEnumSet(
- const ModelTypePayloadMap& payload_map) {
- ModelTypeSet types;
- for (ModelTypePayloadMap::const_iterator it = payload_map.begin();
- it != payload_map.end(); ++it) {
- types.Put(it->first);
- }
- return types;
-}
-
-ModelTypePayloadMap ModelTypePayloadMapFromRoutingInfo(
- const browser_sync::ModelSafeRoutingInfo& routes,
- const std::string& payload) {
- ModelTypePayloadMap types_with_payloads;
- for (browser_sync::ModelSafeRoutingInfo::const_iterator i = routes.begin();
- i != routes.end(); ++i) {
- types_with_payloads[i->first] = payload;
- }
- return types_with_payloads;
-}
-
-std::string ModelTypePayloadMapToString(
- const ModelTypePayloadMap& type_payloads) {
- scoped_ptr<DictionaryValue> value(
- ModelTypePayloadMapToValue(type_payloads));
- std::string json;
- base::JSONWriter::Write(value.get(), false, &json);
- return json;
-}
-
-DictionaryValue* ModelTypePayloadMapToValue(
- const ModelTypePayloadMap& type_payloads) {
- DictionaryValue* value = new DictionaryValue();
- for (ModelTypePayloadMap::const_iterator it = type_payloads.begin();
- it != type_payloads.end(); ++it) {
- value->SetString(syncable::ModelTypeToString(it->first), it->second);
- }
- return value;
-}
-
-void CoalescePayloads(ModelTypePayloadMap* original,
- const ModelTypePayloadMap& update) {
- for (ModelTypePayloadMap::const_iterator i = update.begin();
- i != update.end(); ++i) {
- if (original->count(i->first) == 0) {
- // If this datatype isn't already in our map, add it with
- // whatever payload it has.
- (*original)[i->first] = i->second;
- } else if (i->second.length() > 0) {
- // If this datatype is already in our map, we only overwrite the
- // payload if the new one is non-empty.
- (*original)[i->first] = i->second;
- }
- }
-}
-
-void PurgeStalePayload(ModelTypePayloadMap* original,
- const ModelSafeRoutingInfo& routing_info) {
- std::vector<ModelTypePayloadMap::iterator> iterators_to_delete;
- for (ModelTypePayloadMap::iterator i = original->begin();
- i != original->end(); ++i) {
- if (routing_info.end() == routing_info.find(i->first)) {
- iterators_to_delete.push_back(i);
- }
- }
-
- for (std::vector<ModelTypePayloadMap::iterator>::iterator
- it = iterators_to_delete.begin(); it != iterators_to_delete.end();
- ++it) {
- original->erase(*it);
- }
-}
-
-} // namespace syncable
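A small sketch of CoalescePayloads() semantics as implemented above: entries from |update| are added to |original|, but an existing payload is only overwritten when the update's payload is non-empty. The map contents and function name are illustrative.

  #include "chrome/browser/sync/syncable/model_type_payload_map.h"

  void CoalesceSketch() {
    syncable::ModelTypePayloadMap original;
    original[syncable::BOOKMARKS] = "tag1";

    syncable::ModelTypePayloadMap update;
    update[syncable::BOOKMARKS] = "";     // Empty payload: "tag1" is kept.
    update[syncable::SESSIONS] = "tag2";  // New type: added as-is.

    syncable::CoalescePayloads(&original, update);
    // original[BOOKMARKS] == "tag1" and original[SESSIONS] == "tag2".
  }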
diff --git a/chrome/browser/sync/syncable/model_type_payload_map.h b/chrome/browser/sync/syncable/model_type_payload_map.h
deleted file mode 100644
index c390d20a..0000000
--- a/chrome/browser/sync/syncable/model_type_payload_map.h
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Definition of ModelTypePayloadMap and various utility functions.
-
-#ifndef CHROME_BROWSER_SYNC_SYNCABLE_MODEL_TYPE_PAYLOAD_MAP_H_
-#define CHROME_BROWSER_SYNC_SYNCABLE_MODEL_TYPE_PAYLOAD_MAP_H_
-#pragma once
-
-#include <map>
-#include <string>
-
-#include "chrome/browser/sync/engine/model_safe_worker.h"
-#include "chrome/browser/sync/syncable/model_type.h"
-
-namespace base {
-class DictionaryValue;
-}
-
-namespace syncable {
-
- // A container that maps each datatype to an optional string
- // payload.
-typedef std::map<ModelType, std::string> ModelTypePayloadMap;
-
-// Helper functions for building ModelTypePayloadMaps.
-
- // Make a ModelTypePayloadMap from all the types in a ModelTypeSet using a
-// default payload.
-ModelTypePayloadMap ModelTypePayloadMapFromEnumSet(
- ModelTypeSet model_types, const std::string& payload);
-
-ModelTypeSet ModelTypePayloadMapToEnumSet(
- const ModelTypePayloadMap& payload_map);
-
- // Make a ModelTypePayloadMap for all the enabled types in a
-// ModelSafeRoutingInfo using a default payload.
-ModelTypePayloadMap ModelTypePayloadMapFromRoutingInfo(
- const browser_sync::ModelSafeRoutingInfo& routes,
- const std::string& payload);
-
-std::string ModelTypePayloadMapToString(
- const ModelTypePayloadMap& model_type_payloads);
-
-// Caller takes ownership of the returned dictionary.
-base::DictionaryValue* ModelTypePayloadMapToValue(
- const ModelTypePayloadMap& model_type_payloads);
-
-// Coalesce |update| into |original|, overwriting only when |update| has
-// a non-empty payload.
-void CoalescePayloads(ModelTypePayloadMap* original,
- const ModelTypePayloadMap& update);
-
-void PurgeStalePayload(ModelTypePayloadMap* original,
- const browser_sync::ModelSafeRoutingInfo& routing_info);
-
-} // namespace syncable
-
-#endif // CHROME_BROWSER_SYNC_SYNCABLE_MODEL_TYPE_PAYLOAD_MAP_H_
diff --git a/chrome/browser/sync/syncable/model_type_payload_map_unittest.cc b/chrome/browser/sync/syncable/model_type_payload_map_unittest.cc
deleted file mode 100644
index 28e30f9..0000000
--- a/chrome/browser/sync/syncable/model_type_payload_map_unittest.cc
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "chrome/browser/sync/syncable/model_type_payload_map.h"
-
-#include <string>
-
-#include "base/memory/scoped_ptr.h"
-#include "base/test/values_test_util.h"
-#include "base/values.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace syncable {
-namespace {
-
-using base::ExpectDictStringValue;
-
-class ModelTypePayloadMapTest : public testing::Test {};
-
-TEST_F(ModelTypePayloadMapTest, TypePayloadMapToSet) {
- ModelTypePayloadMap payloads;
- payloads[BOOKMARKS] = "bookmarkpayload";
- payloads[APPS] = "";
-
- const ModelTypeSet types(BOOKMARKS, APPS);
- EXPECT_TRUE(ModelTypePayloadMapToEnumSet(payloads).Equals(types));
-}
-
-TEST_F(ModelTypePayloadMapTest, TypePayloadMapToValue) {
- ModelTypePayloadMap payloads;
- payloads[BOOKMARKS] = "bookmarkpayload";
- payloads[APPS] = "";
-
- scoped_ptr<DictionaryValue> value(ModelTypePayloadMapToValue(payloads));
- EXPECT_EQ(2u, value->size());
- ExpectDictStringValue("bookmarkpayload", *value, "Bookmarks");
- ExpectDictStringValue("", *value, "Apps");
- EXPECT_FALSE(value->HasKey("Preferences"));
-}
-
-} // namespace
-} // namespace syncable
diff --git a/chrome/browser/sync/syncable/model_type_test_util.cc b/chrome/browser/sync/syncable/model_type_test_util.cc
deleted file mode 100644
index 49d0b73..0000000
--- a/chrome/browser/sync/syncable/model_type_test_util.cc
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "chrome/browser/sync/syncable/model_type_test_util.h"
-
-namespace syncable {
-
-void PrintTo(ModelTypeSet model_types, ::std::ostream* os) {
- *os << ModelTypeSetToString(model_types);
-}
-
-namespace {
-
-// Matcher implementation for HasModelTypes().
-class HasModelTypesMatcher
- : public ::testing::MatcherInterface<ModelTypeSet> {
- public:
- explicit HasModelTypesMatcher(ModelTypeSet expected_types)
- : expected_types_(expected_types) {}
-
- virtual ~HasModelTypesMatcher() {}
-
- virtual bool MatchAndExplain(
- ModelTypeSet model_types,
- ::testing::MatchResultListener* listener) const {
- // No need to annotate listener since we already define PrintTo().
- return model_types.Equals(expected_types_);
- }
-
- virtual void DescribeTo(::std::ostream* os) const {
- *os << "has model types " << ModelTypeSetToString(expected_types_);
- }
-
- virtual void DescribeNegationTo(::std::ostream* os) const {
- *os << "doesn't have model types "
- << ModelTypeSetToString(expected_types_);
- }
-
- private:
- const ModelTypeSet expected_types_;
-
- DISALLOW_COPY_AND_ASSIGN(HasModelTypesMatcher);
-};
-
-} // namespace
-
-::testing::Matcher<ModelTypeSet> HasModelTypes(ModelTypeSet expected_types) {
- return ::testing::MakeMatcher(new HasModelTypesMatcher(expected_types));
-}
-
-} // namespace syncable
diff --git a/chrome/browser/sync/syncable/model_type_test_util.h b/chrome/browser/sync/syncable/model_type_test_util.h
deleted file mode 100644
index a3b707e..0000000
--- a/chrome/browser/sync/syncable/model_type_test_util.h
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CHROME_BROWSER_SYNC_SYNCABLE_MODEL_TYPE_TEST_UTIL_H_
-#define CHROME_BROWSER_SYNC_SYNCABLE_MODEL_TYPE_TEST_UTIL_H_
-#pragma once
-
-#include <ostream>
-
-#include "chrome/browser/sync/syncable/model_type.h"
-#include "testing/gmock/include/gmock/gmock.h"
-
-namespace syncable {
-
-// Defined for googletest. Forwards to ModelTypeSetToString().
-void PrintTo(ModelTypeSet model_types, ::std::ostream* os);
-
-// A gmock matcher for ModelTypeSet. Use like:
-//
-// EXPECT_CALL(mock, ProcessModelTypes(HasModelTypes(expected_types)));
-::testing::Matcher<ModelTypeSet> HasModelTypes(ModelTypeSet expected_types);
-
-} // namespace syncable
-
-#endif // CHROME_BROWSER_SYNC_SYNCABLE_MODEL_TYPE_TEST_UTIL_H_
diff --git a/chrome/browser/sync/syncable/model_type_unittest.cc b/chrome/browser/sync/syncable/model_type_unittest.cc
deleted file mode 100644
index f288674..0000000
--- a/chrome/browser/sync/syncable/model_type_unittest.cc
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "chrome/browser/sync/syncable/model_type.h"
-
-#include <string>
-
-#include "base/memory/scoped_ptr.h"
-#include "base/test/values_test_util.h"
-#include "base/values.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace syncable {
-namespace {
-
-class ModelTypeTest : public testing::Test {};
-
-TEST_F(ModelTypeTest, ModelTypeToValue) {
- for (int i = syncable::FIRST_REAL_MODEL_TYPE;
- i < syncable::MODEL_TYPE_COUNT; ++i) {
- ModelType model_type = ModelTypeFromInt(i);
- base::ExpectStringValue(ModelTypeToString(model_type),
- ModelTypeToValue(model_type));
- }
- base::ExpectStringValue("Top-level folder",
- ModelTypeToValue(TOP_LEVEL_FOLDER));
- base::ExpectStringValue("Unspecified",
- ModelTypeToValue(UNSPECIFIED));
-}
-
-TEST_F(ModelTypeTest, ModelTypeFromValue) {
- for (int i = syncable::FIRST_REAL_MODEL_TYPE;
- i < syncable::MODEL_TYPE_COUNT; ++i) {
- ModelType model_type = ModelTypeFromInt(i);
- scoped_ptr<StringValue> value(ModelTypeToValue(model_type));
- EXPECT_EQ(model_type, ModelTypeFromValue(*value));
- }
-}
-
-TEST_F(ModelTypeTest, ModelTypeSetToValue) {
- const ModelTypeSet model_types(syncable::BOOKMARKS, syncable::APPS);
-
- scoped_ptr<ListValue> value(ModelTypeSetToValue(model_types));
- EXPECT_EQ(2u, value->GetSize());
- std::string types[2];
- EXPECT_TRUE(value->GetString(0, &types[0]));
- EXPECT_TRUE(value->GetString(1, &types[1]));
- EXPECT_EQ("Bookmarks", types[0]);
- EXPECT_EQ("Apps", types[1]);
-}
-
-TEST_F(ModelTypeTest, ModelTypeSetFromValue) {
- // Try empty set first.
- ModelTypeSet model_types;
- scoped_ptr<ListValue> value(ModelTypeSetToValue(model_types));
- EXPECT_TRUE(model_types.Equals(ModelTypeSetFromValue(*value)));
-
- // Now try with a few random types.
- model_types.Put(BOOKMARKS);
- model_types.Put(APPS);
- value.reset(ModelTypeSetToValue(model_types));
- EXPECT_TRUE(model_types.Equals(ModelTypeSetFromValue(*value)));
-}
-
-TEST_F(ModelTypeTest, IsRealDataType) {
- EXPECT_FALSE(IsRealDataType(UNSPECIFIED));
- EXPECT_FALSE(IsRealDataType(MODEL_TYPE_COUNT));
- EXPECT_FALSE(IsRealDataType(TOP_LEVEL_FOLDER));
- EXPECT_TRUE(IsRealDataType(FIRST_REAL_MODEL_TYPE));
- EXPECT_TRUE(IsRealDataType(BOOKMARKS));
- EXPECT_TRUE(IsRealDataType(APPS));
-}
-
-} // namespace
-} // namespace syncable
diff --git a/chrome/browser/sync/syncable/on_disk_directory_backing_store.cc b/chrome/browser/sync/syncable/on_disk_directory_backing_store.cc
deleted file mode 100644
index be4c686..0000000
--- a/chrome/browser/sync/syncable/on_disk_directory_backing_store.cc
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "chrome/browser/sync/syncable/on_disk_directory_backing_store.h"
-
-#include "base/logging.h"
-
-namespace syncable {
-
-OnDiskDirectoryBackingStore::OnDiskDirectoryBackingStore(
- const std::string& dir_name, const FilePath& backing_filepath)
- : DirectoryBackingStore(dir_name),
- backing_filepath_(backing_filepath) {
- db_->set_exclusive_locking();
- db_->set_page_size(4096);
-}
-
-DirOpenResult OnDiskDirectoryBackingStore::Load(
- MetahandlesIndex* entry_bucket,
- Directory::KernelLoadInfo* kernel_load_info) {
- DCHECK(CalledOnValidThread());
- if (!db_->is_open()) {
- if (!db_->Open(backing_filepath_))
- return FAILED_OPEN_DATABASE;
- }
-
- if (!InitializeTables())
- return FAILED_OPEN_DATABASE;
-
- if (!DropDeletedEntries())
- return FAILED_DATABASE_CORRUPT;
- if (!LoadEntries(entry_bucket))
- return FAILED_DATABASE_CORRUPT;
- if (!LoadInfo(kernel_load_info))
- return FAILED_DATABASE_CORRUPT;
-
- return OPENED;
-}
-
-} // namespace syncable
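A sketch of driving OnDiskDirectoryBackingStore directly, mirroring the way Directory::OpenImpl() uses it later in this change; the function name, directory name, and error handling are illustrative, and in real use the loaded entries are handed to a Directory, which takes ownership of them.

  #include "chrome/browser/sync/syncable/on_disk_directory_backing_store.h"
  #include "chrome/browser/sync/syncable/syncable.h"

  syncable::DirOpenResult LoadSketch(const FilePath& db_path) {
    syncable::OnDiskDirectoryBackingStore store("my_dir", db_path);
    syncable::MetahandlesIndex entries;
    syncable::Directory::KernelLoadInfo info;
    const syncable::DirOpenResult result = store.Load(&entries, &info);
    if (result != syncable::OPENED) {
      // FAILED_OPEN_DATABASE or FAILED_DATABASE_CORRUPT.
      return result;
    }
    // |entries| now holds the heap-allocated EntryKernels read from disk.
    return syncable::OPENED;
  }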
diff --git a/chrome/browser/sync/syncable/on_disk_directory_backing_store.h b/chrome/browser/sync/syncable/on_disk_directory_backing_store.h
deleted file mode 100644
index 422ef40..0000000
--- a/chrome/browser/sync/syncable/on_disk_directory_backing_store.h
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CHROME_BROWSER_SYNC_SYNCABLE_ON_DISK_DIRECTORY_BACKING_STORE_H_
-#define CHROME_BROWSER_SYNC_SYNCABLE_ON_DISK_DIRECTORY_BACKING_STORE_H_
-#pragma once
-
-#include "base/file_path.h"
-#include "chrome/browser/sync/syncable/directory_backing_store.h"
-
-namespace syncable {
-
-// This is the concrete class that provides a useful implementation of
-// DirectoryBackingStore.
-class OnDiskDirectoryBackingStore : public DirectoryBackingStore {
- public:
- OnDiskDirectoryBackingStore(const std::string& dir_name,
- const FilePath& backing_filepath);
- virtual DirOpenResult Load(
- MetahandlesIndex* entry_bucket,
- Directory::KernelLoadInfo* kernel_load_info) OVERRIDE;
-
- private:
- FilePath backing_filepath_;
-};
-
-} // namespace syncable
-
-#endif // CHROME_BROWSER_SYNC_SYNCABLE_ON_DISK_DIRECTORY_BACKING_STORE_H_
diff --git a/chrome/browser/sync/syncable/syncable-inl.h b/chrome/browser/sync/syncable/syncable-inl.h
deleted file mode 100644
index c5beed9..0000000
--- a/chrome/browser/sync/syncable/syncable-inl.h
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright (c) 2010 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CHROME_BROWSER_SYNC_SYNCABLE_SYNCABLE_INL_H_
-#define CHROME_BROWSER_SYNC_SYNCABLE_SYNCABLE_INL_H_
-#pragma once
-
-namespace syncable {
-
-template <typename FieldType, FieldType field_index>
-class LessField {
- public:
- inline bool operator() (const syncable::EntryKernel* a,
- const syncable::EntryKernel* b) const {
- return a->ref(field_index) < b->ref(field_index);
- }
-};
-
-} // namespace syncable
-
-#endif // CHROME_BROWSER_SYNC_SYNCABLE_SYNCABLE_INL_H_
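A sketch of how the LessField functor above is meant to be used: as a strict weak ordering over EntryKernel pointers keyed on a single field, so that indexes can be built as sorted sets. The typedef name is illustrative; the real index definitions live in syncable.h.

  #include <set>

  #include "chrome/browser/sync/syncable/syncable-inl.h"
  #include "chrome/browser/sync/syncable/syncable.h"

  // Orders entries by their META_HANDLE field.
  typedef std::set<syncable::EntryKernel*,
                   syncable::LessField<syncable::MetahandleField,
                                       syncable::META_HANDLE> >
      MetahandleOrderedSet;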
diff --git a/chrome/browser/sync/syncable/syncable.cc b/chrome/browser/sync/syncable/syncable.cc
deleted file mode 100644
index cdfde0c..0000000
--- a/chrome/browser/sync/syncable/syncable.cc
+++ /dev/null
@@ -1,2405 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "chrome/browser/sync/syncable/syncable.h"
-
-#include <algorithm>
-#include <cstring>
-#include <functional>
-#include <iomanip>
-#include <iterator>
-#include <limits>
-#include <set>
-#include <string>
-
-#include "base/basictypes.h"
-#include "base/debug/trace_event.h"
-#include "base/compiler_specific.h"
-#include "base/debug/trace_event.h"
-#include "base/file_util.h"
-#include "base/hash_tables.h"
-#include "base/location.h"
-#include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/perftimer.h"
-#include "base/stl_util.h"
-#include "base/string_number_conversions.h"
-#include "base/string_util.h"
-#include "base/time.h"
-#include "base/utf_string_conversions.h"
-#include "base/values.h"
-#include "chrome/browser/sync/protocol/proto_value_conversions.h"
-#include "chrome/browser/sync/protocol/service_constants.h"
-#include "chrome/browser/sync/syncable/directory_backing_store.h"
-#include "chrome/browser/sync/syncable/directory_change_delegate.h"
-#include "chrome/browser/sync/syncable/in_memory_directory_backing_store.h"
-#include "chrome/browser/sync/syncable/model_type.h"
-#include "chrome/browser/sync/syncable/on_disk_directory_backing_store.h"
-#include "chrome/browser/sync/syncable/syncable-inl.h"
-#include "chrome/browser/sync/syncable/syncable_changes_version.h"
-#include "chrome/browser/sync/syncable/syncable_columns.h"
-#include "chrome/browser/sync/syncable/syncable_enum_conversions.h"
-#include "chrome/browser/sync/syncable/transaction_observer.h"
-#include "chrome/browser/sync/util/logging.h"
-#include "chrome/browser/sync/util/cryptographer.h"
-#include "net/base/escape.h"
-
-namespace {
-
-enum InvariantCheckLevel {
- OFF = 0,
- VERIFY_IN_MEMORY = 1,
- FULL_DB_VERIFICATION = 2
-};
-
-const InvariantCheckLevel kInvariantCheckLevel = VERIFY_IN_MEMORY;
-
-// Max number of milliseconds to spend checking syncable entry invariants
-const int kInvariantCheckMaxMs = 50;
-
-// This function checks to see if the given list of Metahandles has any nodes
-// whose PREV_ID, PARENT_ID or NEXT_ID values refer to ID values that do not
-// actually exist. Returns true on success.
-//
-// This function is "Unsafe" because it does not attempt to acquire any locks
-// that may be protecting this list that gets passed in. The caller is
-// responsible for ensuring that no one modifies this list while the function is
-// running.
-bool VerifyReferenceIntegrityUnsafe(const syncable::MetahandlesIndex &index) {
- TRACE_EVENT0("sync", "SyncDatabaseIntegrityCheck");
- using namespace syncable;
- typedef base::hash_set<std::string> IdsSet;
-
- IdsSet ids_set;
- bool is_ok = true;
-
- for (MetahandlesIndex::const_iterator it = index.begin();
- it != index.end(); ++it) {
- EntryKernel* entry = *it;
- bool is_duplicate_id = !(ids_set.insert(entry->ref(ID).value()).second);
- is_ok = is_ok && !is_duplicate_id;
- }
-
- IdsSet::iterator end = ids_set.end();
- for (MetahandlesIndex::const_iterator it = index.begin();
- it != index.end(); ++it) {
- EntryKernel* entry = *it;
- bool prev_exists = (ids_set.find(entry->ref(PREV_ID).value()) != end);
- bool parent_exists = (ids_set.find(entry->ref(PARENT_ID).value()) != end);
- bool next_exists = (ids_set.find(entry->ref(NEXT_ID).value()) != end);
- is_ok = is_ok && prev_exists && parent_exists && next_exists;
- }
- return is_ok;
-}
-
-} // namespace
-
-using std::string;
-using browser_sync::Encryptor;
-using browser_sync::ReportUnrecoverableErrorFunction;
-using browser_sync::UnrecoverableErrorHandler;
-
-namespace syncable {
-
-namespace {
-
-// Function to handle runtime failures on syncable code. Rather than crashing,
-// if the |condition| is false the following will happen:
-// 1. Sets unrecoverable error on transaction.
-// 2. Returns false.
-bool SyncAssert(bool condition,
- const tracked_objects::Location& location,
- const char* msg,
- BaseTransaction* trans) {
- if (!condition) {
- trans->OnUnrecoverableError(location, msg);
- return false;
- }
- return true;
-}
-
-} // namespace
-
-#define ENUM_CASE(x) case x: return #x; break
-
-std::string WriterTagToString(WriterTag writer_tag) {
- switch (writer_tag) {
- ENUM_CASE(INVALID);
- ENUM_CASE(SYNCER);
- ENUM_CASE(AUTHWATCHER);
- ENUM_CASE(UNITTEST);
- ENUM_CASE(VACUUM_AFTER_SAVE);
- ENUM_CASE(PURGE_ENTRIES);
- ENUM_CASE(SYNCAPI);
- };
- NOTREACHED();
- return "";
-}
-
-#undef ENUM_CASE
-
-WriteTransactionInfo::WriteTransactionInfo(
- int64 id,
- tracked_objects::Location location,
- WriterTag writer,
- ImmutableEntryKernelMutationMap mutations)
- : id(id),
- location_string(location.ToString()),
- writer(writer),
- mutations(mutations) {}
-
-WriteTransactionInfo::WriteTransactionInfo()
- : id(-1), writer(INVALID) {}
-
-WriteTransactionInfo::~WriteTransactionInfo() {}
-
-base::DictionaryValue* WriteTransactionInfo::ToValue(
- size_t max_mutations_size) const {
- DictionaryValue* dict = new DictionaryValue();
- dict->SetString("id", base::Int64ToString(id));
- dict->SetString("location", location_string);
- dict->SetString("writer", WriterTagToString(writer));
- Value* mutations_value = NULL;
- const size_t mutations_size = mutations.Get().size();
- if (mutations_size <= max_mutations_size) {
- mutations_value = EntryKernelMutationMapToValue(mutations.Get());
- } else {
- mutations_value =
- Value::CreateStringValue(
- base::Uint64ToString(static_cast<uint64>(mutations_size)) +
- " mutations");
- }
- dict->Set("mutations", mutations_value);
- return dict;
-}
-
-DictionaryValue* EntryKernelMutationToValue(
- const EntryKernelMutation& mutation) {
- DictionaryValue* dict = new DictionaryValue();
- dict->Set("original", mutation.original.ToValue());
- dict->Set("mutated", mutation.mutated.ToValue());
- return dict;
-}
-
-ListValue* EntryKernelMutationMapToValue(
- const EntryKernelMutationMap& mutations) {
- ListValue* list = new ListValue();
- for (EntryKernelMutationMap::const_iterator it = mutations.begin();
- it != mutations.end(); ++it) {
- list->Append(EntryKernelMutationToValue(it->second));
- }
- return list;
-}
-
-namespace {
-
-// A ScopedIndexUpdater temporarily removes an entry from an index,
-// and restores it to the index when the scope exits. This simplifies
-// the common pattern where items need to be removed from an index
-// before updating the field.
-//
-// This class is parameterized on the Indexer traits type, which
-// must define a Comparator and a static bool ShouldInclude
-// function for testing whether the item ought to be included
-// in the index.
-template<typename Indexer>
-class ScopedIndexUpdater {
- public:
- ScopedIndexUpdater(const ScopedKernelLock& proof_of_lock,
- EntryKernel* entry,
- typename Index<Indexer>::Set* index)
- : entry_(entry),
- index_(index) {
- // First call to ShouldInclude happens before the field is updated.
- if (Indexer::ShouldInclude(entry_)) {
- // TODO(lipalani): Replace this CHECK with |SyncAssert| by refactoring
- // this class into a function.
- CHECK(index_->erase(entry_));
- }
- }
-
- ~ScopedIndexUpdater() {
- // Second call to ShouldInclude happens after the field is updated.
- if (Indexer::ShouldInclude(entry_)) {
- // TODO(lipalani): Replace this CHECK with |SyncAssert| by refactoring
- // this class into a function.
- CHECK(index_->insert(entry_).second);
- }
- }
- private:
- // The entry that was temporarily removed from the index.
- EntryKernel* entry_;
- // The index which we are updating.
- typename Index<Indexer>::Set* const index_;
-};
-
-// Helper function to add an item to the index, if it ought to be added.
-template<typename Indexer>
-void InitializeIndexEntry(EntryKernel* entry,
- typename Index<Indexer>::Set* index) {
- if (Indexer::ShouldInclude(entry)) {
- index->insert(entry);
- }
-}
-
-} // namespace
-
-///////////////////////////////////////////////////////////////////////////
-// Comparator and filter functions for the indices.
-
-// static
-bool ClientTagIndexer::ShouldInclude(const EntryKernel* a) {
- return !a->ref(UNIQUE_CLIENT_TAG).empty();
-}
-
-bool ParentIdAndHandleIndexer::Comparator::operator() (
- const syncable::EntryKernel* a,
- const syncable::EntryKernel* b) const {
- int cmp = a->ref(PARENT_ID).compare(b->ref(PARENT_ID));
- if (cmp != 0)
- return cmp < 0;
-
- int64 a_position = a->ref(SERVER_POSITION_IN_PARENT);
- int64 b_position = b->ref(SERVER_POSITION_IN_PARENT);
- if (a_position != b_position)
- return a_position < b_position;
-
- cmp = a->ref(ID).compare(b->ref(ID));
- return cmp < 0;
-}
-
-// static
-bool ParentIdAndHandleIndexer::ShouldInclude(const EntryKernel* a) {
- // This index excludes deleted items and the root item. The root
- // item is excluded so that it doesn't show up as a child of itself.
- return !a->ref(IS_DEL) && !a->ref(ID).IsRoot();
-}
-
-///////////////////////////////////////////////////////////////////////////
-// EntryKernel
-
-EntryKernel::EntryKernel() : dirty_(false) {
- // Everything else should already be default-initialized.
- for (int i = INT64_FIELDS_BEGIN; i < INT64_FIELDS_END; ++i) {
- int64_fields[i] = 0;
- }
-}
-
-EntryKernel::~EntryKernel() {}
-
-syncable::ModelType EntryKernel::GetServerModelType() const {
- ModelType specifics_type = GetModelTypeFromSpecifics(ref(SERVER_SPECIFICS));
- if (specifics_type != UNSPECIFIED)
- return specifics_type;
- if (ref(ID).IsRoot())
- return TOP_LEVEL_FOLDER;
- // Loose check for server-created top-level folders that aren't
- // bound to a particular model type.
- if (!ref(UNIQUE_SERVER_TAG).empty() && ref(SERVER_IS_DIR))
- return TOP_LEVEL_FOLDER;
-
- return UNSPECIFIED;
-}
-
-bool EntryKernel::ContainsString(const std::string& lowercase_query) const {
- // TODO(lipalani) - figure out what to do if the node is encrypted.
- const sync_pb::EntitySpecifics& specifics = ref(SPECIFICS);
- std::string temp;
- // The protobuf serialized string contains the original strings. So
- // we will just serialize it and search it.
- specifics.SerializeToString(&temp);
-
- // Now convert to lower case.
- StringToLowerASCII(&temp);
-
- if (temp.find(lowercase_query) != std::string::npos)
- return true;
-
- // Now go through all the string fields to see if the value is there.
- for (int i = STRING_FIELDS_BEGIN; i < STRING_FIELDS_END; ++i) {
- if (StringToLowerASCII(ref(static_cast<StringField>(i))).find(
- lowercase_query) != std::string::npos)
- return true;
- }
-
- for (int i = ID_FIELDS_BEGIN; i < ID_FIELDS_END; ++i) {
- const Id& id = ref(static_cast<IdField>(i));
- if (id.ContainsStringCaseInsensitive(lowercase_query)) {
- return true;
- }
- }
- return false;
-}
-
-namespace {
-
-// Utility function to loop through a set of enum values and add the
-// field keys/values in the kernel to the given dictionary.
-//
-// V should be convertible to Value.
-template <class T, class U, class V>
-void SetFieldValues(const EntryKernel& kernel,
- DictionaryValue* dictionary_value,
- const char* (*enum_key_fn)(T),
- V* (*enum_value_fn)(U),
- int field_key_min, int field_key_max) {
- DCHECK_LE(field_key_min, field_key_max);
- for (int i = field_key_min; i <= field_key_max; ++i) {
- T field = static_cast<T>(i);
- const std::string& key = enum_key_fn(field);
- V* value = enum_value_fn(kernel.ref(field));
- dictionary_value->Set(key, value);
- }
-}
-
-// Helper functions for SetFieldValues().
-
-StringValue* Int64ToValue(int64 i) {
- return Value::CreateStringValue(base::Int64ToString(i));
-}
-
-StringValue* TimeToValue(const base::Time& t) {
- return Value::CreateStringValue(browser_sync::GetTimeDebugString(t));
-}
-
-StringValue* IdToValue(const Id& id) {
- return id.ToValue();
-}
-
-} // namespace
-
-DictionaryValue* EntryKernel::ToValue() const {
- DictionaryValue* kernel_info = new DictionaryValue();
- kernel_info->SetBoolean("isDirty", is_dirty());
- kernel_info->Set("serverModelType", ModelTypeToValue(GetServerModelType()));
-
- // Int64 fields.
- SetFieldValues(*this, kernel_info,
- &GetMetahandleFieldString, &Int64ToValue,
- INT64_FIELDS_BEGIN, META_HANDLE);
- SetFieldValues(*this, kernel_info,
- &GetBaseVersionString, &Int64ToValue,
- META_HANDLE + 1, BASE_VERSION);
- SetFieldValues(*this, kernel_info,
- &GetInt64FieldString, &Int64ToValue,
- BASE_VERSION + 1, INT64_FIELDS_END - 1);
-
- // Time fields.
- SetFieldValues(*this, kernel_info,
- &GetTimeFieldString, &TimeToValue,
- TIME_FIELDS_BEGIN, TIME_FIELDS_END - 1);
-
- // ID fields.
- SetFieldValues(*this, kernel_info,
- &GetIdFieldString, &IdToValue,
- ID_FIELDS_BEGIN, ID_FIELDS_END - 1);
-
- // Bit fields.
- SetFieldValues(*this, kernel_info,
- &GetIndexedBitFieldString, &Value::CreateBooleanValue,
- BIT_FIELDS_BEGIN, INDEXED_BIT_FIELDS_END - 1);
- SetFieldValues(*this, kernel_info,
- &GetIsDelFieldString, &Value::CreateBooleanValue,
- INDEXED_BIT_FIELDS_END, IS_DEL);
- SetFieldValues(*this, kernel_info,
- &GetBitFieldString, &Value::CreateBooleanValue,
- IS_DEL + 1, BIT_FIELDS_END - 1);
-
- // String fields.
- {
- // Pick out the function overload we want.
- StringValue* (*string_to_value)(const std::string&) =
- &Value::CreateStringValue;
- SetFieldValues(*this, kernel_info,
- &GetStringFieldString, string_to_value,
- STRING_FIELDS_BEGIN, STRING_FIELDS_END - 1);
- }
-
- // Proto fields.
- SetFieldValues(*this, kernel_info,
- &GetProtoFieldString, &browser_sync::EntitySpecificsToValue,
- PROTO_FIELDS_BEGIN, PROTO_FIELDS_END - 1);
-
- // Bit temps.
- SetFieldValues(*this, kernel_info,
- &GetBitTempString, &Value::CreateBooleanValue,
- BIT_TEMPS_BEGIN, BIT_TEMPS_END - 1);
-
- return kernel_info;
-}
-
-///////////////////////////////////////////////////////////////////////////
-// Directory
-
-// static
-const FilePath::CharType Directory::kSyncDatabaseFilename[] =
- FILE_PATH_LITERAL("SyncData.sqlite3");
-
-void Directory::InitKernelForTest(
- const std::string& name,
- DirectoryChangeDelegate* delegate,
- const browser_sync::WeakHandle<TransactionObserver>&
- transaction_observer) {
- DCHECK(!kernel_);
- kernel_ = new Kernel(name, KernelLoadInfo(), delegate, transaction_observer);
-}
-
-Directory::PersistedKernelInfo::PersistedKernelInfo()
- : next_id(0) {
- for (int i = FIRST_REAL_MODEL_TYPE; i < MODEL_TYPE_COUNT; ++i) {
- reset_download_progress(ModelTypeFromInt(i));
- }
-}
-
-Directory::PersistedKernelInfo::~PersistedKernelInfo() {}
-
-void Directory::PersistedKernelInfo::reset_download_progress(
- ModelType model_type) {
- download_progress[model_type].set_data_type_id(
- GetSpecificsFieldNumberFromModelType(model_type));
- // An empty-string token indicates no prior knowledge.
- download_progress[model_type].set_token(std::string());
-}
-
-Directory::SaveChangesSnapshot::SaveChangesSnapshot()
- : kernel_info_status(KERNEL_SHARE_INFO_INVALID) {
-}
-
-Directory::SaveChangesSnapshot::~SaveChangesSnapshot() {}
-
-Directory::Kernel::Kernel(
- const std::string& name,
- const KernelLoadInfo& info, DirectoryChangeDelegate* delegate,
- const browser_sync::WeakHandle<TransactionObserver>&
- transaction_observer)
- : refcount(1),
- next_write_transaction_id(0),
- name(name),
- metahandles_index(new Directory::MetahandlesIndex),
- ids_index(new Directory::IdsIndex),
- parent_id_child_index(new Directory::ParentIdChildIndex),
- client_tag_index(new Directory::ClientTagIndex),
- unsynced_metahandles(new MetahandleSet),
- dirty_metahandles(new MetahandleSet),
- metahandles_to_purge(new MetahandleSet),
- info_status(Directory::KERNEL_SHARE_INFO_VALID),
- persisted_info(info.kernel_info),
- cache_guid(info.cache_guid),
- next_metahandle(info.max_metahandle + 1),
- delegate(delegate),
- transaction_observer(transaction_observer) {
- DCHECK(delegate);
- DCHECK(transaction_observer.IsInitialized());
-}
-
-void Directory::Kernel::AddRef() {
- base::subtle::NoBarrier_AtomicIncrement(&refcount, 1);
-}
-
-void Directory::Kernel::Release() {
- if (!base::subtle::NoBarrier_AtomicIncrement(&refcount, -1))
- delete this;
-}
-
-Directory::Kernel::~Kernel() {
- CHECK_EQ(0, refcount);
- delete unsynced_metahandles;
- delete dirty_metahandles;
- delete metahandles_to_purge;
- delete parent_id_child_index;
- delete client_tag_index;
- delete ids_index;
- STLDeleteElements(metahandles_index);
- delete metahandles_index;
-}
-
-Directory::Directory(
- Encryptor* encryptor,
- UnrecoverableErrorHandler* unrecoverable_error_handler,
- ReportUnrecoverableErrorFunction report_unrecoverable_error_function)
- : cryptographer_(encryptor),
- kernel_(NULL),
- store_(NULL),
- unrecoverable_error_handler_(unrecoverable_error_handler),
- report_unrecoverable_error_function_(
- report_unrecoverable_error_function),
- unrecoverable_error_set_(false) {
-}
-
-Directory::~Directory() {
- Close();
-}
-
-DirOpenResult Directory::Open(
- const FilePath& file_path, const string& name,
- DirectoryChangeDelegate* delegate,
- const browser_sync::WeakHandle<TransactionObserver>&
- transaction_observer) {
- TRACE_EVENT0("sync", "SyncDatabaseOpen");
-
- FilePath db_path(file_path);
- file_util::AbsolutePath(&db_path);
- DirectoryBackingStore* store = new OnDiskDirectoryBackingStore(name, db_path);
-
- const DirOpenResult result =
- OpenImpl(store, name, delegate, transaction_observer);
-
- if (OPENED != result)
- Close();
- return result;
-}
-
-DirOpenResult Directory::OpenInMemoryForTest(
- const string& name, DirectoryChangeDelegate* delegate,
- const browser_sync::WeakHandle<TransactionObserver>&
- transaction_observer) {
-
- DirectoryBackingStore* store = new InMemoryDirectoryBackingStore(name);
-
- const DirOpenResult result =
- OpenImpl(store, name, delegate, transaction_observer);
- if (OPENED != result)
- Close();
- return result;
-}
-
-void Directory::InitializeIndices() {
- MetahandlesIndex::iterator it = kernel_->metahandles_index->begin();
- for (; it != kernel_->metahandles_index->end(); ++it) {
- EntryKernel* entry = *it;
- InitializeIndexEntry<ParentIdAndHandleIndexer>(entry,
- kernel_->parent_id_child_index);
- InitializeIndexEntry<IdIndexer>(entry, kernel_->ids_index);
- InitializeIndexEntry<ClientTagIndexer>(entry, kernel_->client_tag_index);
- const int64 metahandle = entry->ref(META_HANDLE);
- if (entry->ref(IS_UNSYNCED))
- kernel_->unsynced_metahandles->insert(metahandle);
- if (entry->ref(IS_UNAPPLIED_UPDATE)) {
- const ModelType type = entry->GetServerModelType();
- kernel_->unapplied_update_metahandles[type].insert(metahandle);
- }
- DCHECK(!entry->is_dirty());
- }
-}
-
-DirOpenResult Directory::OpenImpl(
- DirectoryBackingStore* store,
- const string& name,
- DirectoryChangeDelegate* delegate,
- const browser_sync::WeakHandle<TransactionObserver>&
- transaction_observer) {
- DCHECK_EQ(static_cast<DirectoryBackingStore*>(NULL), store_);
- store_ = store;
-
- KernelLoadInfo info;
- // Temporary indices before kernel_ is initialized, in case Load fails.
- // These are swapped into the kernel with an O(1) swap later.
- MetahandlesIndex metas_bucket;
- DirOpenResult result = store_->Load(&metas_bucket, &info);
- if (OPENED != result)
- return result;
-
- if (!VerifyReferenceIntegrityUnsafe(metas_bucket))
- return FAILED_LOGICAL_CORRUPTION;
-
- kernel_ = new Kernel(name, info, delegate, transaction_observer);
- kernel_->metahandles_index->swap(metas_bucket);
- InitializeIndices();
- return OPENED;
-}
-
-void Directory::Close() {
- if (store_)
- delete store_;
- store_ = NULL;
- if (kernel_) {
- bool del = !base::subtle::NoBarrier_AtomicIncrement(&kernel_->refcount, -1);
- DCHECK(del) << "Kernel should only have a single ref";
- if (del)
- delete kernel_;
- kernel_ = NULL;
- }
-}
-
-void Directory::OnUnrecoverableError(const BaseTransaction* trans,
- const tracked_objects::Location& location,
- const std::string & message) {
- DCHECK(trans != NULL);
- unrecoverable_error_set_ = true;
- unrecoverable_error_handler_->OnUnrecoverableError(location,
- message);
-}
-
-
-EntryKernel* Directory::GetEntryById(const Id& id) {
- ScopedKernelLock lock(this);
- return GetEntryById(id, &lock);
-}
-
-EntryKernel* Directory::GetEntryById(const Id& id,
- ScopedKernelLock* const lock) {
- DCHECK(kernel_);
- // Find it in the in memory ID index.
- kernel_->needle.put(ID, id);
- IdsIndex::iterator id_found = kernel_->ids_index->find(&kernel_->needle);
- if (id_found != kernel_->ids_index->end()) {
- return *id_found;
- }
- return NULL;
-}
-
-EntryKernel* Directory::GetEntryByClientTag(const string& tag) {
- ScopedKernelLock lock(this);
- DCHECK(kernel_);
- // Find it in the ClientTagIndex.
- kernel_->needle.put(UNIQUE_CLIENT_TAG, tag);
- ClientTagIndex::iterator found = kernel_->client_tag_index->find(
- &kernel_->needle);
- if (found != kernel_->client_tag_index->end()) {
- return *found;
- }
- return NULL;
-}
-
-EntryKernel* Directory::GetEntryByServerTag(const string& tag) {
- ScopedKernelLock lock(this);
- DCHECK(kernel_);
- // We don't currently keep a separate index for the tags. Since tags
- // only exist for server created items that are the first items
- // to be created in a store, they should have small metahandles.
- // So, we just iterate over the items in sorted metahandle order,
- // looking for a match.
- MetahandlesIndex& set = *kernel_->metahandles_index;
- for (MetahandlesIndex::iterator i = set.begin(); i != set.end(); ++i) {
- if ((*i)->ref(UNIQUE_SERVER_TAG) == tag) {
- return *i;
- }
- }
- return NULL;
-}
-
-EntryKernel* Directory::GetEntryByHandle(int64 metahandle) {
- ScopedKernelLock lock(this);
- return GetEntryByHandle(metahandle, &lock);
-}
-
-EntryKernel* Directory::GetEntryByHandle(int64 metahandle,
- ScopedKernelLock* lock) {
- // Look up in memory
- kernel_->needle.put(META_HANDLE, metahandle);
- MetahandlesIndex::iterator found =
- kernel_->metahandles_index->find(&kernel_->needle);
- if (found != kernel_->metahandles_index->end()) {
- // Found it in memory. Easy.
- return *found;
- }
- return NULL;
-}
-
-bool Directory::GetChildHandlesById(
- BaseTransaction* trans, const Id& parent_id,
- Directory::ChildHandles* result) {
- if (!SyncAssert(this == trans->directory(), FROM_HERE,
- "Directories don't match", trans))
- return false;
- result->clear();
-
- ScopedKernelLock lock(this);
- AppendChildHandles(lock, parent_id, result);
- return true;
-}
-
-bool Directory::GetChildHandlesByHandle(
- BaseTransaction* trans, int64 handle,
- Directory::ChildHandles* result) {
- if (!SyncAssert(this == trans->directory(), FROM_HERE,
- "Directories don't match", trans))
- return false;
-
- result->clear();
-
- ScopedKernelLock lock(this);
- EntryKernel* kernel = GetEntryByHandle(handle, &lock);
- if (!kernel)
- return true;
-
- AppendChildHandles(lock, kernel->ref(ID), result);
- return true;
-}
-
-EntryKernel* Directory::GetRootEntry() {
- return GetEntryById(Id());
-}
-
-bool Directory::InsertEntry(WriteTransaction* trans, EntryKernel* entry) {
- ScopedKernelLock lock(this);
- return InsertEntry(trans, entry, &lock);
-}
-
-bool Directory::InsertEntry(WriteTransaction* trans,
- EntryKernel* entry,
- ScopedKernelLock* lock) {
- DCHECK(NULL != lock);
- if (!SyncAssert(NULL != entry, FROM_HERE, "Entry is null", trans))
- return false;
-
- static const char error[] = "Entry already in memory index.";
- if (!SyncAssert(kernel_->metahandles_index->insert(entry).second,
- FROM_HERE,
- error,
- trans))
- return false;
-
- if (!entry->ref(IS_DEL)) {
- if (!SyncAssert(kernel_->parent_id_child_index->insert(entry).second,
- FROM_HERE,
- error,
- trans)) {
- return false;
- }
- }
- if (!SyncAssert(kernel_->ids_index->insert(entry).second,
- FROM_HERE,
- error,
- trans))
- return false;
-
- // Should NEVER be created with a client tag.
- if (!SyncAssert(entry->ref(UNIQUE_CLIENT_TAG).empty(), FROM_HERE,
- "Client should be empty", trans))
- return false;
-
- return true;
-}
-
-bool Directory::ReindexId(WriteTransaction* trans,
- EntryKernel* const entry,
- const Id& new_id) {
- ScopedKernelLock lock(this);
- if (NULL != GetEntryById(new_id, &lock))
- return false;
-
- {
- // Update the indices that depend on the ID field.
- ScopedIndexUpdater<IdIndexer> updater_a(lock, entry, kernel_->ids_index);
- ScopedIndexUpdater<ParentIdAndHandleIndexer> updater_b(lock, entry,
- kernel_->parent_id_child_index);
- entry->put(ID, new_id);
- }
- return true;
-}
-
-bool Directory::ReindexParentId(WriteTransaction* trans,
- EntryKernel* const entry,
- const Id& new_parent_id) {
- ScopedKernelLock lock(this);
-
- {
- // Update the indices that depend on the PARENT_ID field.
- ScopedIndexUpdater<ParentIdAndHandleIndexer> index_updater(lock, entry,
- kernel_->parent_id_child_index);
- entry->put(PARENT_ID, new_parent_id);
- }
- return true;
-}
-
-bool Directory::unrecoverable_error_set(const BaseTransaction* trans) const {
- DCHECK(trans != NULL);
- return unrecoverable_error_set_;
-}
-
-void Directory::ClearDirtyMetahandles() {
- kernel_->transaction_mutex.AssertAcquired();
- kernel_->dirty_metahandles->clear();
-}
-
-bool Directory::SafeToPurgeFromMemory(WriteTransaction* trans,
- const EntryKernel* const entry) const {
- bool safe = entry->ref(IS_DEL) && !entry->is_dirty() &&
- !entry->ref(SYNCING) && !entry->ref(IS_UNAPPLIED_UPDATE) &&
- !entry->ref(IS_UNSYNCED);
-
- if (safe) {
- int64 handle = entry->ref(META_HANDLE);
- const ModelType type = entry->GetServerModelType();
- if (!SyncAssert(kernel_->dirty_metahandles->count(handle) == 0U,
- FROM_HERE,
- "Dirty metahandles should be empty", trans))
- return false;
- // TODO(tim): Bug 49278.
- if (!SyncAssert(!kernel_->unsynced_metahandles->count(handle),
- FROM_HERE,
- "Unsynced handles should be empty",
- trans))
- return false;
- if (!SyncAssert(!kernel_->unapplied_update_metahandles[type].count(handle),
- FROM_HERE,
- "Unapplied metahandles should be empty",
- trans))
- return false;
- }
-
- return safe;
-}
-
-void Directory::TakeSnapshotForSaveChanges(SaveChangesSnapshot* snapshot) {
- ReadTransaction trans(FROM_HERE, this);
- ScopedKernelLock lock(this);
-
- // If there is an unrecoverable error then just bail out.
- if (unrecoverable_error_set(&trans))
- return;
-
- // Deep copy dirty entries from kernel_->metahandles_index into snapshot and
- // clear dirty flags.
- for (MetahandleSet::const_iterator i = kernel_->dirty_metahandles->begin();
- i != kernel_->dirty_metahandles->end(); ++i) {
- EntryKernel* entry = GetEntryByHandle(*i, &lock);
- if (!entry)
- continue;
- // Skip over false positives; it happens relatively infrequently.
- if (!entry->is_dirty())
- continue;
- snapshot->dirty_metas.insert(snapshot->dirty_metas.end(), *entry);
- DCHECK_EQ(1U, kernel_->dirty_metahandles->count(*i));
- // We don't bother removing from the index here as we blow the entire thing
- // in a moment, and it unnecessarily complicates iteration.
- entry->clear_dirty(NULL);
- }
- ClearDirtyMetahandles();
-
- // Set purged handles.
- DCHECK(snapshot->metahandles_to_purge.empty());
- snapshot->metahandles_to_purge.swap(*(kernel_->metahandles_to_purge));
-
- // Fill kernel_info_status and kernel_info.
- snapshot->kernel_info = kernel_->persisted_info;
- // To avoid duplicates when the process crashes, we record a next_id of
- // greater magnitude than could possibly be reached before the next
- // SaveChanges. In other words, it's effectively impossible for the user to
- // generate 65536 new bookmarks in 3 seconds.
- snapshot->kernel_info.next_id -= 65536;
- snapshot->kernel_info_status = kernel_->info_status;
- // This one we reset on failure.
- kernel_->info_status = KERNEL_SHARE_INFO_VALID;
-}
-
-bool Directory::SaveChanges() {
- bool success = false;
- DCHECK(store_);
-
- base::AutoLock scoped_lock(kernel_->save_changes_mutex);
-
- // Snapshot and save.
- SaveChangesSnapshot snapshot;
- TakeSnapshotForSaveChanges(&snapshot);
- success = store_->SaveChanges(snapshot);
-
- // Handle success or failure.
- if (success)
- success = VacuumAfterSaveChanges(snapshot);
- else
- HandleSaveChangesFailure(snapshot);
- return success;
-}
-
-bool Directory::VacuumAfterSaveChanges(const SaveChangesSnapshot& snapshot) {
- if (snapshot.dirty_metas.empty())
- return true;
-
- // Need a write transaction as we are about to permanently purge entries.
- WriteTransaction trans(FROM_HERE, VACUUM_AFTER_SAVE, this);
- ScopedKernelLock lock(this);
- // Now drop everything we can out of memory.
- for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
- i != snapshot.dirty_metas.end(); ++i) {
- kernel_->needle.put(META_HANDLE, i->ref(META_HANDLE));
- MetahandlesIndex::iterator found =
- kernel_->metahandles_index->find(&kernel_->needle);
- EntryKernel* entry = (found == kernel_->metahandles_index->end() ?
- NULL : *found);
- if (entry && SafeToPurgeFromMemory(&trans, entry)) {
- // We now drop deleted metahandles that are up to date on both the client
- // and the server.
- size_t num_erased = 0;
- num_erased = kernel_->ids_index->erase(entry);
- DCHECK_EQ(1u, num_erased);
- num_erased = kernel_->metahandles_index->erase(entry);
- DCHECK_EQ(1u, num_erased);
-
- // The entry might not be in the client tag index.
- num_erased = kernel_->client_tag_index->erase(entry);
- DCHECK_EQ(entry->ref(UNIQUE_CLIENT_TAG).empty(), !num_erased);
- if (!SyncAssert(!kernel_->parent_id_child_index->count(entry),
- FROM_HERE,
- "Deleted entry still present",
- (&trans)))
- return false;
- delete entry;
- }
- if (trans.unrecoverable_error_set())
- return false;
- }
- return true;
-}
-
-void Directory::PurgeEntriesWithTypeIn(ModelTypeSet types) {
- if (types.Empty())
- return;
-
- {
- WriteTransaction trans(FROM_HERE, PURGE_ENTRIES, this);
- {
- ScopedKernelLock lock(this);
- MetahandlesIndex::iterator it = kernel_->metahandles_index->begin();
- while (it != kernel_->metahandles_index->end()) {
- const sync_pb::EntitySpecifics& local_specifics = (*it)->ref(SPECIFICS);
- const sync_pb::EntitySpecifics& server_specifics =
- (*it)->ref(SERVER_SPECIFICS);
- ModelType local_type = GetModelTypeFromSpecifics(local_specifics);
- ModelType server_type = GetModelTypeFromSpecifics(server_specifics);
-
- // Note the dance around incrementing |it|, since we sometimes erase().
- if ((IsRealDataType(local_type) && types.Has(local_type)) ||
- (IsRealDataType(server_type) && types.Has(server_type))) {
- if (!UnlinkEntryFromOrder(*it, NULL, &lock))
- return;
-
- int64 handle = (*it)->ref(META_HANDLE);
- kernel_->metahandles_to_purge->insert(handle);
-
- size_t num_erased = 0;
- EntryKernel* entry = *it;
- num_erased = kernel_->ids_index->erase(entry);
- DCHECK_EQ(1u, num_erased);
- num_erased = kernel_->client_tag_index->erase(entry);
- DCHECK_EQ(entry->ref(UNIQUE_CLIENT_TAG).empty(), !num_erased);
- num_erased = kernel_->unsynced_metahandles->erase(handle);
- DCHECK_EQ(entry->ref(IS_UNSYNCED), num_erased > 0);
- num_erased =
- kernel_->unapplied_update_metahandles[server_type].erase(handle);
- DCHECK_EQ(entry->ref(IS_UNAPPLIED_UPDATE), num_erased > 0);
- num_erased = kernel_->parent_id_child_index->erase(entry);
- DCHECK_EQ(entry->ref(IS_DEL), !num_erased);
- kernel_->metahandles_index->erase(it++);
- delete entry;
- } else {
- ++it;
- }
- }
-
- // Ensure meta tracking for these data types reflects the deleted state.
- for (syncable::ModelTypeSet::Iterator it = types.First();
- it.Good(); it.Inc()) {
- set_initial_sync_ended_for_type_unsafe(it.Get(), false);
- kernel_->persisted_info.reset_download_progress(it.Get());
- }
- }
- }
-}
-
-void Directory::HandleSaveChangesFailure(const SaveChangesSnapshot& snapshot) {
- ScopedKernelLock lock(this);
- kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
-
- // Because we optimistically cleared the dirty bit on the real entries when
- // taking the snapshot, we must restore it on failure. Not doing this could
- // cause lost data, if no other changes are made to the in-memory entries
- // that would cause the dirty bit to get set again. Setting the bit ensures
- // that SaveChanges will at least try again later.
- for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
- i != snapshot.dirty_metas.end(); ++i) {
- kernel_->needle.put(META_HANDLE, i->ref(META_HANDLE));
- MetahandlesIndex::iterator found =
- kernel_->metahandles_index->find(&kernel_->needle);
- if (found != kernel_->metahandles_index->end()) {
- (*found)->mark_dirty(kernel_->dirty_metahandles);
- }
- }
-
- kernel_->metahandles_to_purge->insert(snapshot.metahandles_to_purge.begin(),
- snapshot.metahandles_to_purge.end());
-}
-
-void Directory::GetDownloadProgress(
- ModelType model_type,
- sync_pb::DataTypeProgressMarker* value_out) const {
- ScopedKernelLock lock(this);
- return value_out->CopyFrom(
- kernel_->persisted_info.download_progress[model_type]);
-}
-
-void Directory::GetDownloadProgressAsString(
- ModelType model_type,
- std::string* value_out) const {
- ScopedKernelLock lock(this);
- kernel_->persisted_info.download_progress[model_type].SerializeToString(
- value_out);
-}
-
-size_t Directory::GetEntriesCount() const {
- ScopedKernelLock lock(this);
- return kernel_->metahandles_index ? kernel_->metahandles_index->size() : 0;
-}
-
-void Directory::SetDownloadProgress(
- ModelType model_type,
- const sync_pb::DataTypeProgressMarker& new_progress) {
- ScopedKernelLock lock(this);
- kernel_->persisted_info.download_progress[model_type].CopyFrom(new_progress);
- kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
-}
-
-bool Directory::initial_sync_ended_for_type(ModelType type) const {
- ScopedKernelLock lock(this);
- return kernel_->persisted_info.initial_sync_ended.Has(type);
-}
-
-template <class T> void Directory::TestAndSet(
- T* kernel_data, const T* data_to_set) {
- if (*kernel_data != *data_to_set) {
- *kernel_data = *data_to_set;
- kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
- }
-}
-
-void Directory::set_initial_sync_ended_for_type(ModelType type, bool x) {
- ScopedKernelLock lock(this);
- set_initial_sync_ended_for_type_unsafe(type, x);
-}
-
-void Directory::set_initial_sync_ended_for_type_unsafe(ModelType type,
- bool x) {
- if (kernel_->persisted_info.initial_sync_ended.Has(type) == x)
- return;
- if (x) {
- kernel_->persisted_info.initial_sync_ended.Put(type);
- } else {
- kernel_->persisted_info.initial_sync_ended.Remove(type);
- }
- kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
-}
-
-void Directory::SetNotificationStateUnsafe(
- const std::string& notification_state) {
- if (notification_state == kernel_->persisted_info.notification_state)
- return;
- kernel_->persisted_info.notification_state = notification_state;
- kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
-}
-
-string Directory::store_birthday() const {
- ScopedKernelLock lock(this);
- return kernel_->persisted_info.store_birthday;
-}
-
-void Directory::set_store_birthday(const string& store_birthday) {
- ScopedKernelLock lock(this);
- if (kernel_->persisted_info.store_birthday == store_birthday)
- return;
- kernel_->persisted_info.store_birthday = store_birthday;
- kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
-}
-
-std::string Directory::GetNotificationState() const {
- ScopedKernelLock lock(this);
- std::string notification_state = kernel_->persisted_info.notification_state;
- return notification_state;
-}
-
-void Directory::SetNotificationState(const std::string& notification_state) {
- ScopedKernelLock lock(this);
- SetNotificationStateUnsafe(notification_state);
-}
-
-string Directory::cache_guid() const {
- // No need to lock since nothing ever writes to it after load.
- return kernel_->cache_guid;
-}
-
-browser_sync::Cryptographer* Directory::GetCryptographer(
- const BaseTransaction* trans) {
- DCHECK_EQ(this, trans->directory());
- return &cryptographer_;
-}
-
-void Directory::GetAllMetaHandles(BaseTransaction* trans,
- MetahandleSet* result) {
- result->clear();
- ScopedKernelLock lock(this);
- MetahandlesIndex::iterator i;
- for (i = kernel_->metahandles_index->begin();
- i != kernel_->metahandles_index->end();
- ++i) {
- result->insert((*i)->ref(META_HANDLE));
- }
-}
-
-void Directory::GetAllEntryKernels(BaseTransaction* trans,
- std::vector<const EntryKernel*>* result) {
- result->clear();
- ScopedKernelLock lock(this);
- result->insert(result->end(),
- kernel_->metahandles_index->begin(),
- kernel_->metahandles_index->end());
-}
-
-void Directory::GetUnsyncedMetaHandles(BaseTransaction* trans,
- UnsyncedMetaHandles* result) {
- result->clear();
- ScopedKernelLock lock(this);
- copy(kernel_->unsynced_metahandles->begin(),
- kernel_->unsynced_metahandles->end(), back_inserter(*result));
-}
-
-int64 Directory::unsynced_entity_count() const {
- ScopedKernelLock lock(this);
- return kernel_->unsynced_metahandles->size();
-}
-
-FullModelTypeSet Directory::GetServerTypesWithUnappliedUpdates(
- BaseTransaction* trans) const {
- syncable::FullModelTypeSet server_types;
- ScopedKernelLock lock(this);
- for (int i = UNSPECIFIED; i < MODEL_TYPE_COUNT; ++i) {
- const ModelType type = ModelTypeFromInt(i);
- if (!kernel_->unapplied_update_metahandles[type].empty()) {
- server_types.Put(type);
- }
- }
- return server_types;
-}
-
-void Directory::GetUnappliedUpdateMetaHandles(
- BaseTransaction* trans,
- FullModelTypeSet server_types,
- UnappliedUpdateMetaHandles* result) {
- result->clear();
- ScopedKernelLock lock(this);
- for (int i = UNSPECIFIED; i < MODEL_TYPE_COUNT; ++i) {
- const ModelType type = ModelTypeFromInt(i);
- if (server_types.Has(type)) {
- std::copy(kernel_->unapplied_update_metahandles[type].begin(),
- kernel_->unapplied_update_metahandles[type].end(),
- back_inserter(*result));
- }
- }
-}
-
-
-class IdFilter {
- public:
- virtual ~IdFilter() { }
- virtual bool ShouldConsider(const Id& id) const = 0;
-};
-
-
-class FullScanFilter : public IdFilter {
- public:
- virtual bool ShouldConsider(const Id& id) const {
- return true;
- }
-};
-
-class SomeIdsFilter : public IdFilter {
- public:
- virtual bool ShouldConsider(const Id& id) const {
- return std::binary_search(ids_.begin(), ids_.end(), id);
- }
- std::vector<Id> ids_;
-};
-
-bool Directory::CheckTreeInvariants(syncable::BaseTransaction* trans,
- const EntryKernelMutationMap& mutations) {
- MetahandleSet handles;
- SomeIdsFilter filter;
- filter.ids_.reserve(mutations.size());
- for (EntryKernelMutationMap::const_iterator it = mutations.begin(),
- end = mutations.end(); it != end; ++it) {
- filter.ids_.push_back(it->second.mutated.ref(ID));
- handles.insert(it->first);
- }
- std::sort(filter.ids_.begin(), filter.ids_.end());
- if (!CheckTreeInvariants(trans, handles, filter))
- return false;
- return true;
-}
-
-bool Directory::CheckTreeInvariants(syncable::BaseTransaction* trans,
- bool full_scan) {
- // TODO(timsteele): This is called every time a WriteTransaction finishes.
- // The performance hit is substantial given that we now examine every single
- // syncable entry. Need to redesign this.
- MetahandleSet handles;
- GetAllMetaHandles(trans, &handles);
- if (full_scan) {
- FullScanFilter fullfilter;
- if (!CheckTreeInvariants(trans, handles, fullfilter))
- return false;
- } else {
- SomeIdsFilter filter;
- MetahandleSet::iterator i;
- for (i = handles.begin() ; i != handles.end() ; ++i) {
- Entry e(trans, GET_BY_HANDLE, *i);
- if (!SyncAssert(e.good(), FROM_HERE, "Entry is bad", trans))
- return false;
- filter.ids_.push_back(e.Get(ID));
- }
- std::sort(filter.ids_.begin(), filter.ids_.end());
- if (!CheckTreeInvariants(trans, handles, filter))
- return false;
- }
- return true;
-}
-
-bool Directory::CheckTreeInvariants(syncable::BaseTransaction* trans,
- const MetahandleSet& handles,
- const IdFilter& idfilter) {
- const int64 max_ms = kInvariantCheckMaxMs;
- PerfTimer check_timer;
- MetahandleSet::const_iterator i;
- int entries_done = 0;
- for (i = handles.begin() ; i != handles.end() ; ++i) {
- int64 metahandle = *i;
- Entry e(trans, GET_BY_HANDLE, metahandle);
- if (!SyncAssert(e.good(), FROM_HERE, "Entry is bad", trans))
- return false;
- syncable::Id id = e.Get(ID);
- syncable::Id parentid = e.Get(PARENT_ID);
-
- if (id.IsRoot()) {
- if (!SyncAssert(e.Get(IS_DIR), FROM_HERE,
- "Entry should be a directory",
- trans))
- return false;
- if (!SyncAssert(parentid.IsRoot(), FROM_HERE,
-                    "Entry's parent should be root",
- trans))
- return false;
- if (!SyncAssert(!e.Get(IS_UNSYNCED), FROM_HERE,
-                    "Entry should be synced",
- trans))
- return false;
- ++entries_done;
- continue;
- }
-
- if (!e.Get(IS_DEL)) {
- if (!SyncAssert(id != parentid, FROM_HERE,
- "Id should be different from parent id.",
- trans))
- return false;
- if (!SyncAssert(!e.Get(NON_UNIQUE_NAME).empty(), FROM_HERE,
-                    "Non-unique name should not be empty.",
- trans))
- return false;
- int safety_count = handles.size() + 1;
- while (!parentid.IsRoot()) {
- if (!idfilter.ShouldConsider(parentid))
- break;
- Entry parent(trans, GET_BY_ID, parentid);
- if (!SyncAssert(parent.good(), FROM_HERE,
- "Parent entry is not valid.",
- trans))
- return false;
- if (!SyncAssert(parent.Get(IS_DIR), FROM_HERE,
- "Parent should be a directory",
- trans))
- return false;
- if (!SyncAssert(!parent.Get(IS_DEL), FROM_HERE,
- "Parent should not have been marked for deletion.",
- trans))
- return false;
- if (!SyncAssert(handles.end() != handles.find(parent.Get(META_HANDLE)),
- FROM_HERE,
- "Parent should be in the index.",
- trans))
- return false;
- parentid = parent.Get(PARENT_ID);
- if (!SyncAssert(--safety_count > 0, FROM_HERE,
- "Count should be greater than zero.",
- trans))
- return false;
- }
- }
- int64 base_version = e.Get(BASE_VERSION);
- int64 server_version = e.Get(SERVER_VERSION);
- bool using_unique_client_tag = !e.Get(UNIQUE_CLIENT_TAG).empty();
- if (CHANGES_VERSION == base_version || 0 == base_version) {
- if (e.Get(IS_UNAPPLIED_UPDATE)) {
- // Must be a new item, or a de-duplicated unique client tag
- // that was created both locally and remotely.
- if (!using_unique_client_tag) {
- if (!SyncAssert(e.Get(IS_DEL), FROM_HERE,
- "The entry should not have been deleted.",
- trans))
- return false;
- }
- // It came from the server, so it must have a server ID.
- if (!SyncAssert(id.ServerKnows(), FROM_HERE,
- "The id should be from a server.",
- trans))
- return false;
- } else {
- if (e.Get(IS_DIR)) {
- // TODO(chron): Implement this mode if clients ever need it.
- // For now, you can't combine a client tag and a directory.
- if (!SyncAssert(!using_unique_client_tag, FROM_HERE,
- "Directory cannot have a client tag.",
- trans))
- return false;
- }
-      // Should be an uncommitted item, or a successfully deleted one.
- if (!e.Get(IS_DEL)) {
- if (!SyncAssert(e.Get(IS_UNSYNCED), FROM_HERE,
- "The item should be unsynced.",
- trans))
- return false;
- }
- // If the next check failed, it would imply that an item exists
- // on the server, isn't waiting for application locally, but either
-      // is an unsynced create or a successful delete in the local copy.
- // Either way, that's a mismatch.
- if (!SyncAssert(0 == server_version, FROM_HERE,
- "Server version should be zero.",
- trans))
- return false;
- // Items that aren't using the unique client tag should have a zero
- // base version only if they have a local ID. Items with unique client
- // tags are allowed to use the zero base version for undeletion and
- // de-duplication; the unique client tag trumps the server ID.
- if (!using_unique_client_tag) {
- if (!SyncAssert(!id.ServerKnows(), FROM_HERE,
- "Should be a client only id.",
- trans))
- return false;
- }
- }
- } else {
- if (!SyncAssert(id.ServerKnows(),
- FROM_HERE,
- "Should be a server id.",
- trans))
- return false;
- }
- ++entries_done;
- int64 elapsed_ms = check_timer.Elapsed().InMilliseconds();
- if (elapsed_ms > max_ms) {
- DVLOG(1) << "Cutting Invariant check short after " << elapsed_ms
- << "ms. Processed " << entries_done << "/" << handles.size()
- << " entries";
- return true;
- }
-
- }
- return true;
-}
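CheckTreeInvariants bounds its walk up the PARENT_ID chain with a safety counter of handles.size() + 1, so a corrupted parent graph (a cycle or a dangling link) fails the check instead of hanging the transaction. A simplified standalone illustration of that guard, with toy integer ids in place of syncable::Id:

// Sketch: follow parent links toward the root, bounding the walk by the
// number of known entries so a cyclic tree is reported rather than looped
// over forever. Id 0 stands in for the root.
#include <iostream>
#include <map>

typedef std::map<int, int> ParentMap;  // id -> parent id

bool ReachesRoot(const ParentMap& parents, int id) {
  int safety_count = static_cast<int>(parents.size()) + 1;
  while (id != 0) {
    ParentMap::const_iterator it = parents.find(id);
    if (it == parents.end())
      return false;            // Dangling parent reference.
    id = it->second;
    if (--safety_count <= 0)
      return false;            // More hops than entries: must be a cycle.
  }
  return true;
}

int main() {
  ParentMap parents;
  parents[1] = 0;   // 1 is a child of the root.
  parents[2] = 1;
  parents[3] = 4;   // Cycle: 3 -> 4 -> 3.
  parents[4] = 3;
  std::cout << ReachesRoot(parents, 2) << " "    // Prints 1
            << ReachesRoot(parents, 3) << "\n";  // Prints 0
  return 0;
}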
-
-///////////////////////////////////////////////////////////////////////////////
-// ScopedKernelLock
-
-ScopedKernelLock::ScopedKernelLock(const Directory* dir)
- : scoped_lock_(dir->kernel_->mutex), dir_(const_cast<Directory*>(dir)) {
-}
-
-///////////////////////////////////////////////////////////////////////////
-// Transactions
-
-void BaseTransaction::Lock() {
- TRACE_EVENT2("sync_lock_contention", "AcquireLock",
- "src_file", from_here_.file_name(),
- "src_func", from_here_.function_name());
-
- dirkernel_->transaction_mutex.Acquire();
-}
-
-void BaseTransaction::Unlock() {
- dirkernel_->transaction_mutex.Release();
-}
-
-void BaseTransaction::OnUnrecoverableError(
- const tracked_objects::Location& location,
- const std::string& message) {
- unrecoverable_error_set_ = true;
- unrecoverable_error_location_ = location;
- unrecoverable_error_msg_ = message;
-
-  // Note: We don't call the Directory's OnUnrecoverableError method right
-  // away. Instead we wait for the stack to unwind and call
-  // OnUnrecoverableError from the transaction's destructor.
-
- directory()->ReportUnrecoverableError();
-}
-
-bool BaseTransaction::unrecoverable_error_set() const {
- return unrecoverable_error_set_;
-}
-
-void BaseTransaction::HandleUnrecoverableErrorIfSet() {
- if (unrecoverable_error_set_) {
- directory()->OnUnrecoverableError(this,
- unrecoverable_error_location_,
- unrecoverable_error_msg_);
- }
-}
-
-BaseTransaction::BaseTransaction(const tracked_objects::Location& from_here,
- const char* name,
- WriterTag writer,
- Directory* directory)
- : from_here_(from_here), name_(name), writer_(writer),
- directory_(directory), dirkernel_(directory->kernel_),
- unrecoverable_error_set_(false) {
- // TODO(lipalani): Don't issue a good transaction if the directory has
- // unrecoverable error set. And the callers have to check trans.good before
- // proceeding.
- TRACE_EVENT_BEGIN2("sync", name_,
- "src_file", from_here_.file_name(),
- "src_func", from_here_.function_name());
-}
-
-BaseTransaction::~BaseTransaction() {
- TRACE_EVENT_END0("sync", name_);
-}
-
-ReadTransaction::ReadTransaction(const tracked_objects::Location& location,
- Directory* directory)
- : BaseTransaction(location, "ReadTransaction", INVALID, directory) {
- Lock();
-}
-
-ReadTransaction::~ReadTransaction() {
- HandleUnrecoverableErrorIfSet();
- Unlock();
-}
-
-WriteTransaction::WriteTransaction(const tracked_objects::Location& location,
- WriterTag writer, Directory* directory)
- : BaseTransaction(location, "WriteTransaction", writer, directory) {
- Lock();
-}
-
-void WriteTransaction::SaveOriginal(const EntryKernel* entry) {
- if (!entry) {
- return;
- }
- // Insert only if it's not already there.
- const int64 handle = entry->ref(META_HANDLE);
- EntryKernelMutationMap::iterator it = mutations_.lower_bound(handle);
- if (it == mutations_.end() || it->first != handle) {
- EntryKernelMutation mutation;
- mutation.original = *entry;
- ignore_result(mutations_.insert(it, std::make_pair(handle, mutation)));
- }
-}
-
-ImmutableEntryKernelMutationMap WriteTransaction::RecordMutations() {
- dirkernel_->transaction_mutex.AssertAcquired();
- for (syncable::EntryKernelMutationMap::iterator it = mutations_.begin();
- it != mutations_.end();) {
- EntryKernel* kernel = directory()->GetEntryByHandle(it->first);
-    if (!kernel) {
-      NOTREACHED();
-      // Drop the stale handle so the iterator still advances; a bare
-      // |continue| here would spin on this element forever.
-      mutations_.erase(it++);
-      continue;
-    }
- if (kernel->is_dirty()) {
- it->second.mutated = *kernel;
- ++it;
- } else {
- DCHECK(!it->second.original.is_dirty());
- // Not actually mutated, so erase from |mutations_|.
- mutations_.erase(it++);
- }
- }
- return ImmutableEntryKernelMutationMap(&mutations_);
-}
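SaveOriginal records an entry's kernel the first time a transaction touches it, and RecordMutations later pairs that original with the current state, dropping entries that were touched but never actually changed (the real code keys this off the kernel's dirty bit). A rough standalone sketch of the same before/after bookkeeping, comparing values instead of dirty bits:

// Sketch: record the original value on first touch, then keep only items
// that really changed, paired as (original, mutated).
#include <iostream>
#include <map>
#include <string>

struct Mutation {
  std::string original;
  std::string mutated;
};
typedef std::map<int, Mutation> MutationMap;

void SaveOriginal(MutationMap* mutations, int handle,
                  const std::string& value) {
  if (mutations->count(handle) == 0)     // Only the first touch is recorded.
    (*mutations)[handle].original = value;
}

void RecordMutations(MutationMap* mutations,
                     const std::map<int, std::string>& current) {
  for (MutationMap::iterator it = mutations->begin();
       it != mutations->end();) {
    std::map<int, std::string>::const_iterator cur = current.find(it->first);
    if (cur == current.end() || cur->second == it->second.original) {
      mutations->erase(it++);            // Gone or unchanged; drop it.
    } else {
      it->second.mutated = cur->second;  // Genuinely changed; keep it.
      ++it;
    }
  }
}

int main() {
  std::map<int, std::string> store;
  store[1] = "a";
  store[2] = "b";
  MutationMap mutations;
  SaveOriginal(&mutations, 1, store[1]);
  SaveOriginal(&mutations, 2, store[2]);
  store[1] = "a2";                        // Only entry 1 actually changes.
  RecordMutations(&mutations, store);
  std::cout << mutations.size() << "\n";  // Prints 1.
  return 0;
}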
-
-void WriteTransaction::UnlockAndNotify(
- const ImmutableEntryKernelMutationMap& mutations) {
- // Work while transaction mutex is held.
- ModelTypeSet models_with_changes;
- bool has_mutations = !mutations.Get().empty();
- if (has_mutations) {
- models_with_changes = NotifyTransactionChangingAndEnding(mutations);
- }
- Unlock();
-
-  // Work after the mutex is released.
- if (has_mutations) {
- NotifyTransactionComplete(models_with_changes);
- }
-}
-
-ModelTypeSet WriteTransaction::NotifyTransactionChangingAndEnding(
- const ImmutableEntryKernelMutationMap& mutations) {
- dirkernel_->transaction_mutex.AssertAcquired();
- DCHECK(!mutations.Get().empty());
-
- WriteTransactionInfo write_transaction_info(
- dirkernel_->next_write_transaction_id, from_here_, writer_, mutations);
- ++dirkernel_->next_write_transaction_id;
-
- ImmutableWriteTransactionInfo immutable_write_transaction_info(
- &write_transaction_info);
- DirectoryChangeDelegate* const delegate = dirkernel_->delegate;
- if (writer_ == syncable::SYNCAPI) {
- delegate->HandleCalculateChangesChangeEventFromSyncApi(
- immutable_write_transaction_info, this);
- } else {
- delegate->HandleCalculateChangesChangeEventFromSyncer(
- immutable_write_transaction_info, this);
- }
-
- ModelTypeSet models_with_changes =
- delegate->HandleTransactionEndingChangeEvent(
- immutable_write_transaction_info, this);
-
- dirkernel_->transaction_observer.Call(FROM_HERE,
- &TransactionObserver::OnTransactionWrite,
- immutable_write_transaction_info, models_with_changes);
-
- return models_with_changes;
-}
-
-void WriteTransaction::NotifyTransactionComplete(
- ModelTypeSet models_with_changes) {
- dirkernel_->delegate->HandleTransactionCompleteChangeEvent(
- models_with_changes);
-}
-
-WriteTransaction::~WriteTransaction() {
- const ImmutableEntryKernelMutationMap& mutations = RecordMutations();
-
- if (!unrecoverable_error_set_) {
- if (OFF != kInvariantCheckLevel) {
- const bool full_scan = (FULL_DB_VERIFICATION == kInvariantCheckLevel);
- if (full_scan)
- directory()->CheckTreeInvariants(this, full_scan);
- else
- directory()->CheckTreeInvariants(this, mutations.Get());
- }
- }
-
- // |CheckTreeInvariants| could have thrown an unrecoverable error.
- if (unrecoverable_error_set_) {
- HandleUnrecoverableErrorIfSet();
- Unlock();
- return;
- }
-
- UnlockAndNotify(mutations);
-}
-
-///////////////////////////////////////////////////////////////////////////
-// Entry
-
-Entry::Entry(BaseTransaction* trans, GetById, const Id& id)
- : basetrans_(trans) {
- kernel_ = trans->directory()->GetEntryById(id);
-}
-
-Entry::Entry(BaseTransaction* trans, GetByClientTag, const string& tag)
- : basetrans_(trans) {
- kernel_ = trans->directory()->GetEntryByClientTag(tag);
-}
-
-Entry::Entry(BaseTransaction* trans, GetByServerTag, const string& tag)
- : basetrans_(trans) {
- kernel_ = trans->directory()->GetEntryByServerTag(tag);
-}
-
-Entry::Entry(BaseTransaction* trans, GetByHandle, int64 metahandle)
- : basetrans_(trans) {
- kernel_ = trans->directory()->GetEntryByHandle(metahandle);
-}
-
-Directory* Entry::dir() const {
- return basetrans_->directory();
-}
-
-Id Entry::ComputePrevIdFromServerPosition(const Id& parent_id) const {
- return dir()->ComputePrevIdFromServerPosition(kernel_, parent_id);
-}
-
-DictionaryValue* Entry::ToValue() const {
- DictionaryValue* entry_info = new DictionaryValue();
- entry_info->SetBoolean("good", good());
- if (good()) {
- entry_info->Set("kernel", kernel_->ToValue());
- entry_info->Set("modelType",
- ModelTypeToValue(GetModelType()));
- entry_info->SetBoolean("existsOnClientBecauseNameIsNonEmpty",
- ExistsOnClientBecauseNameIsNonEmpty());
- entry_info->SetBoolean("isRoot", IsRoot());
- }
- return entry_info;
-}
-
-const string& Entry::Get(StringField field) const {
- DCHECK(kernel_);
- return kernel_->ref(field);
-}
-
-syncable::ModelType Entry::GetServerModelType() const {
- ModelType specifics_type = kernel_->GetServerModelType();
- if (specifics_type != UNSPECIFIED)
- return specifics_type;
-
- // Otherwise, we don't have a server type yet. That should only happen
- // if the item is an uncommitted locally created item.
- // It's possible we'll need to relax these checks in the future; they're
- // just here for now as a safety measure.
- DCHECK(Get(IS_UNSYNCED));
- DCHECK_EQ(Get(SERVER_VERSION), 0);
- DCHECK(Get(SERVER_IS_DEL));
- // Note: can't enforce !Get(ID).ServerKnows() here because that could
- // actually happen if we hit AttemptReuniteLostCommitResponses.
- return UNSPECIFIED;
-}
-
-syncable::ModelType Entry::GetModelType() const {
- ModelType specifics_type = GetModelTypeFromSpecifics(Get(SPECIFICS));
- if (specifics_type != UNSPECIFIED)
- return specifics_type;
- if (IsRoot())
- return TOP_LEVEL_FOLDER;
- // Loose check for server-created top-level folders that aren't
- // bound to a particular model type.
- if (!Get(UNIQUE_SERVER_TAG).empty() && Get(IS_DIR))
- return TOP_LEVEL_FOLDER;
-
- return UNSPECIFIED;
-}
-
-///////////////////////////////////////////////////////////////////////////
-// MutableEntry
-
-MutableEntry::MutableEntry(WriteTransaction* trans, Create,
- const Id& parent_id, const string& name)
- : Entry(trans),
- write_transaction_(trans) {
- Init(trans, parent_id, name);
-}
-
-
-void MutableEntry::Init(WriteTransaction* trans, const Id& parent_id,
- const string& name) {
- scoped_ptr<EntryKernel> kernel(new EntryKernel);
- kernel_ = NULL;
-
- kernel->put(ID, trans->directory_->NextId());
- kernel->put(META_HANDLE, trans->directory_->NextMetahandle());
- kernel->mark_dirty(trans->directory_->kernel_->dirty_metahandles);
- kernel->put(PARENT_ID, parent_id);
- kernel->put(NON_UNIQUE_NAME, name);
- const base::Time& now = base::Time::Now();
- kernel->put(CTIME, now);
- kernel->put(MTIME, now);
- // We match the database defaults here
- kernel->put(BASE_VERSION, CHANGES_VERSION);
- if (!trans->directory()->InsertEntry(trans, kernel.get())) {
- return; // We failed inserting, nothing more to do.
- }
- // Because this entry is new, it was originally deleted.
- kernel->put(IS_DEL, true);
- trans->SaveOriginal(kernel.get());
- kernel->put(IS_DEL, false);
-
- // Now swap the pointers.
- kernel_ = kernel.release();
-}
-
-MutableEntry::MutableEntry(WriteTransaction* trans, CreateNewUpdateItem,
- const Id& id)
- : Entry(trans), write_transaction_(trans) {
- Entry same_id(trans, GET_BY_ID, id);
- kernel_ = NULL;
- if (same_id.good()) {
- return; // already have an item with this ID.
- }
- scoped_ptr<EntryKernel> kernel(new EntryKernel());
-
- kernel->put(ID, id);
- kernel->put(META_HANDLE, trans->directory_->NextMetahandle());
- kernel->mark_dirty(trans->directory_->kernel_->dirty_metahandles);
- kernel->put(IS_DEL, true);
- // We match the database defaults here
- kernel->put(BASE_VERSION, CHANGES_VERSION);
- if (!trans->directory()->InsertEntry(trans, kernel.get())) {
- return; // Failed inserting.
- }
- trans->SaveOriginal(kernel.get());
-
- kernel_ = kernel.release();
-}
-
-MutableEntry::MutableEntry(WriteTransaction* trans, GetById, const Id& id)
- : Entry(trans, GET_BY_ID, id), write_transaction_(trans) {
- trans->SaveOriginal(kernel_);
-}
-
-MutableEntry::MutableEntry(WriteTransaction* trans, GetByHandle,
- int64 metahandle)
- : Entry(trans, GET_BY_HANDLE, metahandle), write_transaction_(trans) {
- trans->SaveOriginal(kernel_);
-}
-
-MutableEntry::MutableEntry(WriteTransaction* trans, GetByClientTag,
- const std::string& tag)
- : Entry(trans, GET_BY_CLIENT_TAG, tag), write_transaction_(trans) {
- trans->SaveOriginal(kernel_);
-}
-
-MutableEntry::MutableEntry(WriteTransaction* trans, GetByServerTag,
- const string& tag)
- : Entry(trans, GET_BY_SERVER_TAG, tag), write_transaction_(trans) {
- trans->SaveOriginal(kernel_);
-}
-
-bool MutableEntry::PutIsDel(bool is_del) {
- DCHECK(kernel_);
- if (is_del == kernel_->ref(IS_DEL)) {
- return true;
- }
- if (is_del) {
- if (!UnlinkFromOrder()) {
- return false;
- }
- }
-
- {
- ScopedKernelLock lock(dir());
- // Some indices don't include deleted items and must be updated
- // upon a value change.
- ScopedIndexUpdater<ParentIdAndHandleIndexer> updater(lock, kernel_,
- dir()->kernel_->parent_id_child_index);
-
- kernel_->put(IS_DEL, is_del);
- kernel_->mark_dirty(dir()->kernel_->dirty_metahandles);
- }
-
-  if (!is_del) {
-    // Restores position to the 0th index.
-    if (!PutPredecessor(Id())) {
-      // TODO(lipalani) : Propagate the error to caller. crbug.com/100444.
-      NOTREACHED();
-    }
-  }
-
- return true;
-}
-
-bool MutableEntry::Put(Int64Field field, const int64& value) {
- DCHECK(kernel_);
- if (kernel_->ref(field) != value) {
- ScopedKernelLock lock(dir());
- if (SERVER_POSITION_IN_PARENT == field) {
- ScopedIndexUpdater<ParentIdAndHandleIndexer> updater(lock, kernel_,
- dir()->kernel_->parent_id_child_index);
- kernel_->put(field, value);
- } else {
- kernel_->put(field, value);
- }
- kernel_->mark_dirty(dir()->kernel_->dirty_metahandles);
- }
- return true;
-}
-
-bool MutableEntry::Put(TimeField field, const base::Time& value) {
- DCHECK(kernel_);
- if (kernel_->ref(field) != value) {
- kernel_->put(field, value);
- kernel_->mark_dirty(dir()->kernel_->dirty_metahandles);
- }
- return true;
-}
-
-bool MutableEntry::Put(IdField field, const Id& value) {
- DCHECK(kernel_);
- if (kernel_->ref(field) != value) {
- if (ID == field) {
- if (!dir()->ReindexId(write_transaction(), kernel_, value))
- return false;
- } else if (PARENT_ID == field) {
- PutParentIdPropertyOnly(value); // Makes sibling order inconsistent.
- // Fixes up the sibling order inconsistency.
- if (!PutPredecessor(Id())) {
- // TODO(lipalani) : Propagate the error to caller. crbug.com/100444.
- NOTREACHED();
- }
- } else {
- kernel_->put(field, value);
- }
- kernel_->mark_dirty(dir()->kernel_->dirty_metahandles);
- }
- return true;
-}
-
-void MutableEntry::PutParentIdPropertyOnly(const Id& parent_id) {
- dir()->ReindexParentId(write_transaction(), kernel_, parent_id);
- kernel_->mark_dirty(dir()->kernel_->dirty_metahandles);
-}
-
-bool MutableEntry::Put(BaseVersion field, int64 value) {
- DCHECK(kernel_);
- if (kernel_->ref(field) != value) {
- kernel_->put(field, value);
- kernel_->mark_dirty(dir()->kernel_->dirty_metahandles);
- }
- return true;
-}
-
-bool MutableEntry::Put(StringField field, const string& value) {
- return PutImpl(field, value);
-}
-
-bool MutableEntry::Put(ProtoField field,
- const sync_pb::EntitySpecifics& value) {
- DCHECK(kernel_);
- // TODO(ncarter): This is unfortunately heavyweight. Can we do
- // better?
- if (kernel_->ref(field).SerializeAsString() != value.SerializeAsString()) {
- const bool update_unapplied_updates_index =
- (field == SERVER_SPECIFICS) && kernel_->ref(IS_UNAPPLIED_UPDATE);
- if (update_unapplied_updates_index) {
- // Remove ourselves from unapplied_update_metahandles with our
- // old server type.
- const syncable::ModelType old_server_type =
- kernel_->GetServerModelType();
- const int64 metahandle = kernel_->ref(META_HANDLE);
- size_t erase_count =
- dir()->kernel_->unapplied_update_metahandles[old_server_type]
- .erase(metahandle);
- DCHECK_EQ(erase_count, 1u);
- }
-
- kernel_->put(field, value);
- kernel_->mark_dirty(dir()->kernel_->dirty_metahandles);
-
- if (update_unapplied_updates_index) {
- // Add ourselves back into unapplied_update_metahandles with our
- // new server type.
- const syncable::ModelType new_server_type =
- kernel_->GetServerModelType();
- const int64 metahandle = kernel_->ref(META_HANDLE);
- dir()->kernel_->unapplied_update_metahandles[new_server_type]
- .insert(metahandle);
- }
- }
- return true;
-}
-
-bool MutableEntry::Put(BitField field, bool value) {
- DCHECK(kernel_);
- if (kernel_->ref(field) != value) {
- kernel_->put(field, value);
- kernel_->mark_dirty(GetDirtyIndexHelper());
- }
- return true;
-}
-
-MetahandleSet* MutableEntry::GetDirtyIndexHelper() {
- return dir()->kernel_->dirty_metahandles;
-}
-
-bool MutableEntry::PutUniqueClientTag(const string& new_tag) {
- // There is no SERVER_UNIQUE_CLIENT_TAG. This field is similar to ID.
- string old_tag = kernel_->ref(UNIQUE_CLIENT_TAG);
- if (old_tag == new_tag) {
- return true;
- }
-
- ScopedKernelLock lock(dir());
- if (!new_tag.empty()) {
- // Make sure your new value is not in there already.
- EntryKernel lookup_kernel_ = *kernel_;
- lookup_kernel_.put(UNIQUE_CLIENT_TAG, new_tag);
- bool new_tag_conflicts =
- (dir()->kernel_->client_tag_index->count(&lookup_kernel_) > 0);
- if (new_tag_conflicts) {
- return false;
- }
- }
-
- {
- ScopedIndexUpdater<ClientTagIndexer> index_updater(lock, kernel_,
- dir()->kernel_->client_tag_index);
- kernel_->put(UNIQUE_CLIENT_TAG, new_tag);
- kernel_->mark_dirty(dir()->kernel_->dirty_metahandles);
- }
- return true;
-}
-
-bool MutableEntry::PutImpl(StringField field, const string& value) {
- DCHECK(kernel_);
- if (field == UNIQUE_CLIENT_TAG) {
- return PutUniqueClientTag(value);
- }
-
- if (kernel_->ref(field) != value) {
- kernel_->put(field, value);
- kernel_->mark_dirty(dir()->kernel_->dirty_metahandles);
- }
- return true;
-}
-
-bool MutableEntry::Put(IndexedBitField field, bool value) {
- DCHECK(kernel_);
- if (kernel_->ref(field) != value) {
- MetahandleSet* index;
- if (IS_UNSYNCED == field) {
- index = dir()->kernel_->unsynced_metahandles;
- } else {
- // Use kernel_->GetServerModelType() instead of
- // GetServerModelType() as we may trigger some DCHECKs in the
- // latter.
- index =
- &dir()->kernel_->unapplied_update_metahandles[
- kernel_->GetServerModelType()];
- }
-
- ScopedKernelLock lock(dir());
- if (value) {
- if (!SyncAssert(index->insert(kernel_->ref(META_HANDLE)).second,
- FROM_HERE,
- "Could not insert",
- write_transaction())) {
- return false;
- }
- } else {
- if (!SyncAssert(1U == index->erase(kernel_->ref(META_HANDLE)),
- FROM_HERE,
-                      "Entry not successfully erased",
- write_transaction())) {
- return false;
- }
- }
- kernel_->put(field, value);
- kernel_->mark_dirty(dir()->kernel_->dirty_metahandles);
- }
- return true;
-}
-
-bool MutableEntry::UnlinkFromOrder() {
- ScopedKernelLock lock(dir());
- return dir()->UnlinkEntryFromOrder(kernel_, write_transaction(), &lock);
-}
-
-bool Directory::UnlinkEntryFromOrder(EntryKernel* entry,
- WriteTransaction* trans,
- ScopedKernelLock* lock) {
- if (!SyncAssert(!trans || this == trans->directory(),
- FROM_HERE,
- "Transaction not pointing to the right directory",
- trans))
- return false;
- Id old_previous = entry->ref(PREV_ID);
- Id old_next = entry->ref(NEXT_ID);
-
- entry->put(NEXT_ID, entry->ref(ID));
- entry->put(PREV_ID, entry->ref(ID));
- entry->mark_dirty(kernel_->dirty_metahandles);
-
- if (!old_previous.IsRoot()) {
- if (old_previous == old_next) {
- // Note previous == next doesn't imply previous == next == Get(ID). We
- // could have prev==next=="c-XX" and Get(ID)=="sX..." if an item was added
- // and deleted before receiving the server ID in the commit response.
- if (!SyncAssert(
- (old_next == entry->ref(ID)) || !old_next.ServerKnows(),
- FROM_HERE,
-              "Encountered inconsistent entry while deleting",
- trans)) {
- return false;
- }
- return true; // Done if we were already self-looped (hence unlinked).
- }
- EntryKernel* previous_entry = GetEntryById(old_previous, lock);
- ModelType type = GetModelTypeFromSpecifics(entry->ref(SPECIFICS));
- // TODO(tim): Multiple asserts here for bug 101039 investigation.
- if (type == AUTOFILL) {
- if (!SyncAssert(previous_entry != NULL,
- FROM_HERE,
- "Could not find previous autofill entry",
- trans)) {
- return false;
- }
- } else {
- if (!SyncAssert(previous_entry != NULL,
- FROM_HERE,
- "Could not find previous entry",
- trans)) {
- return false;
- }
- }
- if (trans)
- trans->SaveOriginal(previous_entry);
- previous_entry->put(NEXT_ID, old_next);
- previous_entry->mark_dirty(kernel_->dirty_metahandles);
- }
-
- if (!old_next.IsRoot()) {
- EntryKernel* next_entry = GetEntryById(old_next, lock);
- if (!SyncAssert(next_entry != NULL,
- FROM_HERE,
- "Could not find next entry",
- trans)) {
- return false;
- }
- if (trans)
- trans->SaveOriginal(next_entry);
- next_entry->put(PREV_ID, old_previous);
- next_entry->mark_dirty(kernel_->dirty_metahandles);
- }
- return true;
-}
-
-bool MutableEntry::PutPredecessor(const Id& predecessor_id) {
- if (!UnlinkFromOrder())
- return false;
-
- if (Get(IS_DEL)) {
- DCHECK(predecessor_id.IsNull());
- return true;
- }
-
- // TODO(ncarter): It should be possible to not maintain position for
- // non-bookmark items. However, we'd need to robustly handle all possible
- // permutations of setting IS_DEL and the SPECIFICS to identify the
- // object type; or else, we'd need to add a ModelType to the
- // MutableEntry's Create ctor.
- // if (!ShouldMaintainPosition()) {
- // return false;
- // }
-
- // This is classic insert-into-doubly-linked-list from CS 101 and your last
- // job interview. An "IsRoot" Id signifies the head or tail.
- Id successor_id;
- if (!predecessor_id.IsRoot()) {
- MutableEntry predecessor(write_transaction(), GET_BY_ID, predecessor_id);
- if (!predecessor.good()) {
- LOG(ERROR) << "Predecessor is not good : "
- << predecessor_id.GetServerId();
- return false;
- }
- if (predecessor.Get(PARENT_ID) != Get(PARENT_ID))
- return false;
- successor_id = predecessor.Get(NEXT_ID);
- predecessor.Put(NEXT_ID, Get(ID));
- } else {
- syncable::Directory* dir = trans()->directory();
- if (!dir->GetFirstChildId(trans(), Get(PARENT_ID), &successor_id)) {
- return false;
- }
- }
- if (!successor_id.IsRoot()) {
- MutableEntry successor(write_transaction(), GET_BY_ID, successor_id);
- if (!successor.good()) {
- LOG(ERROR) << "Successor is not good: "
- << successor_id.GetServerId();
- return false;
- }
- if (successor.Get(PARENT_ID) != Get(PARENT_ID))
- return false;
- successor.Put(PREV_ID, Get(ID));
- }
- DCHECK(predecessor_id != Get(ID));
- DCHECK(successor_id != Get(ID));
- Put(PREV_ID, predecessor_id);
- Put(NEXT_ID, successor_id);
- return true;
-}
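PutPredecessor and UnlinkEntryFromOrder are the two halves of ordinary doubly-linked-list surgery, with a self-looped entry meaning "not currently in the sibling order". A toy standalone version of the same splice; it uses a circular sentinel node with id 0 for simplicity, whereas the real code treats the root Id as a null-like end marker:

// Sketch: unlink a node by self-looping its links, then splice it back in
// after a chosen predecessor. Id 0 plays the part of the root.
#include <iostream>
#include <map>

struct Node {
  int prev;
  int next;
};
typedef std::map<int, Node> Siblings;  // id -> links

void Unlink(Siblings* s, int id) {
  Node& n = (*s)[id];
  if (n.prev == id && n.next == id)
    return;                       // Already self-looped, i.e. unlinked.
  (*s)[n.prev].next = n.next;
  (*s)[n.next].prev = n.prev;
  n.prev = n.next = id;           // Self-loop marks "not in the list".
}

void InsertAfter(Siblings* s, int id, int predecessor) {
  Unlink(s, id);
  int successor = (*s)[predecessor].next;
  (*s)[id].prev = predecessor;
  (*s)[id].next = successor;
  (*s)[predecessor].next = id;
  (*s)[successor].prev = id;
}

int main() {
  Siblings s;
  s[0].prev = s[0].next = 0;      // Empty list: the root points at itself.
  s[1].prev = s[1].next = 1;      // New, unlinked nodes are self-looped.
  s[2].prev = s[2].next = 2;
  InsertAfter(&s, 1, 0);          // 1 becomes the first child.
  InsertAfter(&s, 2, 1);          // 2 goes after 1.
  std::cout << s[0].next << " " << s[1].next << " " << s[2].next << "\n";
  // Prints: 1 2 0
  return 0;
}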
-
-bool MutableEntry::Put(BitTemp field, bool value) {
- DCHECK(kernel_);
- kernel_->put(field, value);
- return true;
-}
-
-///////////////////////////////////////////////////////////////////////////
-// High-level functions
-
-int64 Directory::NextMetahandle() {
- ScopedKernelLock lock(this);
- int64 metahandle = (kernel_->next_metahandle)++;
- return metahandle;
-}
-
-// Always returns a client ID that is the string representation of a negative
-// number.
-Id Directory::NextId() {
- int64 result;
- {
- ScopedKernelLock lock(this);
- result = (kernel_->persisted_info.next_id)--;
- kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
- }
- DCHECK_LT(result, 0);
- return Id::CreateFromClientString(base::Int64ToString(result));
-}
-
-bool Directory::HasChildren(BaseTransaction* trans, const Id& id) {
- ScopedKernelLock lock(this);
- return (GetPossibleFirstChild(lock, id) != NULL);
-}
-
-bool Directory::GetFirstChildId(BaseTransaction* trans,
- const Id& parent_id,
- Id* first_child_id) {
- ScopedKernelLock lock(this);
- EntryKernel* entry = GetPossibleFirstChild(lock, parent_id);
- if (!entry) {
- *first_child_id = Id();
- return true;
- }
-
- // Walk to the front of the list; the server position ordering
- // is commonly identical to the linked-list ordering, but pending
- // unsynced or unapplied items may diverge.
- while (!entry->ref(PREV_ID).IsRoot()) {
- entry = GetEntryById(entry->ref(PREV_ID), &lock);
- if (!entry) {
- *first_child_id = Id();
- return false;
- }
- }
- *first_child_id = entry->ref(ID);
- return true;
-}
-
-bool Directory::GetLastChildIdForTest(
- BaseTransaction* trans, const Id& parent_id, Id* last_child_id) {
- ScopedKernelLock lock(this);
- EntryKernel* entry = GetPossibleLastChildForTest(lock, parent_id);
- if (!entry) {
- *last_child_id = Id();
- return true;
- }
-
- // Walk to the back of the list; the server position ordering
- // is commonly identical to the linked-list ordering, but pending
- // unsynced or unapplied items may diverge.
- while (!entry->ref(NEXT_ID).IsRoot()) {
- entry = GetEntryById(entry->ref(NEXT_ID), &lock);
- if (!entry) {
- *last_child_id = Id();
- return false;
- }
- }
-
- *last_child_id = entry->ref(ID);
- return true;
-}
-
-Id Directory::ComputePrevIdFromServerPosition(
- const EntryKernel* entry,
- const syncable::Id& parent_id) {
- ScopedKernelLock lock(this);
-
- // Find the natural insertion point in the parent_id_child_index, and
- // work back from there, filtering out ineligible candidates.
- ParentIdChildIndex::iterator sibling = LocateInParentChildIndex(lock,
- parent_id, entry->ref(SERVER_POSITION_IN_PARENT), entry->ref(ID));
- ParentIdChildIndex::iterator first_sibling =
- GetParentChildIndexLowerBound(lock, parent_id);
-
- while (sibling != first_sibling) {
- --sibling;
- EntryKernel* candidate = *sibling;
-
- // The item itself should never be in the range under consideration.
- DCHECK_NE(candidate->ref(META_HANDLE), entry->ref(META_HANDLE));
-
- // Ignore unapplied updates -- they might not even be server-siblings.
- if (candidate->ref(IS_UNAPPLIED_UPDATE))
- continue;
-
- // We can't trust the SERVER_ fields of unsynced items, but they are
- // potentially legitimate local predecessors. In the case where
- // |update_item| and an unsynced item wind up in the same insertion
- // position, we need to choose how to order them. The following check puts
- // the unapplied update first; removing it would put the unsynced item(s)
- // first.
- if (candidate->ref(IS_UNSYNCED))
- continue;
-
- // Skip over self-looped items, which are not valid predecessors. This
- // shouldn't happen in practice, but is worth defending against.
- if (candidate->ref(PREV_ID) == candidate->ref(NEXT_ID) &&
- !candidate->ref(PREV_ID).IsRoot()) {
- NOTREACHED();
- continue;
- }
- return candidate->ref(ID);
- }
- // This item will be the first in the sibling order.
- return Id();
-}
-
-bool IsLegalNewParent(BaseTransaction* trans, const Id& entry_id,
- const Id& new_parent_id) {
- if (entry_id.IsRoot())
- return false;
-  // We have to ensure that the entry is not an ancestor of the new parent.
- Id ancestor_id = new_parent_id;
- while (!ancestor_id.IsRoot()) {
- if (entry_id == ancestor_id)
- return false;
- Entry new_parent(trans, GET_BY_ID, ancestor_id);
- if (!SyncAssert(new_parent.good(),
- FROM_HERE,
- "Invalid new parent",
- trans))
- return false;
- ancestor_id = new_parent.Get(PARENT_ID);
- }
- return true;
-}
-
-// This function sets only the flags needed to get this entry to sync.
-bool MarkForSyncing(syncable::MutableEntry* e) {
- DCHECK_NE(static_cast<MutableEntry*>(NULL), e);
- DCHECK(!e->IsRoot()) << "We shouldn't mark a permanent object for syncing.";
- if (!(e->Put(IS_UNSYNCED, true)))
- return false;
- e->Put(SYNCING, false);
- return true;
-}
-
-std::ostream& operator<<(std::ostream& os, const Entry& entry) {
- int i;
- EntryKernel* const kernel = entry.kernel_;
- for (i = BEGIN_FIELDS; i < INT64_FIELDS_END; ++i) {
- os << g_metas_columns[i].name << ": "
- << kernel->ref(static_cast<Int64Field>(i)) << ", ";
- }
- for ( ; i < TIME_FIELDS_END; ++i) {
- os << g_metas_columns[i].name << ": "
- << browser_sync::GetTimeDebugString(
- kernel->ref(static_cast<TimeField>(i))) << ", ";
- }
- for ( ; i < ID_FIELDS_END; ++i) {
- os << g_metas_columns[i].name << ": "
- << kernel->ref(static_cast<IdField>(i)) << ", ";
- }
- os << "Flags: ";
- for ( ; i < BIT_FIELDS_END; ++i) {
- if (kernel->ref(static_cast<BitField>(i)))
- os << g_metas_columns[i].name << ", ";
- }
- for ( ; i < STRING_FIELDS_END; ++i) {
- const string& field = kernel->ref(static_cast<StringField>(i));
- os << g_metas_columns[i].name << ": " << field << ", ";
- }
- for ( ; i < PROTO_FIELDS_END; ++i) {
- os << g_metas_columns[i].name << ": "
- << net::EscapePath(
- kernel->ref(static_cast<ProtoField>(i)).SerializeAsString())
- << ", ";
- }
- os << "TempFlags: ";
- for ( ; i < BIT_TEMPS_END; ++i) {
- if (kernel->ref(static_cast<BitTemp>(i)))
- os << "#" << i - BIT_TEMPS_BEGIN << ", ";
- }
- return os;
-}
-
-std::ostream& operator<<(std::ostream& s, const Blob& blob) {
- for (Blob::const_iterator i = blob.begin(); i != blob.end(); ++i)
- s << std::hex << std::setw(2)
- << std::setfill('0') << static_cast<unsigned int>(*i);
- return s << std::dec;
-}
-
-Directory::ParentIdChildIndex::iterator Directory::LocateInParentChildIndex(
- const ScopedKernelLock& lock,
- const Id& parent_id,
- int64 position_in_parent,
- const Id& item_id_for_tiebreaking) {
- kernel_->needle.put(PARENT_ID, parent_id);
- kernel_->needle.put(SERVER_POSITION_IN_PARENT, position_in_parent);
- kernel_->needle.put(ID, item_id_for_tiebreaking);
- return kernel_->parent_id_child_index->lower_bound(&kernel_->needle);
-}
-
-Directory::ParentIdChildIndex::iterator
-Directory::GetParentChildIndexLowerBound(const ScopedKernelLock& lock,
- const Id& parent_id) {
- // Peg the parent ID, and use the least values for the remaining
- // index variables.
- return LocateInParentChildIndex(lock, parent_id,
- std::numeric_limits<int64>::min(),
- Id::GetLeastIdForLexicographicComparison());
-}
-
-Directory::ParentIdChildIndex::iterator
-Directory::GetParentChildIndexUpperBound(const ScopedKernelLock& lock,
- const Id& parent_id) {
- // The upper bound of |parent_id|'s range is the lower
- // bound of |++parent_id|'s range.
- return GetParentChildIndexLowerBound(lock,
- parent_id.GetLexicographicSuccessor());
-}
-
-void Directory::AppendChildHandles(const ScopedKernelLock& lock,
- const Id& parent_id,
- Directory::ChildHandles* result) {
- typedef ParentIdChildIndex::iterator iterator;
- CHECK(result);
- for (iterator i = GetParentChildIndexLowerBound(lock, parent_id),
- end = GetParentChildIndexUpperBound(lock, parent_id);
- i != end; ++i) {
- DCHECK_EQ(parent_id, (*i)->ref(PARENT_ID));
- result->push_back((*i)->ref(META_HANDLE));
- }
-}
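The parent/child index scans above rely on the index being sorted by (PARENT_ID, SERVER_POSITION_IN_PARENT, ID), so all children of one parent form a contiguous range whose upper bound is simply the lower bound of the parent's lexicographic successor. A small standalone sketch of that half-open range scan, with integer keys standing in for syncable Ids:

// Sketch: children of |parent| occupy the half-open range
// [lower_bound(parent, MIN), lower_bound(parent + 1, MIN)).
#include <algorithm>
#include <iostream>
#include <limits>
#include <utility>
#include <vector>

typedef std::pair<int, int> Key;  // (parent id, position in parent)

int main() {
  std::vector<Key> index;
  index.push_back(Key(1, 10));
  index.push_back(Key(1, 20));
  index.push_back(Key(2, 5));
  std::sort(index.begin(), index.end());

  const int parent = 1;
  const int kMin = std::numeric_limits<int>::min();
  std::vector<Key>::iterator begin =
      std::lower_bound(index.begin(), index.end(), Key(parent, kMin));
  std::vector<Key>::iterator end =
      std::lower_bound(index.begin(), index.end(), Key(parent + 1, kMin));
  for (std::vector<Key>::iterator it = begin; it != end; ++it)
    std::cout << "child at position " << it->second << "\n";  // 10, then 20.
  return 0;
}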
-
-EntryKernel* Directory::GetPossibleFirstChild(
- const ScopedKernelLock& lock, const Id& parent_id) {
- // We can use the server positional ordering as a hint because it's generally
- // in sync with the local (linked-list) positional ordering, and we have an
- // index on it.
- ParentIdChildIndex::iterator candidate =
- GetParentChildIndexLowerBound(lock, parent_id);
- ParentIdChildIndex::iterator end_range =
- GetParentChildIndexUpperBound(lock, parent_id);
- for (; candidate != end_range; ++candidate) {
- EntryKernel* entry = *candidate;
- // Filter out self-looped items, which are temporarily not in the child
- // ordering.
- if (entry->ref(PREV_ID).IsRoot() ||
- entry->ref(PREV_ID) != entry->ref(NEXT_ID)) {
- return entry;
- }
- }
- // There were no children in the linked list.
- return NULL;
-}
-
-EntryKernel* Directory::GetPossibleLastChildForTest(
- const ScopedKernelLock& lock, const Id& parent_id) {
- // We can use the server positional ordering as a hint because it's generally
- // in sync with the local (linked-list) positional ordering, and we have an
- // index on it.
- ParentIdChildIndex::iterator begin_range =
- GetParentChildIndexLowerBound(lock, parent_id);
- ParentIdChildIndex::iterator candidate =
- GetParentChildIndexUpperBound(lock, parent_id);
-
- while (begin_range != candidate) {
- --candidate;
- EntryKernel* entry = *candidate;
-
- // Filter out self-looped items, which are temporarily not in the child
- // ordering.
- if (entry->ref(NEXT_ID).IsRoot() ||
- entry->ref(NEXT_ID) != entry->ref(PREV_ID)) {
- return entry;
- }
- }
- // There were no children in the linked list.
- return NULL;
-}
-
-} // namespace syncable
diff --git a/chrome/browser/sync/syncable/syncable.h b/chrome/browser/sync/syncable/syncable.h
deleted file mode 100644
index 344e7e0..0000000
--- a/chrome/browser/sync/syncable/syncable.h
+++ /dev/null
@@ -1,1349 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CHROME_BROWSER_SYNC_SYNCABLE_SYNCABLE_H_
-#define CHROME_BROWSER_SYNC_SYNCABLE_SYNCABLE_H_
-#pragma once
-
-#include <algorithm>
-#include <bitset>
-#include <cstddef>
-#include <iosfwd>
-#include <limits>
-#include <map>
-#include <set>
-#include <string>
-#include <vector>
-
-#include "base/atomicops.h"
-#include "base/basictypes.h"
-#include "base/compiler_specific.h"
-#include "base/file_path.h"
-#include "base/gtest_prod_util.h"
-#include "base/location.h"
-#include "base/logging.h"
-#include "base/memory/ref_counted.h"
-#include "base/synchronization/lock.h"
-#include "base/time.h"
-#include "chrome/browser/sync/internal_api/includes/report_unrecoverable_error_function.h"
-#include "chrome/browser/sync/internal_api/includes/unrecoverable_error_handler.h"
-#include "chrome/browser/sync/syncable/blob.h"
-#include "chrome/browser/sync/syncable/dir_open_result.h"
-#include "chrome/browser/sync/syncable/model_type.h"
-#include "chrome/browser/sync/syncable/syncable_id.h"
-#include "chrome/browser/sync/util/cryptographer.h"
-#include "chrome/browser/sync/util/immutable.h"
-#include "chrome/browser/sync/util/time.h"
-#include "chrome/browser/sync/util/weak_handle.h"
-#include "sync/protocol/sync.pb.h"
-
-namespace base {
-class DictionaryValue;
-class ListValue;
-}
-
-namespace browser_sync {
-class Encryptor;
-} // namespace browser_sync
-
-namespace sync_api {
-class ReadTransaction;
-class WriteNode;
-class ReadNode;
-} // sync_api
-
-namespace syncable {
-class DirectoryChangeDelegate;
-class TransactionObserver;
-class Entry;
-
-std::ostream& operator<<(std::ostream& s, const Entry& e);
-
-class DirectoryBackingStore;
-
-static const int64 kInvalidMetaHandle = 0;
-
-// Things you need to update if you change any of the fields below:
-// - EntryKernel struct in syncable.h (this file)
-// - syncable_columns.h
-// - syncable_enum_conversions{.h,.cc,_unittest.cc}
-// - EntryKernel::EntryKernel(), EntryKernel::ToValue(), operator<<
-// for Entry in syncable.cc
-// - BindFields() and UnpackEntry() in directory_backing_store.cc
-// - TestSimpleFieldsPreservedDuringSaveChanges in syncable_unittest.cc
-
-enum {
- BEGIN_FIELDS = 0,
- INT64_FIELDS_BEGIN = BEGIN_FIELDS
-};
-
-enum MetahandleField {
- // Primary key into the table. Keep this as a handle to the meta entry
- // across transactions.
- META_HANDLE = INT64_FIELDS_BEGIN
-};
-
-enum BaseVersion {
- // After initial upload, the version is controlled by the server, and is
- // increased whenever the data or metadata changes on the server.
- BASE_VERSION = META_HANDLE + 1,
-};
-
-enum Int64Field {
- SERVER_VERSION = BASE_VERSION + 1,
-
- // A numeric position value that indicates the relative ordering of
- // this object among its siblings.
- SERVER_POSITION_IN_PARENT,
-
- LOCAL_EXTERNAL_ID, // ID of an item in the external local storage that this
- // entry is associated with. (such as bookmarks.js)
-
- INT64_FIELDS_END
-};
-
-enum {
- INT64_FIELDS_COUNT = INT64_FIELDS_END - INT64_FIELDS_BEGIN,
- TIME_FIELDS_BEGIN = INT64_FIELDS_END,
-};
-
-enum TimeField {
- MTIME = TIME_FIELDS_BEGIN,
- SERVER_MTIME,
- CTIME,
- SERVER_CTIME,
- TIME_FIELDS_END,
-};
-
-enum {
- TIME_FIELDS_COUNT = TIME_FIELDS_END - TIME_FIELDS_BEGIN,
- ID_FIELDS_BEGIN = TIME_FIELDS_END,
-};
-
-enum IdField {
- // Code in InitializeTables relies on ID being the first IdField value.
- ID = ID_FIELDS_BEGIN,
- PARENT_ID,
- SERVER_PARENT_ID,
-
- PREV_ID,
- NEXT_ID,
- ID_FIELDS_END
-};
-
-enum {
- ID_FIELDS_COUNT = ID_FIELDS_END - ID_FIELDS_BEGIN,
- BIT_FIELDS_BEGIN = ID_FIELDS_END
-};
-
-enum IndexedBitField {
- IS_UNSYNCED = BIT_FIELDS_BEGIN,
- IS_UNAPPLIED_UPDATE,
- INDEXED_BIT_FIELDS_END,
-};
-
-enum IsDelField {
- IS_DEL = INDEXED_BIT_FIELDS_END,
-};
-
-enum BitField {
- IS_DIR = IS_DEL + 1,
- SERVER_IS_DIR,
- SERVER_IS_DEL,
- BIT_FIELDS_END
-};
-
-enum {
- BIT_FIELDS_COUNT = BIT_FIELDS_END - BIT_FIELDS_BEGIN,
- STRING_FIELDS_BEGIN = BIT_FIELDS_END
-};
-
-enum StringField {
- // Name, will be truncated by server. Can be duplicated in a folder.
- NON_UNIQUE_NAME = STRING_FIELDS_BEGIN,
- // The server version of |NON_UNIQUE_NAME|.
- SERVER_NON_UNIQUE_NAME,
-
- // A tag string which identifies this node as a particular top-level
- // permanent object. The tag can be thought of as a unique key that
- // identifies a singleton instance.
- UNIQUE_SERVER_TAG, // Tagged by the server
- UNIQUE_CLIENT_TAG, // Tagged by the client
- STRING_FIELDS_END,
-};
-
-enum {
- STRING_FIELDS_COUNT = STRING_FIELDS_END - STRING_FIELDS_BEGIN,
- PROTO_FIELDS_BEGIN = STRING_FIELDS_END
-};
-
-// From looking at the sqlite3 docs, it's not directly stated, but it
-// seems the overhead for storing a NULL blob is very small.
-enum ProtoField {
- SPECIFICS = PROTO_FIELDS_BEGIN,
- SERVER_SPECIFICS,
- BASE_SERVER_SPECIFICS,
- PROTO_FIELDS_END,
-};
-
-enum {
- PROTO_FIELDS_COUNT = PROTO_FIELDS_END - PROTO_FIELDS_BEGIN
-};
-
-enum {
- FIELD_COUNT = PROTO_FIELDS_END,
- // Past this point we have temporaries, stored in memory only.
- BEGIN_TEMPS = PROTO_FIELDS_END,
- BIT_TEMPS_BEGIN = BEGIN_TEMPS,
-};
-
-enum BitTemp {
- SYNCING = BIT_TEMPS_BEGIN,
- BIT_TEMPS_END,
-};
-
-enum {
- BIT_TEMPS_COUNT = BIT_TEMPS_END - BIT_TEMPS_BEGIN
-};
-
-class BaseTransaction;
-class WriteTransaction;
-class ReadTransaction;
-class Directory;
-
-// Instead of:
-// Entry e = transaction.GetById(id);
-// use:
-// Entry e(transaction, GET_BY_ID, id);
-//
-// Why? The former would require a copy constructor, and it would be difficult
-// to enforce that an entry never outlived its transaction if there were a copy
-// constructor.
-enum GetById {
- GET_BY_ID
-};
-
-enum GetByClientTag {
- GET_BY_CLIENT_TAG
-};
-
-enum GetByServerTag {
- GET_BY_SERVER_TAG
-};
-
-enum GetByHandle {
- GET_BY_HANDLE
-};
-
-enum Create {
- CREATE
-};
-
-enum CreateNewUpdateItem {
- CREATE_NEW_UPDATE_ITEM
-};
-
-typedef std::set<int64> MetahandleSet;
-
-// TODO(akalin): Move EntryKernel and related into its own header file.
-
-// Why the singular enums? So the code compile-time dispatches instead of
-// runtime dispatches as it would with a single enum and an if() statement.
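A quick standalone illustration of that tag-dispatch idea: each lookup mode is a distinct enum type, so the matching constructor overload is selected at compile time instead of branching on a runtime flag. The Record/BY_ID names below are toy stand-ins, not the syncable ones.

// Sketch: singular enums as overload tags.
#include <iostream>
#include <string>

enum GetByIdTag { BY_ID };
enum GetByNameTag { BY_NAME };

class Record {
 public:
  Record(GetByIdTag, int id) : label_("looked up by id") { (void)id; }
  Record(GetByNameTag, const std::string& name)
      : label_("looked up by name") { (void)name; }
  const std::string& label() const { return label_; }

 private:
  std::string label_;
};

int main() {
  Record a(BY_ID, 42);            // Dispatches to the id overload.
  Record b(BY_NAME, "bookmark");  // Dispatches to the name overload.
  std::cout << a.label() << "\n" << b.label() << "\n";
  return 0;
}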
-
-// The EntryKernel class contains the actual data for an entry.
-struct EntryKernel {
- private:
- std::string string_fields[STRING_FIELDS_COUNT];
- sync_pb::EntitySpecifics specifics_fields[PROTO_FIELDS_COUNT];
- int64 int64_fields[INT64_FIELDS_COUNT];
- base::Time time_fields[TIME_FIELDS_COUNT];
- Id id_fields[ID_FIELDS_COUNT];
- std::bitset<BIT_FIELDS_COUNT> bit_fields;
- std::bitset<BIT_TEMPS_COUNT> bit_temps;
-
- public:
- EntryKernel();
- ~EntryKernel();
-
- // Set the dirty bit, and optionally add this entry's metahandle to
- // a provided index on dirty bits in |dirty_index|. Parameter may be null,
- // and will result only in setting the dirty bit of this entry.
- inline void mark_dirty(syncable::MetahandleSet* dirty_index) {
- if (!dirty_ && dirty_index) {
- DCHECK_NE(0, ref(META_HANDLE));
- dirty_index->insert(ref(META_HANDLE));
- }
- dirty_ = true;
- }
-
- // Clear the dirty bit, and optionally remove this entry's metahandle from
- // a provided index on dirty bits in |dirty_index|. Parameter may be null,
- // and will result only in clearing dirty bit of this entry.
- inline void clear_dirty(syncable::MetahandleSet* dirty_index) {
- if (dirty_ && dirty_index) {
- DCHECK_NE(0, ref(META_HANDLE));
- dirty_index->erase(ref(META_HANDLE));
- }
- dirty_ = false;
- }
-
- inline bool is_dirty() const {
- return dirty_;
- }
-
- // Setters.
- inline void put(MetahandleField field, int64 value) {
- int64_fields[field - INT64_FIELDS_BEGIN] = value;
- }
- inline void put(Int64Field field, int64 value) {
- int64_fields[field - INT64_FIELDS_BEGIN] = value;
- }
- inline void put(TimeField field, const base::Time& value) {
- // Round-trip to proto time format and back so that we have
- // consistent time resolutions (ms).
- time_fields[field - TIME_FIELDS_BEGIN] =
- browser_sync::ProtoTimeToTime(
- browser_sync::TimeToProtoTime(value));
- }
- inline void put(IdField field, const Id& value) {
- id_fields[field - ID_FIELDS_BEGIN] = value;
- }
- inline void put(BaseVersion field, int64 value) {
- int64_fields[field - INT64_FIELDS_BEGIN] = value;
- }
- inline void put(IndexedBitField field, bool value) {
- bit_fields[field - BIT_FIELDS_BEGIN] = value;
- }
- inline void put(IsDelField field, bool value) {
- bit_fields[field - BIT_FIELDS_BEGIN] = value;
- }
- inline void put(BitField field, bool value) {
- bit_fields[field - BIT_FIELDS_BEGIN] = value;
- }
- inline void put(StringField field, const std::string& value) {
- string_fields[field - STRING_FIELDS_BEGIN] = value;
- }
- inline void put(ProtoField field, const sync_pb::EntitySpecifics& value) {
- specifics_fields[field - PROTO_FIELDS_BEGIN].CopyFrom(value);
- }
- inline void put(BitTemp field, bool value) {
- bit_temps[field - BIT_TEMPS_BEGIN] = value;
- }
-
- // Const ref getters.
- inline int64 ref(MetahandleField field) const {
- return int64_fields[field - INT64_FIELDS_BEGIN];
- }
- inline int64 ref(Int64Field field) const {
- return int64_fields[field - INT64_FIELDS_BEGIN];
- }
- inline const base::Time& ref(TimeField field) const {
- return time_fields[field - TIME_FIELDS_BEGIN];
- }
- inline const Id& ref(IdField field) const {
- return id_fields[field - ID_FIELDS_BEGIN];
- }
- inline int64 ref(BaseVersion field) const {
- return int64_fields[field - INT64_FIELDS_BEGIN];
- }
- inline bool ref(IndexedBitField field) const {
- return bit_fields[field - BIT_FIELDS_BEGIN];
- }
- inline bool ref(IsDelField field) const {
- return bit_fields[field - BIT_FIELDS_BEGIN];
- }
- inline bool ref(BitField field) const {
- return bit_fields[field - BIT_FIELDS_BEGIN];
- }
- inline const std::string& ref(StringField field) const {
- return string_fields[field - STRING_FIELDS_BEGIN];
- }
- inline const sync_pb::EntitySpecifics& ref(ProtoField field) const {
- return specifics_fields[field - PROTO_FIELDS_BEGIN];
- }
- inline bool ref(BitTemp field) const {
- return bit_temps[field - BIT_TEMPS_BEGIN];
- }
-
- // Non-const, mutable ref getters for object types only.
- inline std::string& mutable_ref(StringField field) {
- return string_fields[field - STRING_FIELDS_BEGIN];
- }
- inline sync_pb::EntitySpecifics& mutable_ref(ProtoField field) {
- return specifics_fields[field - PROTO_FIELDS_BEGIN];
- }
- inline Id& mutable_ref(IdField field) {
- return id_fields[field - ID_FIELDS_BEGIN];
- }
-
- syncable::ModelType GetServerModelType() const;
-
-  // Does a case-insensitive search for a given string, which must be
- // lower case.
- bool ContainsString(const std::string& lowercase_query) const;
-
- // Dumps all kernel info into a DictionaryValue and returns it.
- // Transfers ownership of the DictionaryValue to the caller.
- base::DictionaryValue* ToValue() const;
-
- private:
- // Tracks whether this entry needs to be saved to the database.
- bool dirty_;
-};
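The layout trick behind EntryKernel is that each value type owns one contiguous slice of the field enum space, and every accessor subtracts that slice's *_BEGIN constant to index a plain array of the matching type. A compilable toy version of the same scheme, using hypothetical VERSION/NAME fields rather than the real ones:

// Sketch: contiguous per-type enum ranges backing typed arrays.
#include <iostream>
#include <string>

enum { INT_FIELDS_BEGIN = 0 };
enum IntField { VERSION = INT_FIELDS_BEGIN, SIZE, INT_FIELDS_END };
enum { STRING_FIELDS_BEGIN = INT_FIELDS_END };
enum StringField { NAME = STRING_FIELDS_BEGIN, TAG, STRING_FIELDS_END };

struct ToyKernel {
  long long int_fields[INT_FIELDS_END - INT_FIELDS_BEGIN];
  std::string string_fields[STRING_FIELDS_END - STRING_FIELDS_BEGIN];

  void put(IntField f, long long v) { int_fields[f - INT_FIELDS_BEGIN] = v; }
  void put(StringField f, const std::string& v) {
    string_fields[f - STRING_FIELDS_BEGIN] = v;
  }
  long long ref(IntField f) const { return int_fields[f - INT_FIELDS_BEGIN]; }
  const std::string& ref(StringField f) const {
    return string_fields[f - STRING_FIELDS_BEGIN];
  }
};

int main() {
  ToyKernel kernel;
  kernel.put(VERSION, 7);
  kernel.put(NAME, "folder");
  std::cout << kernel.ref(VERSION) << " " << kernel.ref(NAME) << "\n";
  return 0;
}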
-
-// A read-only meta entry.
-class Entry {
- friend class Directory;
- friend std::ostream& operator << (std::ostream& s, const Entry& e);
-
- public:
- // After constructing, you must check good() to test whether the Get
- // succeeded.
- Entry(BaseTransaction* trans, GetByHandle, int64 handle);
- Entry(BaseTransaction* trans, GetById, const Id& id);
- Entry(BaseTransaction* trans, GetByServerTag, const std::string& tag);
- Entry(BaseTransaction* trans, GetByClientTag, const std::string& tag);
-
- bool good() const { return 0 != kernel_; }
-
- BaseTransaction* trans() const { return basetrans_; }
-
- // Field accessors.
- inline int64 Get(MetahandleField field) const {
- DCHECK(kernel_);
- return kernel_->ref(field);
- }
- inline Id Get(IdField field) const {
- DCHECK(kernel_);
- return kernel_->ref(field);
- }
- inline int64 Get(Int64Field field) const {
- DCHECK(kernel_);
- return kernel_->ref(field);
- }
- inline const base::Time& Get(TimeField field) const {
- DCHECK(kernel_);
- return kernel_->ref(field);
- }
- inline int64 Get(BaseVersion field) const {
- DCHECK(kernel_);
- return kernel_->ref(field);
- }
- inline bool Get(IndexedBitField field) const {
- DCHECK(kernel_);
- return kernel_->ref(field);
- }
- inline bool Get(IsDelField field) const {
- DCHECK(kernel_);
- return kernel_->ref(field);
- }
- inline bool Get(BitField field) const {
- DCHECK(kernel_);
- return kernel_->ref(field);
- }
- const std::string& Get(StringField field) const;
- inline const sync_pb::EntitySpecifics& Get(ProtoField field) const {
- DCHECK(kernel_);
- return kernel_->ref(field);
- }
- inline bool Get(BitTemp field) const {
- DCHECK(kernel_);
- return kernel_->ref(field);
- }
-
- ModelType GetServerModelType() const;
- ModelType GetModelType() const;
-
- inline bool ExistsOnClientBecauseNameIsNonEmpty() const {
- DCHECK(kernel_);
- return !kernel_->ref(NON_UNIQUE_NAME).empty();
- }
-
- inline bool IsRoot() const {
- DCHECK(kernel_);
- return kernel_->ref(ID).IsRoot();
- }
-
- Directory* dir() const;
-
- const EntryKernel GetKernelCopy() const {
- return *kernel_;
- }
-
- // Compute a local predecessor position for |update_item|, based on its
- // absolute server position. The returned ID will be a valid predecessor
- // under SERVER_PARENT_ID that is consistent with the
- // SERVER_POSITION_IN_PARENT ordering.
- Id ComputePrevIdFromServerPosition(const Id& parent_id) const;
-
- // Dumps all entry info into a DictionaryValue and returns it.
- // Transfers ownership of the DictionaryValue to the caller.
- base::DictionaryValue* ToValue() const;
-
- protected: // Don't allow creation on heap, except by sync API wrappers.
- friend class sync_api::ReadNode;
- void* operator new(size_t size) { return (::operator new)(size); }
-
- inline explicit Entry(BaseTransaction* trans)
- : basetrans_(trans),
- kernel_(NULL) { }
-
- protected:
- BaseTransaction* const basetrans_;
-
- EntryKernel* kernel_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(Entry);
-};
-
-// A mutable meta entry. Changes get committed to the database when the
-// WriteTransaction is destroyed.
-class MutableEntry : public Entry {
- friend class WriteTransaction;
- friend class Directory;
- void Init(WriteTransaction* trans, const Id& parent_id,
- const std::string& name);
-
- public:
- MutableEntry(WriteTransaction* trans, Create, const Id& parent_id,
- const std::string& name);
- MutableEntry(WriteTransaction* trans, CreateNewUpdateItem, const Id& id);
- MutableEntry(WriteTransaction* trans, GetByHandle, int64);
- MutableEntry(WriteTransaction* trans, GetById, const Id&);
- MutableEntry(WriteTransaction* trans, GetByClientTag, const std::string& tag);
- MutableEntry(WriteTransaction* trans, GetByServerTag, const std::string& tag);
-
- inline WriteTransaction* write_transaction() const {
- return write_transaction_;
- }
-
- // Field Accessors. Some of them trigger the re-indexing of the entry.
- // Return true on success, return false on failure, which means
- // that putting the value would have caused a duplicate in the index.
-  // TODO(chron): Remove some of these unnecessary return values.
- bool Put(Int64Field field, const int64& value);
- bool Put(TimeField field, const base::Time& value);
- bool Put(IdField field, const Id& value);
-
-  // Do a simple property-only update of the PARENT_ID field. Use with caution.
-  //
-  // The normal Put(PARENT_ID) call will move the item to the front of the
-  // sibling order to maintain the linked list invariants when the parent
-  // changes. That's usually what you want to do, but it's inappropriate
-  // when the caller is trying to change the parent ID of the whole set
-  // of children (e.g. because the ID changed during a commit). For those
-  // cases, there's this function. It will corrupt the sibling ordering
-  // if you're not careful.
- void PutParentIdPropertyOnly(const Id& parent_id);
-
- bool Put(StringField field, const std::string& value);
- bool Put(BaseVersion field, int64 value);
-
- bool Put(ProtoField field, const sync_pb::EntitySpecifics& value);
- bool Put(BitField field, bool value);
- inline bool Put(IsDelField field, bool value) {
- return PutIsDel(value);
- }
- bool Put(IndexedBitField field, bool value);
-
- // Sets the position of this item, and updates the entry kernels of the
- // adjacent siblings so that list invariants are maintained. Returns false
- // and fails if |predecessor_id| does not identify a sibling. Pass the root
- // ID to put the node in first position.
- bool PutPredecessor(const Id& predecessor_id);
-
- bool Put(BitTemp field, bool value);
-
- protected:
- syncable::MetahandleSet* GetDirtyIndexHelper();
-
- bool PutIsDel(bool value);
-
- private: // Don't allow creation on heap, except by sync API wrappers.
- friend class sync_api::WriteNode;
- void* operator new(size_t size) { return (::operator new)(size); }
-
- bool PutImpl(StringField field, const std::string& value);
- bool PutUniqueClientTag(const std::string& value);
-
- // Adjusts the successor and predecessor entries so that they no longer
- // refer to this entry.
- bool UnlinkFromOrder();
-
- // Kind of redundant. We should reduce the number of pointers
- // floating around if at all possible. Could we store this in Directory?
- // Scope: Set on construction, never changed after that.
- WriteTransaction* const write_transaction_;
-
- protected:
- MutableEntry();
-
- DISALLOW_COPY_AND_ASSIGN(MutableEntry);
-};
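The Entry/MutableEntry write path declared above is exercised directly by the unit tests later in this change. A minimal sketch of a typical create-and-populate sequence, adapted from that test code (|dir| is assumed to be an open Directory; TestIdFactory comes from the sync test utilities):

    WriteTransaction wtrans(FROM_HERE, UNITTEST, &dir);
    MutableEntry me(&wtrans, CREATE, wtrans.root_id(), "Jeff");
    CHECK(me.good());
    me.Put(ID, TestIdFactory::FromNumber(99));  // Re-indexes the entry by its new ID.
    me.Put(BASE_VERSION, 1);
    // The change is committed to the database when |wtrans| goes out of scope.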
-
-template <typename FieldType, FieldType field_index> class LessField;
-
-class EntryKernelLessByMetaHandle {
- public:
- inline bool operator()(const EntryKernel& a,
- const EntryKernel& b) const {
- return a.ref(META_HANDLE) < b.ref(META_HANDLE);
- }
-};
-typedef std::set<EntryKernel, EntryKernelLessByMetaHandle> EntryKernelSet;
-
-struct EntryKernelMutation {
- EntryKernel original, mutated;
-};
-typedef std::map<int64, EntryKernelMutation> EntryKernelMutationMap;
-
-typedef browser_sync::Immutable<EntryKernelMutationMap>
- ImmutableEntryKernelMutationMap;
-
-// A WriteTransaction has a writer tag describing which body of code is doing
-// the write. This is defined up here since WriteTransactionInfo also contains
-// one.
-enum WriterTag {
- INVALID,
- SYNCER,
- AUTHWATCHER,
- UNITTEST,
- VACUUM_AFTER_SAVE,
- PURGE_ENTRIES,
- SYNCAPI
-};
-
-// Make sure to update this if you update WriterTag.
-std::string WriterTagToString(WriterTag writer_tag);
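WriterTagToString() is defined in syncable.cc; the reminder above implies a simple one-case-per-value switch. A plausible sketch (not the actual definition), mirroring the ENUM_CASE style used elsewhere in this directory:

    std::string WriterTagToString(WriterTag writer_tag) {
      switch (writer_tag) {
        case INVALID:           return "INVALID";
        case SYNCER:            return "SYNCER";
        case AUTHWATCHER:       return "AUTHWATCHER";
        case UNITTEST:          return "UNITTEST";
        case VACUUM_AFTER_SAVE: return "VACUUM_AFTER_SAVE";
        case PURGE_ENTRIES:     return "PURGE_ENTRIES";
        case SYNCAPI:           return "SYNCAPI";
      }
      NOTREACHED();
      return "";
    }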
-
-struct WriteTransactionInfo {
- WriteTransactionInfo(int64 id,
- tracked_objects::Location location,
- WriterTag writer,
- ImmutableEntryKernelMutationMap mutations);
- WriteTransactionInfo();
- ~WriteTransactionInfo();
-
- // Caller owns the return value.
- base::DictionaryValue* ToValue(size_t max_mutations_size) const;
-
- int64 id;
- // If tracked_objects::Location becomes assignable, we can use that
- // instead.
- std::string location_string;
- WriterTag writer;
- ImmutableEntryKernelMutationMap mutations;
-};
-
-typedef
- browser_sync::Immutable<WriteTransactionInfo>
- ImmutableWriteTransactionInfo;
-
-// Caller owns the return value.
-base::DictionaryValue* EntryKernelMutationToValue(
- const EntryKernelMutation& mutation);
-
-// Caller owns the return value.
-base::ListValue* EntryKernelMutationMapToValue(
- const EntryKernelMutationMap& mutations);
-
-// How syncable indices & Indexers work.
-//
-// The syncable Directory maintains several indices on the Entries it tracks.
-// The indices follow a common pattern:
-// (a) The index allows efficient lookup of an Entry* with particular
-// field values. This is done by use of a std::set<> and a custom
-// comparator.
-// (b) There may be conditions for inclusion in the index -- for example,
-// deleted items might not be indexed.
-// (c) Because the index set contains only Entry*, one must be careful
-// to remove Entries from the set before updating the value of
-// an indexed field.
-// The traits of an index are a Comparator (to define the set ordering) and a
-// ShouldInclude function (to define the conditions for inclusion). For each
-// index, the traits are grouped into a class called an Indexer which
-// can be used as a template type parameter.
-
-// Traits type for metahandle index.
-struct MetahandleIndexer {
- // This index is of the metahandle field values.
- typedef LessField<MetahandleField, META_HANDLE> Comparator;
-
- // This index includes all entries.
- inline static bool ShouldInclude(const EntryKernel* a) {
- return true;
- }
-};
-
-// Traits type for ID field index.
-struct IdIndexer {
- // This index is of the ID field values.
- typedef LessField<IdField, ID> Comparator;
-
- // This index includes all entries.
- inline static bool ShouldInclude(const EntryKernel* a) {
- return true;
- }
-};
-
-// Traits type for unique client tag index.
-struct ClientTagIndexer {
- // This index is of the client-tag values.
- typedef LessField<StringField, UNIQUE_CLIENT_TAG> Comparator;
-
- // Items are only in this index if they have a non-empty client tag value.
- static bool ShouldInclude(const EntryKernel* a);
-};
-
-// This index contains EntryKernels ordered by parent ID and metahandle.
-// It allows efficient lookup of the children of a given parent.
-struct ParentIdAndHandleIndexer {
- // This index is of the parent ID and metahandle. We use a custom
- // comparator.
- class Comparator {
- public:
- bool operator() (const syncable::EntryKernel* a,
- const syncable::EntryKernel* b) const;
- };
-
- // This index does not include deleted items.
- static bool ShouldInclude(const EntryKernel* a);
-};
-
-// Given an Indexer providing the semantics of an index, defines the
-// set type used to actually contain the index.
-template <typename Indexer>
-struct Index {
- typedef std::set<EntryKernel*, typename Indexer::Comparator> Set;
-};
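To see how the Indexer pieces compose: LessField (forward-declared above, defined in syncable-inl.h) compares two EntryKernels on a single field, and Index<Indexer>::Set plugs that comparator into a std::set. The shape of LessField shown here is inferred from its usage, not copied from the actual definition:

    // Hypothetical definition: orders EntryKernels by a single field's value.
    template <typename FieldType, FieldType field_index>
    class LessField {
     public:
      inline bool operator()(const syncable::EntryKernel* a,
                             const syncable::EntryKernel* b) const {
        return a->ref(field_index) < b->ref(field_index);
      }
    };

    // With these traits, Index<MetahandleIndexer>::Set is
    // std::set<EntryKernel*, LessField<MetahandleField, META_HANDLE> >.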
-
-// The name Directory in this case means the entire directory
-// structure within a single user account.
-//
-// The db is protected against concurrent modification by a reader/
-// writer lock, negotiated by the ReadTransaction and WriteTransaction
-// friend classes. The in-memory indices are protected against
-// concurrent modification by the kernel lock.
-//
-// All methods which require the reader/writer lock to be held either
-// are protected and only called from friends in a transaction
-// or are public and take a Transaction* argument.
-//
-// All methods which require the kernel lock to be already held take a
-// ScopeKernelLock* argument.
-//
-// To prevent deadlock, the reader writer transaction lock must always
-// be held before acquiring the kernel lock.
-class ScopedKernelLock;
-class IdFilter;
-
-class Directory {
- friend class BaseTransaction;
- friend class Entry;
- friend class MutableEntry;
- friend class ReadTransaction;
- friend class ReadTransactionWithoutDB;
- friend class ScopedKernelLock;
- friend class ScopedKernelUnlock;
- friend class WriteTransaction;
- friend class SyncableDirectoryTest;
- FRIEND_TEST_ALL_PREFIXES(SyncableDirectoryTest,
- TakeSnapshotGetsAllDirtyHandlesTest);
- FRIEND_TEST_ALL_PREFIXES(SyncableDirectoryTest,
- TakeSnapshotGetsOnlyDirtyHandlesTest);
- FRIEND_TEST_ALL_PREFIXES(SyncableDirectoryTest,
- TakeSnapshotGetsMetahandlesToPurge);
-
- public:
- static const FilePath::CharType kSyncDatabaseFilename[];
-
- // Various data that the Directory::Kernel we are backing (persisting data
- // for) needs saved across runs of the application.
- struct PersistedKernelInfo {
- PersistedKernelInfo();
- ~PersistedKernelInfo();
-
- // Set the |download_progress| entry for the given model to a
- // "first sync" start point. When such a value is sent to the server,
- // a full download of all objects of the model will be initiated.
- void reset_download_progress(ModelType model_type);
-
- // Last sync timestamp fetched from the server.
- sync_pb::DataTypeProgressMarker download_progress[MODEL_TYPE_COUNT];
- // true iff we ever reached the end of the changelog.
- ModelTypeSet initial_sync_ended;
- // The store birthday we were given by the server. Contents are opaque to
- // the client.
- std::string store_birthday;
- // The next local ID that has not been used with this cache-GUID.
- int64 next_id;
- // The persisted notification state.
- std::string notification_state;
- };
-
- // What the Directory needs on initialization to create itself and its Kernel.
- // Filled by DirectoryBackingStore::Load.
- struct KernelLoadInfo {
- PersistedKernelInfo kernel_info;
- std::string cache_guid; // Created on first initialization, never changes.
- int64 max_metahandle; // Computed (using sql MAX aggregate) on init.
- KernelLoadInfo() : max_metahandle(0) {
- }
- };
-
- // The dirty/clean state of kernel fields backed by the share_info table.
- // This is public so it can be used in SaveChangesSnapshot for persistence.
- enum KernelShareInfoStatus {
- KERNEL_SHARE_INFO_INVALID,
- KERNEL_SHARE_INFO_VALID,
- KERNEL_SHARE_INFO_DIRTY
- };
-
- // When the Directory is told to SaveChanges, a SaveChangesSnapshot is
- // constructed and forms a consistent snapshot of what needs to be sent to
- // the backing store.
- struct SaveChangesSnapshot {
- SaveChangesSnapshot();
- ~SaveChangesSnapshot();
-
- KernelShareInfoStatus kernel_info_status;
- PersistedKernelInfo kernel_info;
- EntryKernelSet dirty_metas;
- MetahandleSet metahandles_to_purge;
- };
-
- // Does not take ownership of |encryptor|.
- // |report_unrecoverable_error_function| may be NULL.
- Directory(
- browser_sync::Encryptor* encryptor,
- browser_sync::UnrecoverableErrorHandler* unrecoverable_error_handler,
- browser_sync::ReportUnrecoverableErrorFunction
- report_unrecoverable_error_function);
- virtual ~Directory();
-
- // Does not take ownership of |delegate|, which must not be NULL.
- // Starts sending events to |delegate| if the returned result is
- // OPENED. Note that events to |delegate| may be sent from *any*
- // thread. |transaction_observer| must be initialized.
- DirOpenResult Open(const FilePath& file_path, const std::string& name,
- DirectoryChangeDelegate* delegate,
- const browser_sync::WeakHandle<TransactionObserver>&
- transaction_observer);
-
- // Same as above, but does not create a file to persist the database. This is
-  // useful for tests where we are not planning to persist this data and don't
- // want to pay the performance penalty of using a real database.
- DirOpenResult OpenInMemoryForTest(
- const std::string& name, DirectoryChangeDelegate* delegate,
- const browser_sync::WeakHandle<TransactionObserver>&
- transaction_observer);
-
- // Stops sending events to the delegate and the transaction
- // observer.
- void Close();
-
- int64 NextMetahandle();
-  // Always returns a negative id. Positive ids are generated
-  // by the server only.
- Id NextId();
-
- bool good() const { return NULL != store_; }
-
- // The download progress is an opaque token provided by the sync server
- // to indicate the continuation state of the next GetUpdates operation.
- void GetDownloadProgress(
- ModelType type,
- sync_pb::DataTypeProgressMarker* value_out) const;
- void GetDownloadProgressAsString(
- ModelType type,
- std::string* value_out) const;
- size_t GetEntriesCount() const;
- void SetDownloadProgress(
- ModelType type,
- const sync_pb::DataTypeProgressMarker& value);
-
- bool initial_sync_ended_for_type(ModelType type) const;
- void set_initial_sync_ended_for_type(ModelType type, bool value);
-
- const std::string& name() const { return kernel_->name; }
-
- // (Account) Store birthday is opaque to the client, so we keep it in the
- // format it is in the proto buffer in case we switch to a binary birthday
- // later.
- std::string store_birthday() const;
- void set_store_birthday(const std::string& store_birthday);
-
- std::string GetNotificationState() const;
- void SetNotificationState(const std::string& notification_state);
-
- // Unique to each account / client pair.
- std::string cache_guid() const;
-
- // Returns a pointer to our cryptographer. Does not transfer ownership. The
- // cryptographer is not thread safe; it should not be accessed after the
- // transaction has been released.
- browser_sync::Cryptographer* GetCryptographer(const BaseTransaction* trans);
-
-  // Returns true if the directory has encountered an unrecoverable error.
-  // Note: Any function in |Directory| that can be called without holding a
-  // transaction needs to check if the Directory already has an unrecoverable
-  // error on it.
- bool unrecoverable_error_set(const BaseTransaction* trans) const;
-
- // Called to immediately report an unrecoverable error (but don't
- // propagate it up).
- void ReportUnrecoverableError() {
- if (report_unrecoverable_error_function_) {
- report_unrecoverable_error_function_();
- }
- }
-
- // Called to set the unrecoverable error on the directory and to propagate
- // the error to upper layers.
- void OnUnrecoverableError(const BaseTransaction* trans,
- const tracked_objects::Location& location,
- const std::string & message);
-
- protected: // for friends, mainly used by Entry constructors
- virtual EntryKernel* GetEntryByHandle(int64 handle);
- virtual EntryKernel* GetEntryByHandle(int64 metahandle,
- ScopedKernelLock* lock);
- virtual EntryKernel* GetEntryById(const Id& id);
- EntryKernel* GetEntryByServerTag(const std::string& tag);
- virtual EntryKernel* GetEntryByClientTag(const std::string& tag);
- EntryKernel* GetRootEntry();
- bool ReindexId(WriteTransaction* trans, EntryKernel* const entry,
- const Id& new_id);
- bool ReindexParentId(WriteTransaction* trans, EntryKernel* const entry,
- const Id& new_parent_id);
- void ClearDirtyMetahandles();
-
- // These don't do semantic checking.
- // The semantic checking is implemented higher up.
- bool UnlinkEntryFromOrder(EntryKernel* entry,
- WriteTransaction* trans,
- ScopedKernelLock* lock);
-
- DirOpenResult OpenImpl(
- DirectoryBackingStore* store, const std::string& name,
- DirectoryChangeDelegate* delegate,
- const browser_sync::WeakHandle<TransactionObserver>&
- transaction_observer);
-
- private:
- // These private versions expect the kernel lock to already be held
- // before calling.
- EntryKernel* GetEntryById(const Id& id, ScopedKernelLock* const lock);
-
- template <class T> void TestAndSet(T* kernel_data, const T* data_to_set);
-
- public:
- typedef std::vector<int64> ChildHandles;
-
- // Returns the child meta handles (even those for deleted/unlinked
- // nodes) for given parent id. Clears |result| if there are no
- // children.
- bool GetChildHandlesById(BaseTransaction*, const Id& parent_id,
- ChildHandles* result);
-
- // Returns the child meta handles (even those for deleted/unlinked
- // nodes) for given meta handle. Clears |result| if there are no
- // children.
- bool GetChildHandlesByHandle(BaseTransaction*, int64 handle,
- ChildHandles* result);
-
- // Returns true iff |id| has children.
- bool HasChildren(BaseTransaction* trans, const Id& id);
-
- // Find the first child in the positional ordering under a parent,
- // and fill in |*first_child_id| with its id. Fills in a root Id if
- // parent has no children. Returns true if the first child was
- // successfully found, or false if an error was encountered.
- bool GetFirstChildId(BaseTransaction* trans, const Id& parent_id,
- Id* first_child_id) WARN_UNUSED_RESULT;
-
- // Find the last child in the positional ordering under a parent,
-  // and fill in |*last_child_id| with its id. Fills in a root Id if
-  // parent has no children. Returns true if the last child was
- // successfully found, or false if an error was encountered.
- bool GetLastChildIdForTest(BaseTransaction* trans, const Id& parent_id,
- Id* last_child_id) WARN_UNUSED_RESULT;
-
- // Compute a local predecessor position for |update_item|. The position
- // is determined by the SERVER_POSITION_IN_PARENT value of |update_item|,
- // as well as the SERVER_POSITION_IN_PARENT values of any up-to-date
- // children of |parent_id|.
- Id ComputePrevIdFromServerPosition(
- const EntryKernel* update_item,
- const syncable::Id& parent_id);
-
- // SaveChanges works by taking a consistent snapshot of the current Directory
- // state and indices (by deep copy) under a ReadTransaction, passing this
- // snapshot to the backing store under no transaction, and finally cleaning
- // up by either purging entries no longer needed (this part done under a
- // WriteTransaction) or rolling back the dirty bits. It also uses
- // internal locking to enforce SaveChanges operations are mutually exclusive.
- //
- // WARNING: THIS METHOD PERFORMS SYNCHRONOUS I/O VIA SQLITE.
- bool SaveChanges();
-
- // Fill in |result| with all entry kernels.
- void GetAllEntryKernels(BaseTransaction* trans,
- std::vector<const EntryKernel*>* result);
-
- // Returns the number of entities with the unsynced bit set.
- int64 unsynced_entity_count() const;
-
-  // GetUnsyncedMetaHandles should only be called after SaveChanges and
- // before any new entries have been created. The intention is that the
- // syncer should call it from its PerformSyncQueries member.
- typedef std::vector<int64> UnsyncedMetaHandles;
- void GetUnsyncedMetaHandles(BaseTransaction* trans,
- UnsyncedMetaHandles* result);
-
- // Returns all server types with unapplied updates. A subset of
- // those types can then be passed into
- // GetUnappliedUpdateMetaHandles() below.
- FullModelTypeSet GetServerTypesWithUnappliedUpdates(
- BaseTransaction* trans) const;
-
- // Get all the metahandles for unapplied updates for a given set of
- // server types.
- typedef std::vector<int64> UnappliedUpdateMetaHandles;
- void GetUnappliedUpdateMetaHandles(BaseTransaction* trans,
- FullModelTypeSet server_types,
- UnappliedUpdateMetaHandles* result);
-
-  // Checks tree metadata consistency.
-  // If full_scan is false, the function avoids pulling any entries from the
-  // database and only scans entries currently in RAM.
-  // If full_scan is true, all entries will be pulled from the database.
-  // CHECKs will be triggered if we're given bad information.
- bool CheckTreeInvariants(syncable::BaseTransaction* trans,
- bool full_scan);
-
- bool CheckTreeInvariants(syncable::BaseTransaction* trans,
- const EntryKernelMutationMap& mutations);
-
- bool CheckTreeInvariants(syncable::BaseTransaction* trans,
- const MetahandleSet& handles,
- const IdFilter& idfilter);
-
- // Purges all data associated with any entries whose ModelType or
- // ServerModelType is found in |types|, from _both_ memory and disk.
- // Only valid, "real" model types are allowed in |types| (see model_type.h
- // for definitions). "Purge" is just meant to distinguish from "deleting"
- // entries, which means something different in the syncable namespace.
- // WARNING! This can be real slow, as it iterates over all entries.
- // WARNING! Performs synchronous I/O.
- virtual void PurgeEntriesWithTypeIn(ModelTypeSet types);
-
- private:
- // Helper to prime ids_index, parent_id_and_names_index, unsynced_metahandles
- // and unapplied_metahandles from metahandles_index.
- void InitializeIndices();
-
- // Constructs a consistent snapshot of the current Directory state and
- // indices (by deep copy) under a ReadTransaction for use in |snapshot|.
- // See SaveChanges() for more information.
- void TakeSnapshotForSaveChanges(SaveChangesSnapshot* snapshot);
-
- // Purges from memory any unused, safe to remove entries that were
- // successfully deleted on disk as a result of the SaveChanges that processed
- // |snapshot|. See SaveChanges() for more information.
- bool VacuumAfterSaveChanges(const SaveChangesSnapshot& snapshot);
-
- // Rolls back dirty bits in the event that the SaveChanges that
- // processed |snapshot| failed, for example, due to no disk space.
- void HandleSaveChangesFailure(const SaveChangesSnapshot& snapshot);
-
- // For new entry creation only
- bool InsertEntry(WriteTransaction* trans,
- EntryKernel* entry, ScopedKernelLock* lock);
- bool InsertEntry(WriteTransaction* trans, EntryKernel* entry);
-
- // Used by CheckTreeInvariants
- void GetAllMetaHandles(BaseTransaction* trans, MetahandleSet* result);
- bool SafeToPurgeFromMemory(WriteTransaction* trans,
- const EntryKernel* const entry) const;
-
- // Internal setters that do not acquire a lock internally. These are unsafe
- // on their own; caller must guarantee exclusive access manually by holding
- // a ScopedKernelLock.
- void set_initial_sync_ended_for_type_unsafe(ModelType type, bool x);
- void SetNotificationStateUnsafe(const std::string& notification_state);
-
- Directory& operator = (const Directory&);
-
- public:
- typedef Index<MetahandleIndexer>::Set MetahandlesIndex;
- typedef Index<IdIndexer>::Set IdsIndex;
- // All entries in memory must be in both the MetahandlesIndex and
-  // the IdsIndex, but only non-deleted entries will be in the
- // ParentIdChildIndex.
- typedef Index<ParentIdAndHandleIndexer>::Set ParentIdChildIndex;
-
- // Contains both deleted and existing entries with tags.
- // We can't store only existing tags because the client would create
- // items that had a duplicated ID in the end, resulting in a DB key
- // violation. ID reassociation would fail after an attempted commit.
- typedef Index<ClientTagIndexer>::Set ClientTagIndex;
-
- protected:
- // Used by tests. |delegate| must not be NULL.
- // |transaction_observer| must be initialized.
- void InitKernelForTest(
- const std::string& name,
- DirectoryChangeDelegate* delegate,
- const browser_sync::WeakHandle<TransactionObserver>&
- transaction_observer);
-
- private:
- struct Kernel {
- // |delegate| must not be NULL. |transaction_observer| must be
- // initialized.
- Kernel(const std::string& name, const KernelLoadInfo& info,
- DirectoryChangeDelegate* delegate,
- const browser_sync::WeakHandle<TransactionObserver>&
- transaction_observer);
-
- ~Kernel();
-
- void AddRef(); // For convenience.
- void Release();
-
- // TODO(timsteele): audit use of the member and remove if possible
- volatile base::subtle::AtomicWord refcount;
-
- // Implements ReadTransaction / WriteTransaction using a simple lock.
- base::Lock transaction_mutex;
-
- // Protected by transaction_mutex. Used by WriteTransactions.
- int64 next_write_transaction_id;
-
- // The name of this directory.
- std::string const name;
-
- // Protects all members below.
- // The mutex effectively protects all the indices, but not the
- // entries themselves. So once a pointer to an entry is pulled
- // from the index, the mutex can be unlocked and entry read or written.
- //
- // Never hold the mutex and do anything with the database or any
- // other buffered IO. Violating this rule will result in deadlock.
- base::Lock mutex;
- // Entries indexed by metahandle
- MetahandlesIndex* metahandles_index;
- // Entries indexed by id
- IdsIndex* ids_index;
- ParentIdChildIndex* parent_id_child_index;
- ClientTagIndex* client_tag_index;
- // So we don't have to create an EntryKernel every time we want to
- // look something up in an index. Needle in haystack metaphor.
- EntryKernel needle;
-
- // 3 in-memory indices on bits used extremely frequently by the syncer.
- // |unapplied_update_metahandles| is keyed by the server model type.
- MetahandleSet unapplied_update_metahandles[MODEL_TYPE_COUNT];
- MetahandleSet* const unsynced_metahandles;
- // Contains metahandles that are most likely dirty (though not
-    // necessarily). Dirtiness is confirmed in TakeSnapshotForSaveChanges().
- MetahandleSet* const dirty_metahandles;
-
- // When a purge takes place, we remove items from all our indices and stash
- // them in here so that SaveChanges can persist their permanent deletion.
- MetahandleSet* const metahandles_to_purge;
-
- KernelShareInfoStatus info_status;
-
-    // These 3 members are persisted in the share_info table, and
- // their state is marked by the flag above.
-
- // A structure containing the Directory state that is written back into the
- // database on SaveChanges.
- PersistedKernelInfo persisted_info;
-
- // A unique identifier for this account's cache db, used to generate
- // unique server IDs. No need to lock, only written at init time.
- const std::string cache_guid;
-
- // It doesn't make sense for two threads to run SaveChanges at the same
- // time; this mutex protects that activity.
- base::Lock save_changes_mutex;
-
- // The next metahandle is protected by kernel mutex.
- int64 next_metahandle;
-
- // The delegate for directory change events. Must not be NULL.
- DirectoryChangeDelegate* const delegate;
-
- // The transaction observer.
- const browser_sync::WeakHandle<TransactionObserver> transaction_observer;
- };
-
- // Helper method used to do searches on |parent_id_child_index|.
- ParentIdChildIndex::iterator LocateInParentChildIndex(
- const ScopedKernelLock& lock,
- const Id& parent_id,
- int64 position_in_parent,
- const Id& item_id_for_tiebreaking);
-
- // Return an iterator to the beginning of the range of the children of
- // |parent_id| in the kernel's parent_id_child_index.
- ParentIdChildIndex::iterator GetParentChildIndexLowerBound(
- const ScopedKernelLock& lock,
- const Id& parent_id);
-
- // Return an iterator to just past the end of the range of the
- // children of |parent_id| in the kernel's parent_id_child_index.
- ParentIdChildIndex::iterator GetParentChildIndexUpperBound(
- const ScopedKernelLock& lock,
- const Id& parent_id);
-
- // Append the handles of the children of |parent_id| to |result|.
- void AppendChildHandles(
- const ScopedKernelLock& lock,
- const Id& parent_id, Directory::ChildHandles* result);
-
- // Return a pointer to what is probably (but not certainly) the
- // first child of |parent_id|, or NULL if |parent_id| definitely has
- // no children.
- EntryKernel* GetPossibleFirstChild(
- const ScopedKernelLock& lock, const Id& parent_id);
-
- // Return a pointer to what is probably (but not certainly) the last
- // child of |parent_id|, or NULL if |parent_id| definitely has no
- // children.
- EntryKernel* GetPossibleLastChildForTest(
- const ScopedKernelLock& lock, const Id& parent_id);
-
- browser_sync::Cryptographer cryptographer_;
-
- Kernel* kernel_;
-
- DirectoryBackingStore* store_;
-
- browser_sync::UnrecoverableErrorHandler* const unrecoverable_error_handler_;
- const browser_sync::ReportUnrecoverableErrorFunction
- report_unrecoverable_error_function_;
- bool unrecoverable_error_set_;
-};
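The SaveChanges() contract described above (snapshot under a ReadTransaction, write to the backing store without holding the transaction, then vacuum or roll back) maps onto the private helpers declared in this class. A sketch of that control flow; the exact call sequence is an assumption rather than a copy of syncable.cc:

    bool Directory::SaveChanges() {
      base::AutoLock scoped_lock(kernel_->save_changes_mutex);  // One saver at a time.

      SaveChangesSnapshot snapshot;
      TakeSnapshotForSaveChanges(&snapshot);  // Deep copy under a ReadTransaction.

      // Synchronous SQLite I/O; no transaction lock is held here.
      // (Assumes a DirectoryBackingStore::SaveChanges entry point.)
      if (store_->SaveChanges(snapshot))
        return VacuumAfterSaveChanges(snapshot);  // Drop entries now safely on disk.

      HandleSaveChangesFailure(snapshot);  // Roll the dirty bits back.
      return false;
    }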
-
-class ScopedKernelLock {
- public:
- explicit ScopedKernelLock(const Directory*);
- ~ScopedKernelLock() {}
-
- base::AutoLock scoped_lock_;
- Directory* const dir_;
- DISALLOW_COPY_AND_ASSIGN(ScopedKernelLock);
-};
-
-// Transactions are now processed FIFO with a straight lock
-class BaseTransaction {
- friend class Entry;
- public:
- inline Directory* directory() const { return directory_; }
- inline Id root_id() const { return Id(); }
-
- virtual ~BaseTransaction();
-
- // This should be called when a database corruption is detected and there is
- // no way for us to recover short of wiping the database clean. When this is
- // called we set a bool in the transaction. The caller has to unwind the
- // stack. When the destructor for the transaction is called it acts upon the
- // bool and calls the Directory to handle the unrecoverable error.
- void OnUnrecoverableError(const tracked_objects::Location& location,
- const std::string& message);
-
- bool unrecoverable_error_set() const;
-
- protected:
- BaseTransaction(const tracked_objects::Location& from_here,
- const char* name,
- WriterTag writer,
- Directory* directory);
-
- void Lock();
- void Unlock();
-
-  // This should be called before unlocking because it calls the Directory's
-  // OnUnrecoverableError method which is not protected by locks and could
-  // be called from any thread. Holding the transaction lock ensures only one
-  // thread can call the method at a time.
- void HandleUnrecoverableErrorIfSet();
-
- const tracked_objects::Location from_here_;
- const char* const name_;
- WriterTag writer_;
- Directory* const directory_;
- Directory::Kernel* const dirkernel_; // for brevity
-
- // Error information.
- bool unrecoverable_error_set_;
- tracked_objects::Location unrecoverable_error_location_;
- std::string unrecoverable_error_msg_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(BaseTransaction);
-};
-
-// Locks db in constructor, unlocks in destructor.
-class ReadTransaction : public BaseTransaction {
- public:
- ReadTransaction(const tracked_objects::Location& from_here,
- Directory* directory);
-
- virtual ~ReadTransaction();
-
- protected: // Don't allow creation on heap, except by sync API wrapper.
- friend class sync_api::ReadTransaction;
- void* operator new(size_t size) { return (::operator new)(size); }
-
- DISALLOW_COPY_AND_ASSIGN(ReadTransaction);
-};
-
-// Locks db in constructor, unlocks in destructor.
-class WriteTransaction : public BaseTransaction {
- friend class MutableEntry;
- public:
- WriteTransaction(const tracked_objects::Location& from_here,
- WriterTag writer, Directory* directory);
-
- virtual ~WriteTransaction();
-
- void SaveOriginal(const EntryKernel* entry);
-
- protected:
- // Overridden by tests.
- virtual void NotifyTransactionComplete(ModelTypeSet models_with_changes);
-
- private:
- // Clears |mutations_|.
- ImmutableEntryKernelMutationMap RecordMutations();
-
- void UnlockAndNotify(const ImmutableEntryKernelMutationMap& mutations);
-
- ModelTypeSet NotifyTransactionChangingAndEnding(
- const ImmutableEntryKernelMutationMap& mutations);
-
- // Only the original fields are filled in until |RecordMutations()|.
- // We use a mutation map instead of a kernel set to avoid copying.
- EntryKernelMutationMap mutations_;
-
- DISALLOW_COPY_AND_ASSIGN(WriteTransaction);
-};
-
-bool IsLegalNewParent(BaseTransaction* trans, const Id& id, const Id& parentid);
-
-// This function sets only the flags needed to get this entry to sync.
-bool MarkForSyncing(syncable::MutableEntry* e);
-
-} // namespace syncable
-
-std::ostream& operator <<(std::ostream&, const syncable::Blob&);
-
-#endif // CHROME_BROWSER_SYNC_SYNCABLE_SYNCABLE_H_
diff --git a/chrome/browser/sync/syncable/syncable_changes_version.h b/chrome/browser/sync/syncable/syncable_changes_version.h
deleted file mode 100644
index 51cd3f8..0000000
--- a/chrome/browser/sync/syncable/syncable_changes_version.h
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright (c) 2009 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CHROME_BROWSER_SYNC_SYNCABLE_SYNCABLE_CHANGES_VERSION_H_
-#define CHROME_BROWSER_SYNC_SYNCABLE_SYNCABLE_CHANGES_VERSION_H_
-#pragma once
-
-namespace syncable {
-
-// For the most part, the sync engine treats version numbers as opaque values.
-// However, there are parts of our code base that break this abstraction, and
-// depend on the following two invariants:
-//
-// 1. CHANGES_VERSION is less than 0.
-// 2. The server only issues positive version numbers.
-//
-// Breaking these abstractions makes some operations 10 times
-// faster. If either of these invariants change, then those queries
-// must be revisited.
-
-enum {
- CHANGES_VERSION = -1
-};
-
-#define CHANGES_VERSION_STRING "-1"
-
-} // namespace syncable
-
-#endif // CHROME_BROWSER_SYNC_SYNCABLE_SYNCABLE_CHANGES_VERSION_H_
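If invariant (1) were enforced at compile time instead of by convention, a check along these lines would do it (this assertion is not part of the original header; COMPILE_ASSERT comes from base/basictypes.h):

    #include "base/basictypes.h"

    COMPILE_ASSERT(syncable::CHANGES_VERSION < 0,
                   changes_version_must_be_negative);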
diff --git a/chrome/browser/sync/syncable/syncable_columns.h b/chrome/browser/sync/syncable/syncable_columns.h
deleted file mode 100644
index b6f3cfb..0000000
--- a/chrome/browser/sync/syncable/syncable_columns.h
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CHROME_BROWSER_SYNC_SYNCABLE_SYNCABLE_COLUMNS_H_
-#define CHROME_BROWSER_SYNC_SYNCABLE_SYNCABLE_COLUMNS_H_
-#pragma once
-
-#include "chrome/browser/sync/syncable/syncable.h"
-#include "chrome/browser/sync/syncable/syncable_changes_version.h"
-
-namespace syncable {
-
-struct ColumnSpec {
- const char* name;
- const char* spec;
-};
-
-// Must be in the exact same order as the fields in syncable.h.
-static const ColumnSpec g_metas_columns[] = {
- //////////////////////////////////////
- // int64s
- {"metahandle", "bigint primary key ON CONFLICT FAIL"},
- {"base_version", "bigint default " CHANGES_VERSION_STRING},
- {"server_version", "bigint default 0"},
- {"server_position_in_parent", "bigint default 0"},
- // This is the item ID that we store for the embedding application.
- {"local_external_id", "bigint default 0"},
- // These timestamps are kept in the same format as that of the
- // protocol (ms since Unix epoch).
- {"mtime", "bigint default 0"},
- {"server_mtime", "bigint default 0"},
- {"ctime", "bigint default 0"},
- {"server_ctime", "bigint default 0"},
- //////////////////////////////////////
- // Ids
- {"id", "varchar(255) default \"r\""},
- {"parent_id", "varchar(255) default \"r\""},
- {"server_parent_id", "varchar(255) default \"r\""},
- {"prev_id", "varchar(255) default \"r\""},
- {"next_id", "varchar(255) default \"r\""},
- //////////////////////////////////////
- // bits
- {"is_unsynced", "bit default 0"},
- {"is_unapplied_update", "bit default 0"},
- {"is_del", "bit default 0"},
- {"is_dir", "bit default 0"},
- {"server_is_dir", "bit default 0"},
- {"server_is_del", "bit default 0"},
- //////////////////////////////////////
- // Strings
- {"non_unique_name", "varchar"},
- {"server_non_unique_name", "varchar(255)"},
- {"unique_server_tag", "varchar"},
- {"unique_client_tag", "varchar"},
- //////////////////////////////////////
- // Blobs.
- {"specifics", "blob"},
- {"server_specifics", "blob"},
- {"base_server_specifics", "blob"}
-};
-
-// At least enforce that the number of column names matches the number of fields.
-COMPILE_ASSERT(arraysize(g_metas_columns) >= FIELD_COUNT, missing_column_name);
-COMPILE_ASSERT(arraysize(g_metas_columns) <= FIELD_COUNT, extra_column_names);
-
-static inline const char* ColumnName(int field) {
- DCHECK(field < BEGIN_TEMPS);
- return g_metas_columns[field].name;
-}
-
-} // namespace syncable
-
-#endif // CHROME_BROWSER_SYNC_SYNCABLE_SYNCABLE_COLUMNS_H_
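DirectoryBackingStore walks g_metas_columns when it creates or migrates the metas table. A hedged sketch of how the column specs could be folded into a CREATE TABLE body (the real query assembly lives in directory_backing_store.cc; this helper and its name are illustrative only):

    static std::string ComposeMetasColumnSpecs() {
      std::string specs;
      const char* separator = "";
      for (size_t i = 0; i < arraysize(g_metas_columns); ++i) {
        specs += separator;
        specs += g_metas_columns[i].name;
        specs += ' ';
        specs += g_metas_columns[i].spec;
        separator = ", ";
      }
      // e.g. "metahandle bigint primary key ON CONFLICT FAIL, base_version ..."
      return specs;
    }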
diff --git a/chrome/browser/sync/syncable/syncable_enum_conversions.cc b/chrome/browser/sync/syncable/syncable_enum_conversions.cc
deleted file mode 100644
index a22a1cf..0000000
--- a/chrome/browser/sync/syncable/syncable_enum_conversions.cc
+++ /dev/null
@@ -1,164 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Keep this file in sync with syncable.h.
-
-#include "chrome/browser/sync/syncable/syncable_enum_conversions.h"
-
-#include "base/basictypes.h"
-#include "base/logging.h"
-
-namespace syncable {
-
-// We can't tokenize expected_min/expected_max since it can be a
-// general expression.
-#define ASSERT_ENUM_BOUNDS(enum_min, enum_max, expected_min, expected_max) \
- COMPILE_ASSERT(static_cast<int>(enum_min) == \
- static_cast<int>(expected_min), \
- enum_min##_not_expected_min); \
- COMPILE_ASSERT(static_cast<int>(enum_max) == \
- static_cast<int>(expected_max), \
- enum_max##_not_expected_max);
-
-#define ENUM_CASE(enum_value) case enum_value: return #enum_value
-
-const char* GetMetahandleFieldString(MetahandleField metahandle_field) {
- ASSERT_ENUM_BOUNDS(META_HANDLE, META_HANDLE,
- INT64_FIELDS_BEGIN, BASE_VERSION - 1);
- switch (metahandle_field) {
- ENUM_CASE(META_HANDLE);
- }
- NOTREACHED();
- return "";
-}
-
-const char* GetBaseVersionString(BaseVersion base_version) {
- ASSERT_ENUM_BOUNDS(BASE_VERSION, BASE_VERSION,
- META_HANDLE + 1, SERVER_VERSION - 1);
- switch (base_version) {
- ENUM_CASE(BASE_VERSION);
- }
- NOTREACHED();
- return "";
-}
-
-const char* GetInt64FieldString(Int64Field int64_field) {
- ASSERT_ENUM_BOUNDS(SERVER_VERSION, LOCAL_EXTERNAL_ID,
- BASE_VERSION + 1, INT64_FIELDS_END - 1);
- switch (int64_field) {
- ENUM_CASE(SERVER_VERSION);
- ENUM_CASE(SERVER_POSITION_IN_PARENT);
- ENUM_CASE(LOCAL_EXTERNAL_ID);
- case INT64_FIELDS_END: break;
- }
- NOTREACHED();
- return "";
-}
-
-const char* GetTimeFieldString(TimeField time_field) {
-  ASSERT_ENUM_BOUNDS(MTIME, SERVER_CTIME,
-                     TIME_FIELDS_BEGIN, TIME_FIELDS_END - 1);
- switch (time_field) {
- ENUM_CASE(MTIME);
- ENUM_CASE(SERVER_MTIME);
- ENUM_CASE(CTIME);
- ENUM_CASE(SERVER_CTIME);
- case TIME_FIELDS_END: break;
- }
- NOTREACHED();
- return "";
-}
-
-const char* GetIdFieldString(IdField id_field) {
- ASSERT_ENUM_BOUNDS(ID, NEXT_ID,
- ID_FIELDS_BEGIN, ID_FIELDS_END - 1);
- switch (id_field) {
- ENUM_CASE(ID);
- ENUM_CASE(PARENT_ID);
- ENUM_CASE(SERVER_PARENT_ID);
- ENUM_CASE(PREV_ID);
- ENUM_CASE(NEXT_ID);
- case ID_FIELDS_END: break;
- }
- NOTREACHED();
- return "";
-}
-
-const char* GetIndexedBitFieldString(IndexedBitField indexed_bit_field) {
- ASSERT_ENUM_BOUNDS(IS_UNSYNCED, IS_UNAPPLIED_UPDATE,
- BIT_FIELDS_BEGIN, INDEXED_BIT_FIELDS_END - 1);
- switch (indexed_bit_field) {
- ENUM_CASE(IS_UNSYNCED);
- ENUM_CASE(IS_UNAPPLIED_UPDATE);
- case INDEXED_BIT_FIELDS_END: break;
- }
- NOTREACHED();
- return "";
-}
-
-const char* GetIsDelFieldString(IsDelField is_del_field) {
- ASSERT_ENUM_BOUNDS(IS_DEL, IS_DEL,
- INDEXED_BIT_FIELDS_END, IS_DIR - 1);
- switch (is_del_field) {
- ENUM_CASE(IS_DEL);
- }
- NOTREACHED();
- return "";
-}
-
-const char* GetBitFieldString(BitField bit_field) {
- ASSERT_ENUM_BOUNDS(IS_DIR, SERVER_IS_DEL,
- IS_DEL + 1, BIT_FIELDS_END - 1);
- switch (bit_field) {
- ENUM_CASE(IS_DIR);
- ENUM_CASE(SERVER_IS_DIR);
- ENUM_CASE(SERVER_IS_DEL);
- case BIT_FIELDS_END: break;
- }
- NOTREACHED();
- return "";
-}
-
-const char* GetStringFieldString(StringField string_field) {
- ASSERT_ENUM_BOUNDS(NON_UNIQUE_NAME, UNIQUE_CLIENT_TAG,
- STRING_FIELDS_BEGIN, STRING_FIELDS_END - 1);
- switch (string_field) {
- ENUM_CASE(NON_UNIQUE_NAME);
- ENUM_CASE(SERVER_NON_UNIQUE_NAME);
- ENUM_CASE(UNIQUE_SERVER_TAG);
- ENUM_CASE(UNIQUE_CLIENT_TAG);
- case STRING_FIELDS_END: break;
- }
- NOTREACHED();
- return "";
-}
-
-const char* GetProtoFieldString(ProtoField proto_field) {
- ASSERT_ENUM_BOUNDS(SPECIFICS, BASE_SERVER_SPECIFICS,
- PROTO_FIELDS_BEGIN, PROTO_FIELDS_END - 1);
- switch (proto_field) {
- ENUM_CASE(SPECIFICS);
- ENUM_CASE(SERVER_SPECIFICS);
- ENUM_CASE(BASE_SERVER_SPECIFICS);
- case PROTO_FIELDS_END: break;
- }
- NOTREACHED();
- return "";
-}
-
-const char* GetBitTempString(BitTemp bit_temp) {
- ASSERT_ENUM_BOUNDS(SYNCING, SYNCING,
- BIT_TEMPS_BEGIN, BIT_TEMPS_END - 1);
- switch (bit_temp) {
- ENUM_CASE(SYNCING);
- case BIT_TEMPS_END: break;
- }
- NOTREACHED();
- return "";
-}
-
-#undef ENUM_CASE
-#undef ASSERT_ENUM_BOUNDS
-
-} // namespace syncable
diff --git a/chrome/browser/sync/syncable/syncable_enum_conversions.h b/chrome/browser/sync/syncable/syncable_enum_conversions.h
deleted file mode 100644
index b48cc28..0000000
--- a/chrome/browser/sync/syncable/syncable_enum_conversions.h
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CHROME_BROWSER_SYNC_SYNCABLE_SYNCABLE_ENUM_CONVERSIONS_H_
-#define CHROME_BROWSER_SYNC_SYNCABLE_SYNCABLE_ENUM_CONVERSIONS_H_
-#pragma once
-
-// Keep this file in sync with syncable.h.
-
-#include "chrome/browser/sync/syncable/syncable.h"
-
-// Utility functions to get the string equivalent for some syncable
-// enums.
-
-namespace syncable {
-
-// The returned strings (which don't have to be freed) are in ASCII.
-// The result of passing in an invalid enum value is undefined.
-
-const char* GetMetahandleFieldString(MetahandleField metahandle_field);
-
-const char* GetBaseVersionString(BaseVersion base_version);
-
-const char* GetInt64FieldString(Int64Field int64_field);
-
-const char* GetTimeFieldString(TimeField time_field);
-
-const char* GetIdFieldString(IdField id_field);
-
-const char* GetIndexedBitFieldString(IndexedBitField indexed_bit_field);
-
-const char* GetIsDelFieldString(IsDelField is_del_field);
-
-const char* GetBitFieldString(BitField bit_field);
-
-const char* GetStringFieldString(StringField string_field);
-
-const char* GetProtoFieldString(ProtoField proto_field);
-
-const char* GetBitTempString(BitTemp bit_temp);
-
-} // namespace syncable
-
-#endif // CHROME_BROWSER_SYNC_SYNCABLE_SYNCABLE_ENUM_CONVERSIONS_H_
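These helpers exist mainly for debug and diagnostic output. A small illustrative use (the LogPut() wrapper is hypothetical; DVLOG comes from base/logging.h):

    #include "base/logging.h"

    void LogPut(syncable::StringField field, const std::string& value) {
      DVLOG(1) << "Put(" << syncable::GetStringFieldString(field)
               << ", \"" << value << "\")";
    }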
diff --git a/chrome/browser/sync/syncable/syncable_enum_conversions_unittest.cc b/chrome/browser/sync/syncable/syncable_enum_conversions_unittest.cc
deleted file mode 100644
index 4d55634..0000000
--- a/chrome/browser/sync/syncable/syncable_enum_conversions_unittest.cc
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Keep this file in sync with syncable.h.
-
-#include "chrome/browser/sync/syncable/syncable_enum_conversions.h"
-
-#include <string>
-
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace syncable {
-namespace {
-
-class SyncableEnumConversionsTest : public testing::Test {
-};
-
-template <class T>
-void TestEnumStringFunction(const char* (*enum_string_fn)(T),
- int enum_min, int enum_max) {
- EXPECT_LE(enum_min, enum_max);
- for (int i = enum_min; i <= enum_max; ++i) {
- const std::string& str = enum_string_fn(static_cast<T>(i));
- EXPECT_FALSE(str.empty());
- }
-}
-
-TEST_F(SyncableEnumConversionsTest, GetMetahandleFieldString) {
- TestEnumStringFunction(
- GetMetahandleFieldString, INT64_FIELDS_BEGIN, META_HANDLE);
-}
-
-TEST_F(SyncableEnumConversionsTest, GetBaseVersionString) {
- TestEnumStringFunction(
- GetBaseVersionString, META_HANDLE + 1, BASE_VERSION);
-}
-
-TEST_F(SyncableEnumConversionsTest, GetInt64FieldString) {
- TestEnumStringFunction(
- GetInt64FieldString, BASE_VERSION + 1, INT64_FIELDS_END - 1);
-}
-
-TEST_F(SyncableEnumConversionsTest, GetTimeFieldString) {
- TestEnumStringFunction(
- GetTimeFieldString, TIME_FIELDS_BEGIN, TIME_FIELDS_END - 1);
-}
-
-TEST_F(SyncableEnumConversionsTest, GetIdFieldString) {
- TestEnumStringFunction(
- GetIdFieldString, ID_FIELDS_BEGIN, ID_FIELDS_END - 1);
-}
-
-TEST_F(SyncableEnumConversionsTest, GetIndexedBitFieldString) {
- TestEnumStringFunction(
- GetIndexedBitFieldString, BIT_FIELDS_BEGIN, INDEXED_BIT_FIELDS_END - 1);
-}
-
-TEST_F(SyncableEnumConversionsTest, GetIsDelFieldString) {
- TestEnumStringFunction(
- GetIsDelFieldString, INDEXED_BIT_FIELDS_END, IS_DEL);
-}
-
-TEST_F(SyncableEnumConversionsTest, GetBitFieldString) {
- TestEnumStringFunction(
- GetBitFieldString, IS_DEL + 1, BIT_FIELDS_END - 1);
-}
-
-TEST_F(SyncableEnumConversionsTest, GetStringFieldString) {
- TestEnumStringFunction(
- GetStringFieldString, STRING_FIELDS_BEGIN, STRING_FIELDS_END - 1);
-}
-
-TEST_F(SyncableEnumConversionsTest, GetProtoFieldString) {
- TestEnumStringFunction(
- GetProtoFieldString, PROTO_FIELDS_BEGIN, PROTO_FIELDS_END - 1);
-}
-
-TEST_F(SyncableEnumConversionsTest, GetBitTempString) {
- TestEnumStringFunction(
- GetBitTempString, BIT_TEMPS_BEGIN, BIT_TEMPS_END - 1);
-}
-
-} // namespace
-} // namespace syncable
diff --git a/chrome/browser/sync/syncable/syncable_id.cc b/chrome/browser/sync/syncable/syncable_id.cc
deleted file mode 100644
index a4381f9..0000000
--- a/chrome/browser/sync/syncable/syncable_id.cc
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "chrome/browser/sync/syncable/syncable_id.h"
-
-#include <iosfwd>
-
-#include "base/string_util.h"
-#include "base/values.h"
-
-using std::ostream;
-using std::string;
-
-namespace syncable {
-
-ostream& operator<<(ostream& out, const Id& id) {
- out << id.s_;
- return out;
-}
-
-StringValue* Id::ToValue() const {
- return Value::CreateStringValue(s_);
-}
-
-string Id::GetServerId() const {
- // Currently root is the string "0". We need to decide on a true value.
- // "" would be convenient here, as the IsRoot call would not be needed.
- if (IsRoot())
- return "0";
- return s_.substr(1);
-}
-
-Id Id::CreateFromServerId(const string& server_id) {
- Id id;
- if (server_id == "0")
- id.s_ = "r";
- else
- id.s_ = string("s") + server_id;
- return id;
-}
-
-Id Id::CreateFromClientString(const string& local_id) {
- Id id;
- if (local_id == "0")
- id.s_ = "r";
- else
- id.s_ = string("c") + local_id;
- return id;
-}
-
-Id Id::GetLexicographicSuccessor() const {
- // The successor of a string is given by appending the least
- // character in the alphabet.
- Id id = *this;
- id.s_.push_back(0);
- return id;
-}
-
-bool Id::ContainsStringCaseInsensitive(
- const std::string& lowercase_query) const {
- DCHECK_EQ(StringToLowerASCII(lowercase_query), lowercase_query);
- return StringToLowerASCII(s_).find(lowercase_query) != std::string::npos;
-}
-
-// static
-Id Id::GetLeastIdForLexicographicComparison() {
- Id id;
- id.s_.clear();
- return id;
-}
-
-Id GetNullId() {
- return Id(); // Currently == root.
-}
-
-} // namespace syncable
diff --git a/chrome/browser/sync/syncable/syncable_id.h b/chrome/browser/sync/syncable/syncable_id.h
deleted file mode 100644
index dcd4f99..0000000
--- a/chrome/browser/sync/syncable/syncable_id.h
+++ /dev/null
@@ -1,134 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CHROME_BROWSER_SYNC_SYNCABLE_SYNCABLE_ID_H_
-#define CHROME_BROWSER_SYNC_SYNCABLE_SYNCABLE_ID_H_
-#pragma once
-
-#include <iosfwd>
-#include <limits>
-#include <sstream>
-#include <string>
-
-#include "base/hash_tables.h"
-
-class MockConnectionManager;
-
-namespace base {
-class StringValue;
-}
-
-namespace sql {
-class Statement;
-}
-
-namespace syncable {
-struct EntryKernel;
-class Id;
-}
-
-namespace syncable {
-
-std::ostream& operator<<(std::ostream& out, const Id& id);
-
-// For historical reasons, 3 concepts got overloaded into the Id:
-// 1. A unique, opaque identifier for the object.
-// 2. A flag specifying whether the server knows about this object.
-// 3. A flag for the root.
-//
-// We originally wrapped an integer for this information, but now we use a
-// string. It will have one of three forms:
-// 1. c<client only opaque id> for client items that have not been committed.
-// 2. r for the root item.
-// 3. s<server provided opaque id> for items that the server knows about.
-class Id {
- public:
- // This constructor will be handy even when we move away from int64s, just
- // for unit tests.
- inline Id() : s_("r") { }
- inline Id(const Id& that) {
- Copy(that);
- }
- inline Id& operator = (const Id& that) {
- Copy(that);
- return *this;
- }
- inline void Copy(const Id& that) {
- this->s_ = that.s_;
- }
- inline bool IsRoot() const {
- return "r" == s_;
- }
- inline bool ServerKnows() const {
- return s_[0] == 's' || s_ == "r";
- }
-
- // TODO(sync): We could use null here, but to ease conversion we use "r".
- // fix this, this is madness :)
- inline bool IsNull() const {
- return IsRoot();
- }
- inline void Clear() {
- s_ = "r";
- }
- inline int compare(const Id& that) const {
- return s_.compare(that.s_);
- }
- inline bool operator == (const Id& that) const {
- return s_ == that.s_;
- }
- inline bool operator != (const Id& that) const {
- return s_ != that.s_;
- }
- inline bool operator < (const Id& that) const {
- return s_ < that.s_;
- }
- inline bool operator > (const Id& that) const {
- return s_ > that.s_;
- }
-
- const std::string& value() const {
- return s_;
- }
-
- // Return the next highest ID in the lexicographic ordering. This is
- // useful for computing upper bounds on std::sets that are ordered
- // by operator<.
- Id GetLexicographicSuccessor() const;
-
- // Note: |lowercase_query| should be passed in as lower case.
- bool ContainsStringCaseInsensitive(const std::string& lowercase_query) const;
-
- // Dumps the ID as a value and returns it. Transfers ownership of
- // the StringValue to the caller.
- base::StringValue* ToValue() const;
-
- // Three functions are used to work with our proto buffers.
- std::string GetServerId() const;
- static Id CreateFromServerId(const std::string& server_id);
- // This should only be used if you get back a reference to a local
- // id from the server. Returns a client only opaque id.
- static Id CreateFromClientString(const std::string& local_id);
-
- // This method returns an ID that will compare less than any valid ID.
- // The returned ID is not a valid ID itself. This is useful for
- // computing lower bounds on std::sets that are ordered by operator<.
- static Id GetLeastIdForLexicographicComparison();
-
- private:
- friend EntryKernel* UnpackEntry(sql::Statement* statement);
- friend void BindFields(const EntryKernel& entry,
- sql::Statement* statement);
- friend std::ostream& operator<<(std::ostream& out, const Id& id);
- friend class MockConnectionManager;
- friend class SyncableIdTest;
-
- std::string s_;
-};
-
-Id GetNullId();
-
-} // namespace syncable
-
-#endif // CHROME_BROWSER_SYNC_SYNCABLE_SYNCABLE_ID_H_
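The three string forms described above round-trip through the server-ID helpers; the behavior below follows directly from syncable_id.cc and the ToValue() test later in this change:

    using syncable::Id;

    Id root = Id::CreateFromServerId("0");         // The server's root id...
    CHECK(root.IsRoot());                          // ...is stored internally as "r".
    CHECK_EQ("0", root.GetServerId());

    Id server_item = Id::CreateFromServerId("value");     // Stored as "svalue".
    CHECK(server_item.ServerKnows());
    CHECK_EQ("value", server_item.GetServerId());

    Id local_item = Id::CreateFromClientString("value");  // Stored as "cvalue".
    CHECK(!local_item.ServerKnows());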
diff --git a/chrome/browser/sync/syncable/syncable_id_unittest.cc b/chrome/browser/sync/syncable/syncable_id_unittest.cc
deleted file mode 100644
index 0349195..0000000
--- a/chrome/browser/sync/syncable/syncable_id_unittest.cc
+++ /dev/null
@@ -1,96 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "chrome/browser/sync/syncable/syncable_id.h"
-
-#include <vector>
-
-#include "base/memory/scoped_ptr.h"
-#include "base/test/values_test_util.h"
-#include "base/values.h"
-#include "chrome/browser/sync/test/engine/test_id_factory.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-using std::vector;
-
-namespace syncable {
-
-using browser_sync::TestIdFactory;
-
-class SyncableIdTest : public testing::Test { };
-
-TEST(SyncableIdTest, TestIDCreation) {
- vector<Id> v;
- v.push_back(TestIdFactory::FromNumber(5));
- v.push_back(TestIdFactory::FromNumber(1));
- v.push_back(TestIdFactory::FromNumber(-5));
- v.push_back(TestIdFactory::MakeLocal("A"));
- v.push_back(TestIdFactory::MakeLocal("B"));
- v.push_back(TestIdFactory::MakeServer("A"));
- v.push_back(TestIdFactory::MakeServer("B"));
- v.push_back(Id::CreateFromServerId("-5"));
- v.push_back(Id::CreateFromClientString("A"));
- v.push_back(Id::CreateFromServerId("A"));
-
- for (vector<Id>::iterator i = v.begin(); i != v.end(); ++i) {
- for (vector<Id>::iterator j = v.begin(); j != i; ++j) {
-      ASSERT_NE(*i, *j) << "mis-equated two distinct ids";
- }
- ASSERT_EQ(*i, *i) << "self-equality failed";
- Id copy1 = *i;
- Id copy2 = *i;
- ASSERT_EQ(copy1, copy2) << "equality after copy failed";
- }
-}
-
-TEST(SyncableIdTest, GetLeastIdForLexicographicComparison) {
- vector<Id> v;
- v.push_back(Id::CreateFromServerId("z5"));
- v.push_back(Id::CreateFromServerId("z55"));
- v.push_back(Id::CreateFromServerId("z6"));
- v.push_back(Id::CreateFromClientString("zA-"));
- v.push_back(Id::CreateFromClientString("zA--"));
- v.push_back(Id::CreateFromServerId("zA--"));
-
- for (int i = 0; i <= 255; ++i) {
- std::string one_character_id;
- one_character_id.push_back(i);
- v.push_back(Id::CreateFromClientString(one_character_id));
- }
-
- for (vector<Id>::iterator i = v.begin(); i != v.end(); ++i) {
- // The following looks redundant, but we're testing a custom operator<.
- ASSERT_LT(Id::GetLeastIdForLexicographicComparison(), *i);
- ASSERT_NE(*i, i->GetLexicographicSuccessor());
- ASSERT_NE(i->GetLexicographicSuccessor(), *i);
- ASSERT_LT(*i, i->GetLexicographicSuccessor());
- ASSERT_GT(i->GetLexicographicSuccessor(), *i);
- for (vector<Id>::iterator j = v.begin(); j != v.end(); ++j) {
- if (j == i)
- continue;
- if (*j < *i) {
- ASSERT_LT(j->GetLexicographicSuccessor(), *i);
- ASSERT_LT(j->GetLexicographicSuccessor(),
- i->GetLexicographicSuccessor());
- ASSERT_LT(*j, i->GetLexicographicSuccessor());
- } else {
- ASSERT_GT(j->GetLexicographicSuccessor(), *i);
- ASSERT_GT(j->GetLexicographicSuccessor(),
- i->GetLexicographicSuccessor());
- ASSERT_GT(*j, i->GetLexicographicSuccessor());
- }
- }
- }
-}
-
-TEST(SyncableIdTest, ToValue) {
- base::ExpectStringValue("r", Id::CreateFromServerId("0").ToValue());
- base::ExpectStringValue("svalue", Id::CreateFromServerId("value").ToValue());
-
- base::ExpectStringValue("r", Id::CreateFromClientString("0").ToValue());
- base::ExpectStringValue("cvalue",
- Id::CreateFromClientString("value").ToValue());
-}
-
-} // namespace syncable
diff --git a/chrome/browser/sync/syncable/syncable_mock.cc b/chrome/browser/sync/syncable/syncable_mock.cc
deleted file mode 100644
index 1fb55fa..0000000
--- a/chrome/browser/sync/syncable/syncable_mock.cc
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "chrome/browser/sync/syncable/syncable_mock.h"
-
-#include "base/location.h"
-#include "chrome/browser/sync/test/null_transaction_observer.h"
-
-MockDirectory::MockDirectory(browser_sync::UnrecoverableErrorHandler* handler)
- : Directory(&encryptor_, handler, NULL) {
- InitKernelForTest("myk", &delegate_, syncable::NullTransactionObserver());
-}
-
-MockDirectory::~MockDirectory() {}
-
-MockSyncableWriteTransaction::MockSyncableWriteTransaction(
- const tracked_objects::Location& from_here, Directory *directory)
- : WriteTransaction(from_here, syncable::UNITTEST, directory) {
-}
diff --git a/chrome/browser/sync/syncable/syncable_mock.h b/chrome/browser/sync/syncable/syncable_mock.h
deleted file mode 100644
index 1f8ed48..0000000
--- a/chrome/browser/sync/syncable/syncable_mock.h
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CHROME_BROWSER_SYNC_SYNCABLE_SYNCABLE_MOCK_H_
-#define CHROME_BROWSER_SYNC_SYNCABLE_SYNCABLE_MOCK_H_
-#pragma once
-
-#include <string>
-
-#include "chrome/browser/sync/syncable/syncable.h"
-#include "chrome/browser/sync/test/fake_encryptor.h"
-#include "chrome/browser/sync/test/null_directory_change_delegate.h"
-#include "testing/gmock/include/gmock/gmock.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-using syncable::Directory;
-using syncable::EntryKernel;
-
-class MockDirectory : public Directory {
- public:
- explicit MockDirectory(browser_sync::UnrecoverableErrorHandler* handler);
- virtual ~MockDirectory();
-
- MOCK_METHOD1(GetEntryByHandle, syncable::EntryKernel*(int64));
-
- MOCK_METHOD2(set_last_downloadstamp, void(syncable::ModelType, int64));
-
- MOCK_METHOD1(GetEntryByClientTag,
- syncable::EntryKernel*(const std::string&));
-
- MOCK_METHOD1(PurgeEntriesWithTypeIn, void(syncable::ModelTypeSet));
-
- private:
- browser_sync::FakeEncryptor encryptor_;
- syncable::NullDirectoryChangeDelegate delegate_;
-};
-
-class MockSyncableWriteTransaction : public syncable::WriteTransaction {
- public:
- MockSyncableWriteTransaction(
- const tracked_objects::Location& from_here, Directory *directory);
-};
-
-
-#endif // CHROME_BROWSER_SYNC_SYNCABLE_SYNCABLE_MOCK_H_
-
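In a test, MockDirectory is used like any other gmock fake: construct it with an error handler and set expectations on the virtual Directory lookups it overrides. A sketch (not taken from this change; TestUnrecoverableErrorHandler is the handler type used by the other sync tests):

    using ::testing::Return;

    browser_sync::TestUnrecoverableErrorHandler handler;
    MockDirectory dir(&handler);

    syncable::EntryKernel kernel;
    EXPECT_CALL(dir, GetEntryByHandle(1))
        .WillOnce(Return(&kernel));
    // Code under test that calls dir.GetEntryByHandle(1) now receives |kernel|.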
diff --git a/chrome/browser/sync/syncable/syncable_unittest.cc b/chrome/browser/sync/syncable/syncable_unittest.cc
deleted file mode 100644
index 282fa7f..0000000
--- a/chrome/browser/sync/syncable/syncable_unittest.cc
+++ /dev/null
@@ -1,1743 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "chrome/browser/sync/syncable/syncable.h"
-
-#include <string>
-
-#include "base/compiler_specific.h"
-#include "base/file_path.h"
-#include "base/file_util.h"
-#include "base/location.h"
-#include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/message_loop.h"
-#include "base/scoped_temp_dir.h"
-#include "base/stringprintf.h"
-#include "base/synchronization/condition_variable.h"
-#include "base/test/values_test_util.h"
-#include "base/threading/platform_thread.h"
-#include "base/values.h"
-#include "chrome/browser/sync/engine/syncproto.h"
-#include "chrome/browser/sync/internal_api/includes/test_unrecoverable_error_handler.h"
-#include "chrome/browser/sync/syncable/directory_backing_store.h"
-#include "chrome/browser/sync/syncable/directory_change_delegate.h"
-#include "chrome/browser/sync/syncable/on_disk_directory_backing_store.h"
-#include "chrome/browser/sync/test/engine/test_id_factory.h"
-#include "chrome/browser/sync/test/engine/test_syncable_utils.h"
-#include "chrome/browser/sync/test/fake_encryptor.h"
-#include "chrome/browser/sync/test/null_directory_change_delegate.h"
-#include "chrome/browser/sync/test/null_transaction_observer.h"
-#include "sync/protocol/bookmark_specifics.pb.h"
-#include "testing/gtest/include/gtest/gtest.h"
-#include "third_party/sqlite/sqlite3.h"
-
-using base::ExpectDictBooleanValue;
-using base::ExpectDictStringValue;
-using browser_sync::FakeEncryptor;
-using browser_sync::TestIdFactory;
-using browser_sync::TestUnrecoverableErrorHandler;
-
-namespace syncable {
-
-class SyncableKernelTest : public testing::Test {};
-
-// TODO(akalin): Add unit tests for EntryKernel::ContainsString().
-
-TEST_F(SyncableKernelTest, ToValue) {
- EntryKernel kernel;
- scoped_ptr<DictionaryValue> value(kernel.ToValue());
- if (value.get()) {
- // Not much to check without repeating the ToValue() code.
- EXPECT_TRUE(value->HasKey("isDirty"));
- // The extra +2 is for "isDirty" and "serverModelType".
- EXPECT_EQ(BIT_TEMPS_END - BEGIN_FIELDS + 2,
- static_cast<int>(value->size()));
- } else {
- ADD_FAILURE();
- }
-}
-
-namespace {
-void PutDataAsBookmarkFavicon(WriteTransaction* wtrans,
- MutableEntry* e,
- const char* bytes,
- size_t bytes_length) {
- sync_pb::EntitySpecifics specifics;
- specifics.mutable_bookmark()->set_url("http://demo/");
- specifics.mutable_bookmark()->set_favicon(bytes, bytes_length);
- e->Put(SPECIFICS, specifics);
-}
-
-void ExpectDataFromBookmarkFaviconEquals(BaseTransaction* trans,
- Entry* e,
- const char* bytes,
- size_t bytes_length) {
- ASSERT_TRUE(e->good());
- ASSERT_TRUE(e->Get(SPECIFICS).has_bookmark());
- ASSERT_EQ("http://demo/", e->Get(SPECIFICS).bookmark().url());
- ASSERT_EQ(std::string(bytes, bytes_length),
- e->Get(SPECIFICS).bookmark().favicon());
-}
-} // namespace
-
-class SyncableGeneralTest : public testing::Test {
- public:
- virtual void SetUp() {
- ASSERT_TRUE(temp_dir_.CreateUniqueTempDir());
- db_path_ = temp_dir_.path().Append(
- FILE_PATH_LITERAL("SyncableTest.sqlite3"));
- }
-
- virtual void TearDown() {
- }
- protected:
- MessageLoop message_loop_;
- ScopedTempDir temp_dir_;
- NullDirectoryChangeDelegate delegate_;
- FakeEncryptor encryptor_;
- TestUnrecoverableErrorHandler handler_;
- FilePath db_path_;
-};
-
-TEST_F(SyncableGeneralTest, General) {
- Directory dir(&encryptor_, &handler_, NULL);
- ASSERT_EQ(OPENED, dir.OpenInMemoryForTest(
- "SimpleTest", &delegate_, NullTransactionObserver()));
-
- int64 root_metahandle;
- {
- ReadTransaction rtrans(FROM_HERE, &dir);
- Entry e(&rtrans, GET_BY_ID, rtrans.root_id());
- ASSERT_TRUE(e.good());
- root_metahandle = e.Get(META_HANDLE);
- }
-
- int64 written_metahandle;
- const Id id = TestIdFactory::FromNumber(99);
- std::string name = "Jeff";
- // Test simple read operations on an empty DB.
- {
- ReadTransaction rtrans(FROM_HERE, &dir);
- Entry e(&rtrans, GET_BY_ID, id);
- ASSERT_FALSE(e.good()); // Hasn't been written yet.
-
- Directory::ChildHandles child_handles;
- dir.GetChildHandlesById(&rtrans, rtrans.root_id(), &child_handles);
- EXPECT_TRUE(child_handles.empty());
-
- dir.GetChildHandlesByHandle(&rtrans, root_metahandle, &child_handles);
- EXPECT_TRUE(child_handles.empty());
- }
-
- // Test creating a new meta entry.
- {
- WriteTransaction wtrans(FROM_HERE, UNITTEST, &dir);
- MutableEntry me(&wtrans, CREATE, wtrans.root_id(), name);
- ASSERT_TRUE(me.good());
- me.Put(ID, id);
- me.Put(BASE_VERSION, 1);
- written_metahandle = me.Get(META_HANDLE);
- }
-
- // Test GetChildHandles* after something is now in the DB.
- // Also check that GET_BY_ID works.
- {
- ReadTransaction rtrans(FROM_HERE, &dir);
- Entry e(&rtrans, GET_BY_ID, id);
- ASSERT_TRUE(e.good());
-
- Directory::ChildHandles child_handles;
- dir.GetChildHandlesById(&rtrans, rtrans.root_id(), &child_handles);
- EXPECT_EQ(1u, child_handles.size());
-
- for (Directory::ChildHandles::iterator i = child_handles.begin();
- i != child_handles.end(); ++i) {
- EXPECT_EQ(*i, written_metahandle);
- }
-
- dir.GetChildHandlesByHandle(&rtrans, root_metahandle, &child_handles);
- EXPECT_EQ(1u, child_handles.size());
-
- for (Directory::ChildHandles::iterator i = child_handles.begin();
- i != child_handles.end(); ++i) {
- EXPECT_EQ(*i, written_metahandle);
- }
- }
-
- // Test writing data to an entity. Also check that GET_BY_HANDLE works.
- static const char s[] = "Hello World.";
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, &dir);
- MutableEntry e(&trans, GET_BY_HANDLE, written_metahandle);
- ASSERT_TRUE(e.good());
- PutDataAsBookmarkFavicon(&trans, &e, s, sizeof(s));
- }
-
- // Test reading back the contents that we just wrote.
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, &dir);
- MutableEntry e(&trans, GET_BY_HANDLE, written_metahandle);
- ASSERT_TRUE(e.good());
- ExpectDataFromBookmarkFaviconEquals(&trans, &e, s, sizeof(s));
- }
-
- // Verify it exists in the folder.
- {
- ReadTransaction rtrans(FROM_HERE, &dir);
- EXPECT_EQ(1, CountEntriesWithName(&rtrans, rtrans.root_id(), name));
- }
-
- // Now delete it.
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, &dir);
- MutableEntry e(&trans, GET_BY_HANDLE, written_metahandle);
- e.Put(IS_DEL, true);
-
- EXPECT_EQ(0, CountEntriesWithName(&trans, trans.root_id(), name));
- }
-
- dir.SaveChanges();
-}
-
-TEST_F(SyncableGeneralTest, ChildrenOps) {
- Directory dir(&encryptor_, &handler_, NULL);
- ASSERT_EQ(OPENED, dir.OpenInMemoryForTest(
- "SimpleTest", &delegate_, NullTransactionObserver()));
-
- int64 written_metahandle;
- const Id id = TestIdFactory::FromNumber(99);
- std::string name = "Jeff";
- {
- ReadTransaction rtrans(FROM_HERE, &dir);
- Entry e(&rtrans, GET_BY_ID, id);
- ASSERT_FALSE(e.good()); // Hasn't been written yet.
-
- EXPECT_FALSE(dir.HasChildren(&rtrans, rtrans.root_id()));
- Id child_id;
- EXPECT_TRUE(dir.GetFirstChildId(&rtrans, rtrans.root_id(), &child_id));
- EXPECT_TRUE(child_id.IsRoot());
- }
-
- {
- WriteTransaction wtrans(FROM_HERE, UNITTEST, &dir);
- MutableEntry me(&wtrans, CREATE, wtrans.root_id(), name);
- ASSERT_TRUE(me.good());
- me.Put(ID, id);
- me.Put(BASE_VERSION, 1);
- written_metahandle = me.Get(META_HANDLE);
- }
-
- // Test children ops after something is now in the DB.
- {
- ReadTransaction rtrans(FROM_HERE, &dir);
- Entry e(&rtrans, GET_BY_ID, id);
- ASSERT_TRUE(e.good());
-
- Entry child(&rtrans, GET_BY_HANDLE, written_metahandle);
- ASSERT_TRUE(child.good());
-
- EXPECT_TRUE(dir.HasChildren(&rtrans, rtrans.root_id()));
- Id child_id;
- EXPECT_TRUE(dir.GetFirstChildId(&rtrans, rtrans.root_id(), &child_id));
- EXPECT_EQ(e.Get(ID), child_id);
- }
-
- {
- WriteTransaction wtrans(FROM_HERE, UNITTEST, &dir);
- MutableEntry me(&wtrans, GET_BY_HANDLE, written_metahandle);
- ASSERT_TRUE(me.good());
- me.Put(IS_DEL, true);
- }
-
- // Test children ops after the children have been deleted.
- {
- ReadTransaction rtrans(FROM_HERE, &dir);
- Entry e(&rtrans, GET_BY_ID, id);
- ASSERT_TRUE(e.good());
-
- EXPECT_FALSE(dir.HasChildren(&rtrans, rtrans.root_id()));
- Id child_id;
- EXPECT_TRUE(dir.GetFirstChildId(&rtrans, rtrans.root_id(), &child_id));
- EXPECT_TRUE(child_id.IsRoot());
- }
-
- dir.SaveChanges();
-}
-
-TEST_F(SyncableGeneralTest, ClientIndexRebuildsProperly) {
- int64 written_metahandle;
- TestIdFactory factory;
- const Id id = factory.NewServerId();
- std::string name = "cheesepuffs";
- std::string tag = "dietcoke";
-
- // Test creating a new meta entry.
- {
- Directory dir(&encryptor_, &handler_, NULL);
- ASSERT_EQ(OPENED, dir.Open(db_path_, "IndexTest", &delegate_,
- NullTransactionObserver()));
- {
- WriteTransaction wtrans(FROM_HERE, UNITTEST, &dir);
- MutableEntry me(&wtrans, CREATE, wtrans.root_id(), name);
- ASSERT_TRUE(me.good());
- me.Put(ID, id);
- me.Put(BASE_VERSION, 1);
- me.Put(UNIQUE_CLIENT_TAG, tag);
- written_metahandle = me.Get(META_HANDLE);
- }
- dir.SaveChanges();
- }
-
- // The DB was closed. Now reopen it. This will cause index regeneration.
- {
- Directory dir(&encryptor_, &handler_, NULL);
- ASSERT_EQ(OPENED, dir.Open(db_path_, "IndexTest",
- &delegate_, NullTransactionObserver()));
-
- ReadTransaction trans(FROM_HERE, &dir);
- Entry me(&trans, GET_BY_CLIENT_TAG, tag);
- ASSERT_TRUE(me.good());
- EXPECT_EQ(me.Get(ID), id);
- EXPECT_EQ(me.Get(BASE_VERSION), 1);
- EXPECT_EQ(me.Get(UNIQUE_CLIENT_TAG), tag);
- EXPECT_EQ(me.Get(META_HANDLE), written_metahandle);
- }
-}
-
-TEST_F(SyncableGeneralTest, ClientIndexRebuildsDeletedProperly) {
- TestIdFactory factory;
- const Id id = factory.NewServerId();
- std::string tag = "dietcoke";
-
- // Test creating a deleted, unsynced, server meta entry.
- {
- Directory dir(&encryptor_, &handler_, NULL);
- ASSERT_EQ(OPENED, dir.Open(db_path_, "IndexTest", &delegate_,
- NullTransactionObserver()));
- {
- WriteTransaction wtrans(FROM_HERE, UNITTEST, &dir);
- MutableEntry me(&wtrans, CREATE, wtrans.root_id(), "deleted");
- ASSERT_TRUE(me.good());
- me.Put(ID, id);
- me.Put(BASE_VERSION, 1);
- me.Put(UNIQUE_CLIENT_TAG, tag);
- me.Put(IS_DEL, true);
- me.Put(IS_UNSYNCED, true); // Or it might be purged.
- }
- dir.SaveChanges();
- }
-
- // The DB was closed. Now reopen it. This will cause index regeneration.
- // Should still be present and valid in the client tag index.
- {
- Directory dir(&encryptor_, &handler_, NULL);
- ASSERT_EQ(OPENED, dir.Open(db_path_, "IndexTest", &delegate_,
- NullTransactionObserver()));
-
- ReadTransaction trans(FROM_HERE, &dir);
- Entry me(&trans, GET_BY_CLIENT_TAG, tag);
- ASSERT_TRUE(me.good());
- EXPECT_EQ(me.Get(ID), id);
- EXPECT_EQ(me.Get(UNIQUE_CLIENT_TAG), tag);
- EXPECT_TRUE(me.Get(IS_DEL));
- EXPECT_TRUE(me.Get(IS_UNSYNCED));
- }
-}
-
-TEST_F(SyncableGeneralTest, ToValue) {
- Directory dir(&encryptor_, &handler_, NULL);
- ASSERT_EQ(OPENED, dir.OpenInMemoryForTest(
- "SimpleTest", &delegate_, NullTransactionObserver()));
-
- const Id id = TestIdFactory::FromNumber(99);
- {
- ReadTransaction rtrans(FROM_HERE, &dir);
- Entry e(&rtrans, GET_BY_ID, id);
- EXPECT_FALSE(e.good()); // Hasn't been written yet.
-
- scoped_ptr<DictionaryValue> value(e.ToValue());
- ExpectDictBooleanValue(false, *value, "good");
- EXPECT_EQ(1u, value->size());
- }
-
- // Test creating a new meta entry.
- {
- WriteTransaction wtrans(FROM_HERE, UNITTEST, &dir);
- MutableEntry me(&wtrans, CREATE, wtrans.root_id(), "new");
- ASSERT_TRUE(me.good());
- me.Put(ID, id);
- me.Put(BASE_VERSION, 1);
-
- scoped_ptr<DictionaryValue> value(me.ToValue());
- ExpectDictBooleanValue(true, *value, "good");
- EXPECT_TRUE(value->HasKey("kernel"));
- ExpectDictStringValue("Unspecified", *value, "modelType");
- ExpectDictBooleanValue(true, *value, "existsOnClientBecauseNameIsNonEmpty");
- ExpectDictBooleanValue(false, *value, "isRoot");
- }
-
- dir.SaveChanges();
-}
-
-// A Directory whose backing store always fails SaveChanges by returning false.
-class TestUnsaveableDirectory : public Directory {
- public:
- TestUnsaveableDirectory() : Directory(&encryptor_, &handler_, NULL) {}
-
- class UnsaveableBackingStore : public OnDiskDirectoryBackingStore {
- public:
- UnsaveableBackingStore(const std::string& dir_name,
- const FilePath& backing_filepath)
- : OnDiskDirectoryBackingStore(dir_name, backing_filepath) { }
- virtual bool SaveChanges(const Directory::SaveChangesSnapshot& snapshot) {
- return false;
- }
- };
-
- DirOpenResult OpenUnsaveable(
- const FilePath& file_path, const std::string& name,
- DirectoryChangeDelegate* delegate,
- const browser_sync::WeakHandle<TransactionObserver>&
- transaction_observer) {
- DirectoryBackingStore *store = new UnsaveableBackingStore(name, file_path);
- DirOpenResult result =
- OpenImpl(store, name, delegate, transaction_observer);
- if (OPENED != result)
- Close();
- return result;
- }
-
- private:
- FakeEncryptor encryptor_;
- TestUnrecoverableErrorHandler handler_;
-};
-
-// A test fixture for syncable::Directory. Uses an in-memory database to keep
-// the unit tests fast.
-class SyncableDirectoryTest : public testing::Test {
- protected:
- MessageLoop message_loop_;
- static const char kName[];
- static const Id kId;
-
- virtual void SetUp() {
- dir_.reset(new Directory(&encryptor_, &handler_, NULL));
- ASSERT_TRUE(dir_.get());
- ASSERT_EQ(OPENED, dir_->OpenInMemoryForTest(kName, &delegate_,
- NullTransactionObserver()));
- ASSERT_TRUE(dir_->good());
- }
-
- virtual void TearDown() {
- dir_->SaveChanges();
- dir_.reset();
- }
-
- void GetAllMetaHandles(BaseTransaction* trans, MetahandleSet* result) {
- dir_->GetAllMetaHandles(trans, result);
- }
-
- bool IsInDirtyMetahandles(int64 metahandle) {
- return 1 == dir_->kernel_->dirty_metahandles->count(metahandle);
- }
-
- bool IsInMetahandlesToPurge(int64 metahandle) {
- return 1 == dir_->kernel_->metahandles_to_purge->count(metahandle);
- }
-
- void CheckPurgeEntriesWithTypeInSucceeded(ModelTypeSet types_to_purge,
- bool before_reload) {
- SCOPED_TRACE(testing::Message("Before reload: ") << before_reload);
- {
- ReadTransaction trans(FROM_HERE, dir_.get());
- MetahandleSet all_set;
- dir_->GetAllMetaHandles(&trans, &all_set);
- EXPECT_EQ(3U, all_set.size());
- if (before_reload)
- EXPECT_EQ(4U, dir_->kernel_->metahandles_to_purge->size());
- for (MetahandleSet::iterator iter = all_set.begin();
- iter != all_set.end(); ++iter) {
- Entry e(&trans, GET_BY_HANDLE, *iter);
- const ModelType local_type = e.GetModelType();
- const ModelType server_type = e.GetServerModelType();
-
-        // No entry whose local or server type has been purged should remain.
- if ((IsRealDataType(local_type) &&
- types_to_purge.Has(local_type)) ||
- (IsRealDataType(server_type) &&
- types_to_purge.Has(server_type))) {
- FAIL() << "Illegal type should have been deleted.";
- }
- }
- }
-
- for (ModelTypeSet::Iterator it = types_to_purge.First();
- it.Good(); it.Inc()) {
- EXPECT_FALSE(dir_->initial_sync_ended_for_type(it.Get()));
- }
- EXPECT_FALSE(types_to_purge.Has(BOOKMARKS));
- EXPECT_TRUE(dir_->initial_sync_ended_for_type(BOOKMARKS));
- }
-
- FakeEncryptor encryptor_;
- TestUnrecoverableErrorHandler handler_;
- scoped_ptr<Directory> dir_;
- NullDirectoryChangeDelegate delegate_;
-
- // Creates an empty entry and sets the ID field to the default kId.
- void CreateEntry(const std::string& entryname) {
- CreateEntry(entryname, kId);
- }
-
- // Creates an empty entry and sets the ID field to id.
- void CreateEntry(const std::string& entryname, const int id) {
- CreateEntry(entryname, TestIdFactory::FromNumber(id));
- }
- void CreateEntry(const std::string& entryname, Id id) {
- WriteTransaction wtrans(FROM_HERE, UNITTEST, dir_.get());
- MutableEntry me(&wtrans, CREATE, wtrans.root_id(), entryname);
- ASSERT_TRUE(me.good());
- me.Put(ID, id);
- me.Put(IS_UNSYNCED, true);
- }
-
- void ValidateEntry(BaseTransaction* trans,
- int64 id,
- bool check_name,
- const std::string& name,
- int64 base_version,
- int64 server_version,
- bool is_del);
-};
-
-TEST_F(SyncableDirectoryTest, TakeSnapshotGetsMetahandlesToPurge) {
- const int metas_to_create = 50;
- MetahandleSet expected_purges;
- MetahandleSet all_handles;
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
- for (int i = 0; i < metas_to_create; i++) {
- MutableEntry e(&trans, CREATE, trans.root_id(), "foo");
- e.Put(IS_UNSYNCED, true);
- sync_pb::EntitySpecifics specs;
- if (i % 2 == 0) {
- AddDefaultFieldValue(BOOKMARKS, &specs);
- expected_purges.insert(e.Get(META_HANDLE));
- all_handles.insert(e.Get(META_HANDLE));
- } else {
- AddDefaultFieldValue(PREFERENCES, &specs);
- all_handles.insert(e.Get(META_HANDLE));
- }
- e.Put(SPECIFICS, specs);
- e.Put(SERVER_SPECIFICS, specs);
- }
- }
-
- syncable::ModelTypeSet to_purge(BOOKMARKS);
- dir_->PurgeEntriesWithTypeIn(to_purge);
-
- Directory::SaveChangesSnapshot snapshot1;
- base::AutoLock scoped_lock(dir_->kernel_->save_changes_mutex);
- dir_->TakeSnapshotForSaveChanges(&snapshot1);
- EXPECT_TRUE(expected_purges == snapshot1.metahandles_to_purge);
-
- to_purge.Clear();
- to_purge.Put(PREFERENCES);
- dir_->PurgeEntriesWithTypeIn(to_purge);
-
- dir_->HandleSaveChangesFailure(snapshot1);
-
- Directory::SaveChangesSnapshot snapshot2;
- dir_->TakeSnapshotForSaveChanges(&snapshot2);
- EXPECT_TRUE(all_handles == snapshot2.metahandles_to_purge);
-}
-
-TEST_F(SyncableDirectoryTest, TakeSnapshotGetsAllDirtyHandlesTest) {
- const int metahandles_to_create = 100;
- std::vector<int64> expected_dirty_metahandles;
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
- for (int i = 0; i < metahandles_to_create; i++) {
- MutableEntry e(&trans, CREATE, trans.root_id(), "foo");
- expected_dirty_metahandles.push_back(e.Get(META_HANDLE));
- e.Put(IS_UNSYNCED, true);
- }
- }
- // Fake SaveChanges() and make sure we got what we expected.
- {
- Directory::SaveChangesSnapshot snapshot;
- base::AutoLock scoped_lock(dir_->kernel_->save_changes_mutex);
- dir_->TakeSnapshotForSaveChanges(&snapshot);
- // Make sure there's an entry for each new metahandle. Make sure all
- // entries are marked dirty.
- ASSERT_EQ(expected_dirty_metahandles.size(), snapshot.dirty_metas.size());
- for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
- i != snapshot.dirty_metas.end(); ++i) {
- ASSERT_TRUE(i->is_dirty());
- }
- dir_->VacuumAfterSaveChanges(snapshot);
- }
- // Put a new value with existing transactions as well as adding new ones.
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
- std::vector<int64> new_dirty_metahandles;
- for (std::vector<int64>::const_iterator i =
- expected_dirty_metahandles.begin();
- i != expected_dirty_metahandles.end(); ++i) {
- // Change existing entries to directories to dirty them.
- MutableEntry e1(&trans, GET_BY_HANDLE, *i);
- e1.Put(IS_DIR, true);
- e1.Put(IS_UNSYNCED, true);
- // Add new entries
- MutableEntry e2(&trans, CREATE, trans.root_id(), "bar");
- e2.Put(IS_UNSYNCED, true);
- new_dirty_metahandles.push_back(e2.Get(META_HANDLE));
- }
- expected_dirty_metahandles.insert(expected_dirty_metahandles.end(),
- new_dirty_metahandles.begin(), new_dirty_metahandles.end());
- }
- // Fake SaveChanges() and make sure we got what we expected.
- {
- Directory::SaveChangesSnapshot snapshot;
- base::AutoLock scoped_lock(dir_->kernel_->save_changes_mutex);
- dir_->TakeSnapshotForSaveChanges(&snapshot);
- // Make sure there's an entry for each new metahandle. Make sure all
- // entries are marked dirty.
- EXPECT_EQ(expected_dirty_metahandles.size(), snapshot.dirty_metas.size());
- for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
- i != snapshot.dirty_metas.end(); ++i) {
- EXPECT_TRUE(i->is_dirty());
- }
- dir_->VacuumAfterSaveChanges(snapshot);
- }
-}
-
-TEST_F(SyncableDirectoryTest, TakeSnapshotGetsOnlyDirtyHandlesTest) {
- const int metahandles_to_create = 100;
-
- // half of 2 * metahandles_to_create
- const unsigned int number_changed = 100u;
- std::vector<int64> expected_dirty_metahandles;
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
- for (int i = 0; i < metahandles_to_create; i++) {
- MutableEntry e(&trans, CREATE, trans.root_id(), "foo");
- expected_dirty_metahandles.push_back(e.Get(META_HANDLE));
- e.Put(IS_UNSYNCED, true);
- }
- }
- dir_->SaveChanges();
- // Put a new value with existing transactions as well as adding new ones.
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
- std::vector<int64> new_dirty_metahandles;
- for (std::vector<int64>::const_iterator i =
- expected_dirty_metahandles.begin();
- i != expected_dirty_metahandles.end(); ++i) {
- // Change existing entries to directories to dirty them.
- MutableEntry e1(&trans, GET_BY_HANDLE, *i);
- ASSERT_TRUE(e1.good());
- e1.Put(IS_DIR, true);
- e1.Put(IS_UNSYNCED, true);
- // Add new entries
- MutableEntry e2(&trans, CREATE, trans.root_id(), "bar");
- e2.Put(IS_UNSYNCED, true);
- new_dirty_metahandles.push_back(e2.Get(META_HANDLE));
- }
- expected_dirty_metahandles.insert(expected_dirty_metahandles.end(),
- new_dirty_metahandles.begin(), new_dirty_metahandles.end());
- }
- dir_->SaveChanges();
- // Don't make any changes whatsoever and ensure nothing comes back.
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
- for (std::vector<int64>::const_iterator i =
- expected_dirty_metahandles.begin();
- i != expected_dirty_metahandles.end(); ++i) {
- MutableEntry e(&trans, GET_BY_HANDLE, *i);
- ASSERT_TRUE(e.good());
- // We aren't doing anything to dirty these entries.
- }
- }
- // Fake SaveChanges() and make sure we got what we expected.
- {
- Directory::SaveChangesSnapshot snapshot;
- base::AutoLock scoped_lock(dir_->kernel_->save_changes_mutex);
- dir_->TakeSnapshotForSaveChanges(&snapshot);
- // Make sure there are no dirty_metahandles.
- EXPECT_EQ(0u, snapshot.dirty_metas.size());
- dir_->VacuumAfterSaveChanges(snapshot);
- }
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
- bool should_change = false;
- for (std::vector<int64>::const_iterator i =
- expected_dirty_metahandles.begin();
- i != expected_dirty_metahandles.end(); ++i) {
- // Maybe change entries by flipping IS_DIR.
- MutableEntry e(&trans, GET_BY_HANDLE, *i);
- ASSERT_TRUE(e.good());
- should_change = !should_change;
- if (should_change) {
- bool not_dir = !e.Get(IS_DIR);
- e.Put(IS_DIR, not_dir);
- e.Put(IS_UNSYNCED, true);
- }
- }
- }
- // Fake SaveChanges() and make sure we got what we expected.
- {
- Directory::SaveChangesSnapshot snapshot;
- base::AutoLock scoped_lock(dir_->kernel_->save_changes_mutex);
- dir_->TakeSnapshotForSaveChanges(&snapshot);
- // Make sure there's an entry for each changed metahandle. Make sure all
- // entries are marked dirty.
- EXPECT_EQ(number_changed, snapshot.dirty_metas.size());
- for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
- i != snapshot.dirty_metas.end(); ++i) {
- EXPECT_TRUE(i->is_dirty());
- }
- dir_->VacuumAfterSaveChanges(snapshot);
- }
-}
-
-const char SyncableDirectoryTest::kName[] = "Foo";
-const Id SyncableDirectoryTest::kId(TestIdFactory::FromNumber(-99));
-
-namespace {
-TEST_F(SyncableDirectoryTest, TestBasicLookupNonExistantID) {
- ReadTransaction rtrans(FROM_HERE, dir_.get());
- Entry e(&rtrans, GET_BY_ID, kId);
- ASSERT_FALSE(e.good());
-}
-
-TEST_F(SyncableDirectoryTest, TestBasicLookupValidID) {
- CreateEntry("rtc");
- ReadTransaction rtrans(FROM_HERE, dir_.get());
- Entry e(&rtrans, GET_BY_ID, kId);
- ASSERT_TRUE(e.good());
-}
-
-TEST_F(SyncableDirectoryTest, TestDelete) {
- std::string name = "peanut butter jelly time";
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
- MutableEntry e1(&trans, CREATE, trans.root_id(), name);
- ASSERT_TRUE(e1.good());
- ASSERT_TRUE(e1.Put(IS_DEL, true));
- MutableEntry e2(&trans, CREATE, trans.root_id(), name);
- ASSERT_TRUE(e2.good());
- ASSERT_TRUE(e2.Put(IS_DEL, true));
- MutableEntry e3(&trans, CREATE, trans.root_id(), name);
- ASSERT_TRUE(e3.good());
- ASSERT_TRUE(e3.Put(IS_DEL, true));
-
- ASSERT_TRUE(e1.Put(IS_DEL, false));
- ASSERT_TRUE(e2.Put(IS_DEL, false));
- ASSERT_TRUE(e3.Put(IS_DEL, false));
-
- ASSERT_TRUE(e1.Put(IS_DEL, true));
- ASSERT_TRUE(e2.Put(IS_DEL, true));
- ASSERT_TRUE(e3.Put(IS_DEL, true));
-}
-
-TEST_F(SyncableDirectoryTest, TestGetUnsynced) {
- Directory::UnsyncedMetaHandles handles;
- int64 handle1, handle2;
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
-
- dir_->GetUnsyncedMetaHandles(&trans, &handles);
- ASSERT_TRUE(0 == handles.size());
-
- MutableEntry e1(&trans, CREATE, trans.root_id(), "abba");
- ASSERT_TRUE(e1.good());
- handle1 = e1.Get(META_HANDLE);
- e1.Put(BASE_VERSION, 1);
- e1.Put(IS_DIR, true);
- e1.Put(ID, TestIdFactory::FromNumber(101));
-
- MutableEntry e2(&trans, CREATE, e1.Get(ID), "bread");
- ASSERT_TRUE(e2.good());
- handle2 = e2.Get(META_HANDLE);
- e2.Put(BASE_VERSION, 1);
- e2.Put(ID, TestIdFactory::FromNumber(102));
- }
- dir_->SaveChanges();
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
-
- dir_->GetUnsyncedMetaHandles(&trans, &handles);
- ASSERT_TRUE(0 == handles.size());
-
- MutableEntry e3(&trans, GET_BY_HANDLE, handle1);
- ASSERT_TRUE(e3.good());
- e3.Put(IS_UNSYNCED, true);
- }
- dir_->SaveChanges();
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
- dir_->GetUnsyncedMetaHandles(&trans, &handles);
- ASSERT_TRUE(1 == handles.size());
- ASSERT_TRUE(handle1 == handles[0]);
-
- MutableEntry e4(&trans, GET_BY_HANDLE, handle2);
- ASSERT_TRUE(e4.good());
- e4.Put(IS_UNSYNCED, true);
- }
- dir_->SaveChanges();
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
- dir_->GetUnsyncedMetaHandles(&trans, &handles);
- ASSERT_TRUE(2 == handles.size());
- if (handle1 == handles[0]) {
- ASSERT_TRUE(handle2 == handles[1]);
- } else {
- ASSERT_TRUE(handle2 == handles[0]);
- ASSERT_TRUE(handle1 == handles[1]);
- }
-
- MutableEntry e5(&trans, GET_BY_HANDLE, handle1);
- ASSERT_TRUE(e5.good());
- ASSERT_TRUE(e5.Get(IS_UNSYNCED));
- ASSERT_TRUE(e5.Put(IS_UNSYNCED, false));
- ASSERT_FALSE(e5.Get(IS_UNSYNCED));
- }
- dir_->SaveChanges();
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
- dir_->GetUnsyncedMetaHandles(&trans, &handles);
- ASSERT_TRUE(1 == handles.size());
- ASSERT_TRUE(handle2 == handles[0]);
- }
-}
-
-TEST_F(SyncableDirectoryTest, TestGetUnappliedUpdates) {
- Directory::UnappliedUpdateMetaHandles handles;
- int64 handle1, handle2;
- const syncable::FullModelTypeSet all_types =
- syncable::FullModelTypeSet::All();
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
-
- dir_->GetUnappliedUpdateMetaHandles(&trans, all_types, &handles);
- ASSERT_TRUE(0 == handles.size());
-
- MutableEntry e1(&trans, CREATE, trans.root_id(), "abba");
- ASSERT_TRUE(e1.good());
- handle1 = e1.Get(META_HANDLE);
- e1.Put(IS_UNAPPLIED_UPDATE, false);
- e1.Put(BASE_VERSION, 1);
- e1.Put(ID, TestIdFactory::FromNumber(101));
- e1.Put(IS_DIR, true);
-
- MutableEntry e2(&trans, CREATE, e1.Get(ID), "bread");
- ASSERT_TRUE(e2.good());
- handle2 = e2.Get(META_HANDLE);
- e2.Put(IS_UNAPPLIED_UPDATE, false);
- e2.Put(BASE_VERSION, 1);
- e2.Put(ID, TestIdFactory::FromNumber(102));
- }
- dir_->SaveChanges();
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
-
- dir_->GetUnappliedUpdateMetaHandles(&trans, all_types, &handles);
- ASSERT_TRUE(0 == handles.size());
-
- MutableEntry e3(&trans, GET_BY_HANDLE, handle1);
- ASSERT_TRUE(e3.good());
- e3.Put(IS_UNAPPLIED_UPDATE, true);
- }
- dir_->SaveChanges();
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
- dir_->GetUnappliedUpdateMetaHandles(&trans, all_types, &handles);
- ASSERT_TRUE(1 == handles.size());
- ASSERT_TRUE(handle1 == handles[0]);
-
- MutableEntry e4(&trans, GET_BY_HANDLE, handle2);
- ASSERT_TRUE(e4.good());
- e4.Put(IS_UNAPPLIED_UPDATE, true);
- }
- dir_->SaveChanges();
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
- dir_->GetUnappliedUpdateMetaHandles(&trans, all_types, &handles);
- ASSERT_TRUE(2 == handles.size());
- if (handle1 == handles[0]) {
- ASSERT_TRUE(handle2 == handles[1]);
- } else {
- ASSERT_TRUE(handle2 == handles[0]);
- ASSERT_TRUE(handle1 == handles[1]);
- }
-
- MutableEntry e5(&trans, GET_BY_HANDLE, handle1);
- ASSERT_TRUE(e5.good());
- e5.Put(IS_UNAPPLIED_UPDATE, false);
- }
- dir_->SaveChanges();
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
- dir_->GetUnappliedUpdateMetaHandles(&trans, all_types, &handles);
- ASSERT_TRUE(1 == handles.size());
- ASSERT_TRUE(handle2 == handles[0]);
- }
-}
-
-
-TEST_F(SyncableDirectoryTest, DeleteBug_531383) {
- // Try to evoke a check failure...
- TestIdFactory id_factory;
- int64 grandchild_handle;
- {
- WriteTransaction wtrans(FROM_HERE, UNITTEST, dir_.get());
- MutableEntry parent(&wtrans, CREATE, id_factory.root(), "Bob");
- ASSERT_TRUE(parent.good());
- parent.Put(IS_DIR, true);
- parent.Put(ID, id_factory.NewServerId());
- parent.Put(BASE_VERSION, 1);
- MutableEntry child(&wtrans, CREATE, parent.Get(ID), "Bob");
- ASSERT_TRUE(child.good());
- child.Put(IS_DIR, true);
- child.Put(ID, id_factory.NewServerId());
- child.Put(BASE_VERSION, 1);
- MutableEntry grandchild(&wtrans, CREATE, child.Get(ID), "Bob");
- ASSERT_TRUE(grandchild.good());
- grandchild.Put(ID, id_factory.NewServerId());
- grandchild.Put(BASE_VERSION, 1);
- ASSERT_TRUE(grandchild.Put(IS_DEL, true));
- MutableEntry twin(&wtrans, CREATE, child.Get(ID), "Bob");
- ASSERT_TRUE(twin.good());
- ASSERT_TRUE(twin.Put(IS_DEL, true));
- ASSERT_TRUE(grandchild.Put(IS_DEL, false));
-
- grandchild_handle = grandchild.Get(META_HANDLE);
- }
- dir_->SaveChanges();
- {
- WriteTransaction wtrans(FROM_HERE, UNITTEST, dir_.get());
- MutableEntry grandchild(&wtrans, GET_BY_HANDLE, grandchild_handle);
- grandchild.Put(IS_DEL, true); // Used to CHECK fail here.
- }
-}
-
-static inline bool IsLegalNewParent(const Entry& a, const Entry& b) {
- return IsLegalNewParent(a.trans(), a.Get(ID), b.Get(ID));
-}
-
-TEST_F(SyncableDirectoryTest, TestIsLegalNewParent) {
- TestIdFactory id_factory;
- WriteTransaction wtrans(FROM_HERE, UNITTEST, dir_.get());
- Entry root(&wtrans, GET_BY_ID, id_factory.root());
- ASSERT_TRUE(root.good());
- MutableEntry parent(&wtrans, CREATE, root.Get(ID), "Bob");
- ASSERT_TRUE(parent.good());
- parent.Put(IS_DIR, true);
- parent.Put(ID, id_factory.NewServerId());
- parent.Put(BASE_VERSION, 1);
- MutableEntry child(&wtrans, CREATE, parent.Get(ID), "Bob");
- ASSERT_TRUE(child.good());
- child.Put(IS_DIR, true);
- child.Put(ID, id_factory.NewServerId());
- child.Put(BASE_VERSION, 1);
- MutableEntry grandchild(&wtrans, CREATE, child.Get(ID), "Bob");
- ASSERT_TRUE(grandchild.good());
- grandchild.Put(ID, id_factory.NewServerId());
- grandchild.Put(BASE_VERSION, 1);
-
- MutableEntry parent2(&wtrans, CREATE, root.Get(ID), "Pete");
- ASSERT_TRUE(parent2.good());
- parent2.Put(IS_DIR, true);
- parent2.Put(ID, id_factory.NewServerId());
- parent2.Put(BASE_VERSION, 1);
- MutableEntry child2(&wtrans, CREATE, parent2.Get(ID), "Pete");
- ASSERT_TRUE(child2.good());
- child2.Put(IS_DIR, true);
- child2.Put(ID, id_factory.NewServerId());
- child2.Put(BASE_VERSION, 1);
- MutableEntry grandchild2(&wtrans, CREATE, child2.Get(ID), "Pete");
- ASSERT_TRUE(grandchild2.good());
- grandchild2.Put(ID, id_factory.NewServerId());
- grandchild2.Put(BASE_VERSION, 1);
- // resulting tree
- // root
- // / |
- // parent parent2
- // | |
- // child child2
- // | |
- // grandchild grandchild2
- ASSERT_TRUE(IsLegalNewParent(child, root));
- ASSERT_TRUE(IsLegalNewParent(child, parent));
- ASSERT_FALSE(IsLegalNewParent(child, child));
- ASSERT_FALSE(IsLegalNewParent(child, grandchild));
- ASSERT_TRUE(IsLegalNewParent(child, parent2));
- ASSERT_TRUE(IsLegalNewParent(child, grandchild2));
- ASSERT_FALSE(IsLegalNewParent(parent, grandchild));
- ASSERT_FALSE(IsLegalNewParent(root, grandchild));
- ASSERT_FALSE(IsLegalNewParent(parent, grandchild));
-}
-
-TEST_F(SyncableDirectoryTest, TestEntryIsInFolder) {
- // Create a subdir and an entry.
- int64 entry_handle;
- syncable::Id folder_id;
- syncable::Id entry_id;
- std::string entry_name = "entry";
-
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
- MutableEntry folder(&trans, CREATE, trans.root_id(), "folder");
- ASSERT_TRUE(folder.good());
- EXPECT_TRUE(folder.Put(IS_DIR, true));
- EXPECT_TRUE(folder.Put(IS_UNSYNCED, true));
- folder_id = folder.Get(ID);
-
- MutableEntry entry(&trans, CREATE, folder.Get(ID), entry_name);
- ASSERT_TRUE(entry.good());
- entry_handle = entry.Get(META_HANDLE);
- entry.Put(IS_UNSYNCED, true);
- entry_id = entry.Get(ID);
- }
-
- // Make sure we can find the entry in the folder.
- {
- ReadTransaction trans(FROM_HERE, dir_.get());
- EXPECT_EQ(0, CountEntriesWithName(&trans, trans.root_id(), entry_name));
- EXPECT_EQ(1, CountEntriesWithName(&trans, folder_id, entry_name));
-
- Entry entry(&trans, GET_BY_ID, entry_id);
- ASSERT_TRUE(entry.good());
- EXPECT_EQ(entry_handle, entry.Get(META_HANDLE));
- EXPECT_TRUE(entry.Get(NON_UNIQUE_NAME) == entry_name);
- EXPECT_TRUE(entry.Get(PARENT_ID) == folder_id);
- }
-}
-
-TEST_F(SyncableDirectoryTest, TestParentIdIndexUpdate) {
- std::string child_name = "child";
-
- WriteTransaction wt(FROM_HERE, UNITTEST, dir_.get());
- MutableEntry parent_folder(&wt, CREATE, wt.root_id(), "folder1");
- parent_folder.Put(IS_UNSYNCED, true);
- EXPECT_TRUE(parent_folder.Put(IS_DIR, true));
-
- MutableEntry parent_folder2(&wt, CREATE, wt.root_id(), "folder2");
- parent_folder2.Put(IS_UNSYNCED, true);
- EXPECT_TRUE(parent_folder2.Put(IS_DIR, true));
-
- MutableEntry child(&wt, CREATE, parent_folder.Get(ID), child_name);
- EXPECT_TRUE(child.Put(IS_DIR, true));
- child.Put(IS_UNSYNCED, true);
-
- ASSERT_TRUE(child.good());
-
- EXPECT_EQ(0, CountEntriesWithName(&wt, wt.root_id(), child_name));
- EXPECT_EQ(parent_folder.Get(ID), child.Get(PARENT_ID));
- EXPECT_EQ(1, CountEntriesWithName(&wt, parent_folder.Get(ID), child_name));
- EXPECT_EQ(0, CountEntriesWithName(&wt, parent_folder2.Get(ID), child_name));
- child.Put(PARENT_ID, parent_folder2.Get(ID));
- EXPECT_EQ(parent_folder2.Get(ID), child.Get(PARENT_ID));
- EXPECT_EQ(0, CountEntriesWithName(&wt, parent_folder.Get(ID), child_name));
- EXPECT_EQ(1, CountEntriesWithName(&wt, parent_folder2.Get(ID), child_name));
-}
-
-TEST_F(SyncableDirectoryTest, TestNoReindexDeletedItems) {
- std::string folder_name = "folder";
- std::string new_name = "new_name";
-
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
- MutableEntry folder(&trans, CREATE, trans.root_id(), folder_name);
- ASSERT_TRUE(folder.good());
- ASSERT_TRUE(folder.Put(IS_DIR, true));
- ASSERT_TRUE(folder.Put(IS_DEL, true));
-
- EXPECT_EQ(0, CountEntriesWithName(&trans, trans.root_id(), folder_name));
-
- MutableEntry deleted(&trans, GET_BY_ID, folder.Get(ID));
- ASSERT_TRUE(deleted.good());
- ASSERT_TRUE(deleted.Put(PARENT_ID, trans.root_id()));
- ASSERT_TRUE(deleted.Put(NON_UNIQUE_NAME, new_name));
-
- EXPECT_EQ(0, CountEntriesWithName(&trans, trans.root_id(), folder_name));
- EXPECT_EQ(0, CountEntriesWithName(&trans, trans.root_id(), new_name));
-}
-
-TEST_F(SyncableDirectoryTest, TestCaseChangeRename) {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
- MutableEntry folder(&trans, CREATE, trans.root_id(), "CaseChange");
- ASSERT_TRUE(folder.good());
- EXPECT_TRUE(folder.Put(PARENT_ID, trans.root_id()));
- EXPECT_TRUE(folder.Put(NON_UNIQUE_NAME, "CASECHANGE"));
- EXPECT_TRUE(folder.Put(IS_DEL, true));
-}
-
-// Create items of each model type, and check that GetModelType and
-// GetServerModelType return the right value.
-TEST_F(SyncableDirectoryTest, GetModelType) {
- TestIdFactory id_factory;
- for (int i = 0; i < MODEL_TYPE_COUNT; ++i) {
- ModelType datatype = ModelTypeFromInt(i);
- SCOPED_TRACE(testing::Message("Testing model type ") << datatype);
- switch (datatype) {
- case UNSPECIFIED:
- case TOP_LEVEL_FOLDER:
- continue; // Datatype isn't a function of Specifics.
- default:
- break;
- }
- sync_pb::EntitySpecifics specifics;
- AddDefaultFieldValue(datatype, &specifics);
-
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
-
- MutableEntry folder(&trans, CREATE, trans.root_id(), "Folder");
- ASSERT_TRUE(folder.good());
- folder.Put(ID, id_factory.NewServerId());
- folder.Put(SPECIFICS, specifics);
- folder.Put(BASE_VERSION, 1);
- folder.Put(IS_DIR, true);
- folder.Put(IS_DEL, false);
- ASSERT_EQ(datatype, folder.GetModelType());
-
- MutableEntry item(&trans, CREATE, trans.root_id(), "Item");
- ASSERT_TRUE(item.good());
- item.Put(ID, id_factory.NewServerId());
- item.Put(SPECIFICS, specifics);
- item.Put(BASE_VERSION, 1);
- item.Put(IS_DIR, false);
- item.Put(IS_DEL, false);
- ASSERT_EQ(datatype, item.GetModelType());
-
- // It's critical that deletion records retain their datatype, so that
- // they can be dispatched to the appropriate change processor.
- MutableEntry deleted_item(&trans, CREATE, trans.root_id(), "Deleted Item");
- ASSERT_TRUE(item.good());
- deleted_item.Put(ID, id_factory.NewServerId());
- deleted_item.Put(SPECIFICS, specifics);
- deleted_item.Put(BASE_VERSION, 1);
- deleted_item.Put(IS_DIR, false);
- deleted_item.Put(IS_DEL, true);
- ASSERT_EQ(datatype, deleted_item.GetModelType());
-
- MutableEntry server_folder(&trans, CREATE_NEW_UPDATE_ITEM,
- id_factory.NewServerId());
- ASSERT_TRUE(server_folder.good());
- server_folder.Put(SERVER_SPECIFICS, specifics);
- server_folder.Put(BASE_VERSION, 1);
- server_folder.Put(SERVER_IS_DIR, true);
- server_folder.Put(SERVER_IS_DEL, false);
- ASSERT_EQ(datatype, server_folder.GetServerModelType());
-
- MutableEntry server_item(&trans, CREATE_NEW_UPDATE_ITEM,
- id_factory.NewServerId());
- ASSERT_TRUE(server_item.good());
- server_item.Put(SERVER_SPECIFICS, specifics);
- server_item.Put(BASE_VERSION, 1);
- server_item.Put(SERVER_IS_DIR, false);
- server_item.Put(SERVER_IS_DEL, false);
- ASSERT_EQ(datatype, server_item.GetServerModelType());
-
- browser_sync::SyncEntity folder_entity;
- folder_entity.set_id(id_factory.NewServerId());
- folder_entity.set_deleted(false);
- folder_entity.set_folder(true);
- folder_entity.mutable_specifics()->CopyFrom(specifics);
- ASSERT_EQ(datatype, folder_entity.GetModelType());
-
- browser_sync::SyncEntity item_entity;
- item_entity.set_id(id_factory.NewServerId());
- item_entity.set_deleted(false);
- item_entity.set_folder(false);
- item_entity.mutable_specifics()->CopyFrom(specifics);
- ASSERT_EQ(datatype, item_entity.GetModelType());
- }
-}
-
-// A variant of SyncableDirectoryTest that uses a real sqlite database.
-class OnDiskSyncableDirectoryTest : public SyncableDirectoryTest {
- protected:
- // SetUp() is called before each test case is run.
- // The sqlite3 DB is deleted before each test is run.
- virtual void SetUp() {
- ASSERT_TRUE(temp_dir_.CreateUniqueTempDir());
- file_path_ = temp_dir_.path().Append(
- FILE_PATH_LITERAL("Test.sqlite3"));
- file_util::Delete(file_path_, true);
- dir_.reset(new Directory(&encryptor_, &handler_, NULL));
- ASSERT_TRUE(dir_.get());
- ASSERT_EQ(OPENED, dir_->Open(file_path_, kName,
- &delegate_, NullTransactionObserver()));
- ASSERT_TRUE(dir_->good());
- }
-
- virtual void TearDown() {
- // This also closes file handles.
- dir_->SaveChanges();
- dir_.reset();
- file_util::Delete(file_path_, true);
- }
-
- void ReloadDir() {
- dir_.reset(new Directory(&encryptor_, &handler_, NULL));
- ASSERT_TRUE(dir_.get());
- ASSERT_EQ(OPENED, dir_->Open(file_path_, kName,
- &delegate_, NullTransactionObserver()));
- }
-
- void SaveAndReloadDir() {
- dir_->SaveChanges();
- ReloadDir();
- }
-
- void SwapInUnsaveableDirectory() {
- dir_.reset(); // Delete the old directory.
-
- // We first assign the object to a pointer of type TestUnsaveableDirectory
- // because the OpenUnsaveable function is not available in the parent class.
- scoped_ptr<TestUnsaveableDirectory> dir(new TestUnsaveableDirectory());
- ASSERT_TRUE(dir.get());
- ASSERT_EQ(OPENED, dir->OpenUnsaveable(
- file_path_, kName, &delegate_, NullTransactionObserver()));
-
- // Finally, move the unsaveable directory to the dir_ variable.
- dir_ = dir.Pass();
- }
-
- ScopedTempDir temp_dir_;
- FilePath file_path_;
-};
-
-TEST_F(OnDiskSyncableDirectoryTest, TestPurgeEntriesWithTypeIn) {
- sync_pb::EntitySpecifics bookmark_specs;
- sync_pb::EntitySpecifics autofill_specs;
- sync_pb::EntitySpecifics preference_specs;
- AddDefaultFieldValue(BOOKMARKS, &bookmark_specs);
- AddDefaultFieldValue(PREFERENCES, &preference_specs);
- AddDefaultFieldValue(AUTOFILL, &autofill_specs);
- dir_->set_initial_sync_ended_for_type(BOOKMARKS, true);
- dir_->set_initial_sync_ended_for_type(PREFERENCES, true);
- dir_->set_initial_sync_ended_for_type(AUTOFILL, true);
-
- syncable::ModelTypeSet types_to_purge(PREFERENCES, AUTOFILL);
-
- TestIdFactory id_factory;
- // Create some items for each type.
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
- MutableEntry item1(&trans, CREATE, trans.root_id(), "Item");
- ASSERT_TRUE(item1.good());
- item1.Put(SPECIFICS, bookmark_specs);
- item1.Put(SERVER_SPECIFICS, bookmark_specs);
- item1.Put(IS_UNSYNCED, true);
-
- MutableEntry item2(&trans, CREATE_NEW_UPDATE_ITEM,
- id_factory.NewServerId());
- ASSERT_TRUE(item2.good());
- item2.Put(SERVER_SPECIFICS, bookmark_specs);
- item2.Put(IS_UNAPPLIED_UPDATE, true);
-
- MutableEntry item3(&trans, CREATE, trans.root_id(), "Item");
- ASSERT_TRUE(item3.good());
- item3.Put(SPECIFICS, preference_specs);
- item3.Put(SERVER_SPECIFICS, preference_specs);
- item3.Put(IS_UNSYNCED, true);
-
- MutableEntry item4(&trans, CREATE_NEW_UPDATE_ITEM,
- id_factory.NewServerId());
- ASSERT_TRUE(item4.good());
- item4.Put(SERVER_SPECIFICS, preference_specs);
- item4.Put(IS_UNAPPLIED_UPDATE, true);
-
- MutableEntry item5(&trans, CREATE, trans.root_id(), "Item");
- ASSERT_TRUE(item5.good());
- item5.Put(SPECIFICS, autofill_specs);
- item5.Put(SERVER_SPECIFICS, autofill_specs);
- item5.Put(IS_UNSYNCED, true);
-
- MutableEntry item6(&trans, CREATE_NEW_UPDATE_ITEM,
- id_factory.NewServerId());
- ASSERT_TRUE(item6.good());
- item6.Put(SERVER_SPECIFICS, autofill_specs);
- item6.Put(IS_UNAPPLIED_UPDATE, true);
- }
-
- dir_->SaveChanges();
- {
- ReadTransaction trans(FROM_HERE, dir_.get());
- MetahandleSet all_set;
- GetAllMetaHandles(&trans, &all_set);
- ASSERT_EQ(7U, all_set.size());
- }
-
- dir_->PurgeEntriesWithTypeIn(types_to_purge);
-
-  // We first verify the purge against the in-memory data, then save and
-  // reload the directory to verify that the purge also took effect on disk.
- CheckPurgeEntriesWithTypeInSucceeded(types_to_purge, true);
- SaveAndReloadDir();
- CheckPurgeEntriesWithTypeInSucceeded(types_to_purge, false);
-}
-
-TEST_F(OnDiskSyncableDirectoryTest, TestShareInfo) {
- dir_->set_initial_sync_ended_for_type(AUTOFILL, true);
- dir_->set_store_birthday("Jan 31st");
- dir_->SetNotificationState("notification_state");
- {
- ReadTransaction trans(FROM_HERE, dir_.get());
- EXPECT_TRUE(dir_->initial_sync_ended_for_type(AUTOFILL));
- EXPECT_FALSE(dir_->initial_sync_ended_for_type(BOOKMARKS));
- EXPECT_EQ("Jan 31st", dir_->store_birthday());
- EXPECT_EQ("notification_state", dir_->GetNotificationState());
- }
- dir_->set_store_birthday("April 10th");
- dir_->SetNotificationState("notification_state2");
- dir_->SaveChanges();
- {
- ReadTransaction trans(FROM_HERE, dir_.get());
- EXPECT_TRUE(dir_->initial_sync_ended_for_type(AUTOFILL));
- EXPECT_FALSE(dir_->initial_sync_ended_for_type(BOOKMARKS));
- EXPECT_EQ("April 10th", dir_->store_birthday());
- EXPECT_EQ("notification_state2", dir_->GetNotificationState());
- }
- dir_->SetNotificationState("notification_state2");
- // Restore the directory from disk. Make sure that nothing's changed.
- SaveAndReloadDir();
- {
- ReadTransaction trans(FROM_HERE, dir_.get());
- EXPECT_TRUE(dir_->initial_sync_ended_for_type(AUTOFILL));
- EXPECT_FALSE(dir_->initial_sync_ended_for_type(BOOKMARKS));
- EXPECT_EQ("April 10th", dir_->store_birthday());
- EXPECT_EQ("notification_state2", dir_->GetNotificationState());
- }
-}
-
-TEST_F(OnDiskSyncableDirectoryTest,
- TestSimpleFieldsPreservedDuringSaveChanges) {
- Id update_id = TestIdFactory::FromNumber(1);
- Id create_id;
- EntryKernel create_pre_save, update_pre_save;
- EntryKernel create_post_save, update_post_save;
- std::string create_name = "Create";
-
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
- MutableEntry create(&trans, CREATE, trans.root_id(), create_name);
- MutableEntry update(&trans, CREATE_NEW_UPDATE_ITEM, update_id);
- create.Put(IS_UNSYNCED, true);
- update.Put(IS_UNAPPLIED_UPDATE, true);
- sync_pb::EntitySpecifics specifics;
- specifics.mutable_bookmark()->set_favicon("PNG");
- specifics.mutable_bookmark()->set_url("http://nowhere");
- create.Put(SPECIFICS, specifics);
- create_pre_save = create.GetKernelCopy();
- update_pre_save = update.GetKernelCopy();
- create_id = create.Get(ID);
- }
-
- dir_->SaveChanges();
- dir_.reset(new Directory(&encryptor_, &handler_, NULL));
- ASSERT_TRUE(dir_.get());
- ASSERT_EQ(OPENED, dir_->Open(file_path_, kName,
- &delegate_, NullTransactionObserver()));
- ASSERT_TRUE(dir_->good());
-
- {
- ReadTransaction trans(FROM_HERE, dir_.get());
- Entry create(&trans, GET_BY_ID, create_id);
- EXPECT_EQ(1, CountEntriesWithName(&trans, trans.root_id(), create_name));
- Entry update(&trans, GET_BY_ID, update_id);
- create_post_save = create.GetKernelCopy();
- update_post_save = update.GetKernelCopy();
- }
- int i = BEGIN_FIELDS;
- for ( ; i < INT64_FIELDS_END ; ++i) {
- EXPECT_EQ(create_pre_save.ref((Int64Field)i),
- create_post_save.ref((Int64Field)i))
- << "int64 field #" << i << " changed during save/load";
- EXPECT_EQ(update_pre_save.ref((Int64Field)i),
- update_post_save.ref((Int64Field)i))
- << "int64 field #" << i << " changed during save/load";
- }
- for ( ; i < TIME_FIELDS_END ; ++i) {
- EXPECT_EQ(create_pre_save.ref((TimeField)i),
- create_post_save.ref((TimeField)i))
- << "time field #" << i << " changed during save/load";
- EXPECT_EQ(update_pre_save.ref((TimeField)i),
- update_post_save.ref((TimeField)i))
- << "time field #" << i << " changed during save/load";
- }
- for ( ; i < ID_FIELDS_END ; ++i) {
- EXPECT_EQ(create_pre_save.ref((IdField)i),
- create_post_save.ref((IdField)i))
- << "id field #" << i << " changed during save/load";
- EXPECT_EQ(update_pre_save.ref((IdField)i),
-              update_post_save.ref((IdField)i))
- << "id field #" << i << " changed during save/load";
- }
- for ( ; i < BIT_FIELDS_END ; ++i) {
- EXPECT_EQ(create_pre_save.ref((BitField)i),
- create_post_save.ref((BitField)i))
- << "Bit field #" << i << " changed during save/load";
- EXPECT_EQ(update_pre_save.ref((BitField)i),
- update_post_save.ref((BitField)i))
- << "Bit field #" << i << " changed during save/load";
- }
- for ( ; i < STRING_FIELDS_END ; ++i) {
- EXPECT_EQ(create_pre_save.ref((StringField)i),
- create_post_save.ref((StringField)i))
- << "String field #" << i << " changed during save/load";
- EXPECT_EQ(update_pre_save.ref((StringField)i),
- update_post_save.ref((StringField)i))
- << "String field #" << i << " changed during save/load";
- }
- for ( ; i < PROTO_FIELDS_END; ++i) {
- EXPECT_EQ(create_pre_save.ref((ProtoField)i).SerializeAsString(),
- create_post_save.ref((ProtoField)i).SerializeAsString())
- << "Blob field #" << i << " changed during save/load";
- EXPECT_EQ(update_pre_save.ref((ProtoField)i).SerializeAsString(),
- update_post_save.ref((ProtoField)i).SerializeAsString())
- << "Blob field #" << i << " changed during save/load";
- }
-}
-
-TEST_F(OnDiskSyncableDirectoryTest, TestSaveChangesFailure) {
- int64 handle1 = 0;
- // Set up an item using a regular, saveable directory.
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
-
- MutableEntry e1(&trans, CREATE, trans.root_id(), "aguilera");
- ASSERT_TRUE(e1.good());
- EXPECT_TRUE(e1.GetKernelCopy().is_dirty());
- handle1 = e1.Get(META_HANDLE);
- e1.Put(BASE_VERSION, 1);
- e1.Put(IS_DIR, true);
- e1.Put(ID, TestIdFactory::FromNumber(101));
- EXPECT_TRUE(e1.GetKernelCopy().is_dirty());
- EXPECT_TRUE(IsInDirtyMetahandles(handle1));
- }
- ASSERT_TRUE(dir_->SaveChanges());
-
- // Make sure the item is no longer dirty after saving,
- // and make a modification.
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
-
- MutableEntry aguilera(&trans, GET_BY_HANDLE, handle1);
- ASSERT_TRUE(aguilera.good());
- EXPECT_FALSE(aguilera.GetKernelCopy().is_dirty());
- EXPECT_EQ(aguilera.Get(NON_UNIQUE_NAME), "aguilera");
- aguilera.Put(NON_UNIQUE_NAME, "overwritten");
- EXPECT_TRUE(aguilera.GetKernelCopy().is_dirty());
- EXPECT_TRUE(IsInDirtyMetahandles(handle1));
- }
- ASSERT_TRUE(dir_->SaveChanges());
-
- // Now do some operations using a directory for which SaveChanges will
- // always fail.
- SwapInUnsaveableDirectory();
- ASSERT_TRUE(dir_->good());
-
- int64 handle2 = 0;
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
-
- MutableEntry aguilera(&trans, GET_BY_HANDLE, handle1);
- ASSERT_TRUE(aguilera.good());
- EXPECT_FALSE(aguilera.GetKernelCopy().is_dirty());
- EXPECT_EQ(aguilera.Get(NON_UNIQUE_NAME), "overwritten");
- EXPECT_FALSE(aguilera.GetKernelCopy().is_dirty());
- EXPECT_FALSE(IsInDirtyMetahandles(handle1));
- aguilera.Put(NON_UNIQUE_NAME, "christina");
- EXPECT_TRUE(aguilera.GetKernelCopy().is_dirty());
- EXPECT_TRUE(IsInDirtyMetahandles(handle1));
-
- // New item.
- MutableEntry kids_on_block(&trans, CREATE, trans.root_id(), "kids");
- ASSERT_TRUE(kids_on_block.good());
- handle2 = kids_on_block.Get(META_HANDLE);
- kids_on_block.Put(BASE_VERSION, 1);
- kids_on_block.Put(IS_DIR, true);
- kids_on_block.Put(ID, TestIdFactory::FromNumber(102));
- EXPECT_TRUE(kids_on_block.GetKernelCopy().is_dirty());
- EXPECT_TRUE(IsInDirtyMetahandles(handle2));
- }
-
- // We are using an unsaveable directory, so this can't succeed. However,
- // the HandleSaveChangesFailure code path should have been triggered.
- ASSERT_FALSE(dir_->SaveChanges());
-
-  // Make sure things were rolled back to the state before the failed save.
- {
- ReadTransaction trans(FROM_HERE, dir_.get());
- Entry e1(&trans, GET_BY_HANDLE, handle1);
- ASSERT_TRUE(e1.good());
- EntryKernel aguilera = e1.GetKernelCopy();
- Entry kids(&trans, GET_BY_HANDLE, handle2);
- ASSERT_TRUE(kids.good());
- EXPECT_TRUE(kids.GetKernelCopy().is_dirty());
- EXPECT_TRUE(IsInDirtyMetahandles(handle2));
- EXPECT_TRUE(aguilera.is_dirty());
- EXPECT_TRUE(IsInDirtyMetahandles(handle1));
- }
-}
-
-TEST_F(OnDiskSyncableDirectoryTest, TestSaveChangesFailureWithPurge) {
- int64 handle1 = 0;
- // Set up an item using a regular, saveable directory.
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
-
- MutableEntry e1(&trans, CREATE, trans.root_id(), "aguilera");
- ASSERT_TRUE(e1.good());
- EXPECT_TRUE(e1.GetKernelCopy().is_dirty());
- handle1 = e1.Get(META_HANDLE);
- e1.Put(BASE_VERSION, 1);
- e1.Put(IS_DIR, true);
- e1.Put(ID, TestIdFactory::FromNumber(101));
- sync_pb::EntitySpecifics bookmark_specs;
- AddDefaultFieldValue(BOOKMARKS, &bookmark_specs);
- e1.Put(SPECIFICS, bookmark_specs);
- e1.Put(SERVER_SPECIFICS, bookmark_specs);
- e1.Put(ID, TestIdFactory::FromNumber(101));
- EXPECT_TRUE(e1.GetKernelCopy().is_dirty());
- EXPECT_TRUE(IsInDirtyMetahandles(handle1));
- }
- ASSERT_TRUE(dir_->SaveChanges());
-
- // Now do some operations using a directory for which SaveChanges will
- // always fail.
- SwapInUnsaveableDirectory();
- ASSERT_TRUE(dir_->good());
-
- syncable::ModelTypeSet set(BOOKMARKS);
- dir_->PurgeEntriesWithTypeIn(set);
- EXPECT_TRUE(IsInMetahandlesToPurge(handle1));
- ASSERT_FALSE(dir_->SaveChanges());
- EXPECT_TRUE(IsInMetahandlesToPurge(handle1));
-}
-
-} // namespace
-
-void SyncableDirectoryTest::ValidateEntry(BaseTransaction* trans,
- int64 id,
- bool check_name,
- const std::string& name,
- int64 base_version,
- int64 server_version,
- bool is_del) {
- Entry e(trans, GET_BY_ID, TestIdFactory::FromNumber(id));
- ASSERT_TRUE(e.good());
- if (check_name)
- ASSERT_TRUE(name == e.Get(NON_UNIQUE_NAME));
- ASSERT_TRUE(base_version == e.Get(BASE_VERSION));
- ASSERT_TRUE(server_version == e.Get(SERVER_VERSION));
- ASSERT_TRUE(is_del == e.Get(IS_DEL));
-}
-
-namespace {
-
-class SyncableDirectoryManagement : public testing::Test {
- public:
- virtual void SetUp() {
- ASSERT_TRUE(temp_dir_.CreateUniqueTempDir());
- }
-
- virtual void TearDown() {
- }
- protected:
- MessageLoop message_loop_;
- ScopedTempDir temp_dir_;
- FakeEncryptor encryptor_;
- TestUnrecoverableErrorHandler handler_;
- NullDirectoryChangeDelegate delegate_;
-};
-
-TEST_F(SyncableDirectoryManagement, TestFileRelease) {
- FilePath path = temp_dir_.path().Append(
- Directory::kSyncDatabaseFilename);
-
- syncable::Directory dir(&encryptor_, &handler_, NULL);
- DirOpenResult result =
- dir.Open(path, "ScopeTest", &delegate_, NullTransactionObserver());
- ASSERT_EQ(result, OPENED);
- dir.Close();
-
- // Closing the directory should have released the backing database file.
- ASSERT_TRUE(file_util::Delete(path, true));
-}
-
-class StressTransactionsDelegate : public base::PlatformThread::Delegate {
- public:
- StressTransactionsDelegate(Directory* dir, int thread_number)
- : dir_(dir),
- thread_number_(thread_number) {}
-
- private:
- Directory* const dir_;
- const int thread_number_;
-
- // PlatformThread::Delegate methods:
- virtual void ThreadMain() {
- int entry_count = 0;
- std::string path_name;
-
- for (int i = 0; i < 20; ++i) {
- const int rand_action = rand() % 10;
- if (rand_action < 4 && !path_name.empty()) {
- ReadTransaction trans(FROM_HERE, dir_);
- CHECK(1 == CountEntriesWithName(&trans, trans.root_id(), path_name));
- base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(
- rand() % 10));
- } else {
- std::string unique_name =
- base::StringPrintf("%d.%d", thread_number_, entry_count++);
- path_name.assign(unique_name.begin(), unique_name.end());
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_);
- MutableEntry e(&trans, CREATE, trans.root_id(), path_name);
- CHECK(e.good());
- base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(
- rand() % 20));
- e.Put(IS_UNSYNCED, true);
- if (e.Put(ID, TestIdFactory::FromNumber(rand())) &&
- e.Get(ID).ServerKnows() && !e.Get(ID).IsRoot()) {
- e.Put(BASE_VERSION, 1);
- }
- }
- }
- }
-
- DISALLOW_COPY_AND_ASSIGN(StressTransactionsDelegate);
-};
-
-TEST(SyncableDirectory, StressTransactions) {
- MessageLoop message_loop;
- ScopedTempDir temp_dir;
- ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
- FakeEncryptor encryptor;
- TestUnrecoverableErrorHandler handler;
- NullDirectoryChangeDelegate delegate;
- Directory dir(&encryptor, &handler, NULL);
- FilePath path = temp_dir.path().Append(Directory::kSyncDatabaseFilename);
- file_util::Delete(path, true);
- std::string dirname = "stress";
- dir.Open(path, dirname, &delegate, NullTransactionObserver());
-
- const int kThreadCount = 7;
- base::PlatformThreadHandle threads[kThreadCount];
- scoped_ptr<StressTransactionsDelegate> thread_delegates[kThreadCount];
-
- for (int i = 0; i < kThreadCount; ++i) {
- thread_delegates[i].reset(new StressTransactionsDelegate(&dir, i));
- ASSERT_TRUE(base::PlatformThread::Create(
- 0, thread_delegates[i].get(), &threads[i]));
- }
-
- for (int i = 0; i < kThreadCount; ++i) {
- base::PlatformThread::Join(threads[i]);
- }
-
- dir.Close();
- file_util::Delete(path, true);
-}
-
-class SyncableClientTagTest : public SyncableDirectoryTest {
- public:
- static const int kBaseVersion = 1;
- const char* test_name_;
- const char* test_tag_;
-
- SyncableClientTagTest() : test_name_("test_name"), test_tag_("dietcoke") {}
-
- bool CreateWithDefaultTag(Id id, bool deleted) {
- return CreateWithTag(test_tag_, id, deleted);
- }
-
-  // Attempt to create an entry with the given tag.
- bool CreateWithTag(const char* tag, Id id, bool deleted) {
- WriteTransaction wtrans(FROM_HERE, UNITTEST, dir_.get());
- MutableEntry me(&wtrans, CREATE, wtrans.root_id(), test_name_);
- CHECK(me.good());
- me.Put(ID, id);
- if (id.ServerKnows()) {
- me.Put(BASE_VERSION, kBaseVersion);
- }
- me.Put(IS_DEL, deleted);
- me.Put(IS_UNSYNCED, true);
- me.Put(IS_DIR, false);
- return me.Put(UNIQUE_CLIENT_TAG, tag);
- }
-
- // Verify an entry exists with the default tag.
- void VerifyTag(Id id, bool deleted) {
- // Should still be present and valid in the client tag index.
- ReadTransaction trans(FROM_HERE, dir_.get());
- Entry me(&trans, GET_BY_CLIENT_TAG, test_tag_);
- CHECK(me.good());
- EXPECT_EQ(me.Get(ID), id);
- EXPECT_EQ(me.Get(UNIQUE_CLIENT_TAG), test_tag_);
- EXPECT_EQ(me.Get(IS_DEL), deleted);
- EXPECT_EQ(me.Get(IS_UNSYNCED), true);
- }
-
- protected:
- TestIdFactory factory_;
-};
-
-TEST_F(SyncableClientTagTest, TestClientTagClear) {
- Id server_id = factory_.NewServerId();
- EXPECT_TRUE(CreateWithDefaultTag(server_id, false));
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
- MutableEntry me(&trans, GET_BY_CLIENT_TAG, test_tag_);
- EXPECT_TRUE(me.good());
- me.Put(UNIQUE_CLIENT_TAG, "");
- }
- {
- ReadTransaction trans(FROM_HERE, dir_.get());
- Entry by_tag(&trans, GET_BY_CLIENT_TAG, test_tag_);
- EXPECT_FALSE(by_tag.good());
-
- Entry by_id(&trans, GET_BY_ID, server_id);
- EXPECT_TRUE(by_id.good());
- EXPECT_TRUE(by_id.Get(UNIQUE_CLIENT_TAG).empty());
- }
-}
-
-TEST_F(SyncableClientTagTest, TestClientTagIndexServerId) {
- Id server_id = factory_.NewServerId();
- EXPECT_TRUE(CreateWithDefaultTag(server_id, false));
- VerifyTag(server_id, false);
-}
-
-TEST_F(SyncableClientTagTest, TestClientTagIndexClientId) {
- Id client_id = factory_.NewLocalId();
- EXPECT_TRUE(CreateWithDefaultTag(client_id, false));
- VerifyTag(client_id, false);
-}
-
-TEST_F(SyncableClientTagTest, TestDeletedClientTagIndexClientId) {
- Id client_id = factory_.NewLocalId();
- EXPECT_TRUE(CreateWithDefaultTag(client_id, true));
- VerifyTag(client_id, true);
-}
-
-TEST_F(SyncableClientTagTest, TestDeletedClientTagIndexServerId) {
- Id server_id = factory_.NewServerId();
- EXPECT_TRUE(CreateWithDefaultTag(server_id, true));
- VerifyTag(server_id, true);
-}
-
-TEST_F(SyncableClientTagTest, TestClientTagIndexDuplicateServer) {
- EXPECT_TRUE(CreateWithDefaultTag(factory_.NewServerId(), true));
- EXPECT_FALSE(CreateWithDefaultTag(factory_.NewServerId(), true));
- EXPECT_FALSE(CreateWithDefaultTag(factory_.NewServerId(), false));
- EXPECT_FALSE(CreateWithDefaultTag(factory_.NewLocalId(), false));
- EXPECT_FALSE(CreateWithDefaultTag(factory_.NewLocalId(), true));
-}
-
-} // namespace
-} // namespace syncable
diff --git a/chrome/browser/sync/syncable/transaction_observer.h b/chrome/browser/sync/syncable/transaction_observer.h
deleted file mode 100644
index 3d1f6e2..0000000
--- a/chrome/browser/sync/syncable/transaction_observer.h
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CHROME_BROWSER_SYNC_SYNCABLE_TRANSACTION_OBSERVER_H_
-#define CHROME_BROWSER_SYNC_SYNCABLE_TRANSACTION_OBSERVER_H_
-#pragma once
-
-#include "chrome/browser/sync/syncable/model_type.h"
-#include "chrome/browser/sync/syncable/syncable.h"
-
-namespace syncable {
-
-class TransactionObserver {
- public:
- virtual void OnTransactionWrite(
- const ImmutableWriteTransactionInfo& write_transaction_info,
- ModelTypeSet models_with_changes) = 0;
- protected:
- virtual ~TransactionObserver() {}
-};
-
-} // namespace syncable
-
-#endif // CHROME_BROWSER_SYNC_SYNCABLE_TRANSACTION_OBSERVER_H_