Diffstat (limited to 'chrome/browser/sync/syncable')
-rw-r--r--  chrome/browser/sync/syncable/blob.h  16
-rw-r--r--  chrome/browser/sync/syncable/dir_open_result.h  17
-rw-r--r--  chrome/browser/sync/syncable/directory_backing_store.cc  657
-rw-r--r--  chrome/browser/sync/syncable/directory_backing_store.h  123
-rw-r--r--  chrome/browser/sync/syncable/directory_event.h  21
-rw-r--r--  chrome/browser/sync/syncable/directory_manager.cc  169
-rw-r--r--  chrome/browser/sync/syncable/directory_manager.h  128
-rw-r--r--  chrome/browser/sync/syncable/path_name_cmp.h  20
-rw-r--r--  chrome/browser/sync/syncable/syncable-inl.h  30
-rw-r--r--  chrome/browser/sync/syncable/syncable.cc  2002
-rw-r--r--  chrome/browser/sync/syncable/syncable.h  1419
-rw-r--r--  chrome/browser/sync/syncable/syncable_changes_version.h  29
-rw-r--r--  chrome/browser/sync/syncable/syncable_columns.h  78
-rw-r--r--  chrome/browser/sync/syncable/syncable_id.cc  72
-rw-r--r--  chrome/browser/sync/syncable/syncable_id.h  114
-rw-r--r--  chrome/browser/sync/syncable/syncable_id_unittest.cc  44
-rw-r--r--  chrome/browser/sync/syncable/syncable_unittest.cc  1554
17 files changed, 6493 insertions, 0 deletions
diff --git a/chrome/browser/sync/syncable/blob.h b/chrome/browser/sync/syncable/blob.h
new file mode 100644
index 0000000..0d7f33a
--- /dev/null
+++ b/chrome/browser/sync/syncable/blob.h
@@ -0,0 +1,16 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_SYNCABLE_BLOB_H_
+#define CHROME_BROWSER_SYNC_SYNCABLE_BLOB_H_
+
+#include <vector>
+
+namespace syncable {
+
+typedef std::vector<uint8> Blob;
+
+} // namespace syncable
+
+#endif // CHROME_BROWSER_SYNC_SYNCABLE_BLOB_H_
diff --git a/chrome/browser/sync/syncable/dir_open_result.h b/chrome/browser/sync/syncable/dir_open_result.h
new file mode 100644
index 0000000..e122319
--- /dev/null
+++ b/chrome/browser/sync/syncable/dir_open_result.h
@@ -0,0 +1,17 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_SYNCABLE_DIR_OPEN_RESULT_H_
+#define CHROME_BROWSER_SYNC_SYNCABLE_DIR_OPEN_RESULT_H_
+
+namespace syncable {
+enum DirOpenResult { OPENED, // success.
+ FAILED_NEWER_VERSION, // DB version is too new.
+ FAILED_MAKE_REPOSITORY, // Couldn't create subdir.
+ FAILED_OPEN_DATABASE, // sqlite_open() failed.
+ FAILED_DISK_FULL, // The disk is full.
+};
+} // namespace syncable
+
+#endif // CHROME_BROWSER_SYNC_SYNCABLE_DIR_OPEN_RESULT_H_
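[Illustrative aside, not part of this change: a minimal sketch of branching on DirOpenResult. It assumes the Directory::Open() signature from syncable.h in this patch, PathString from sync_types.h, and LOG from base/logging.h; the helper name and message strings are ours.]

#include "base/logging.h"
#include "chrome/browser/sync/syncable/dir_open_result.h"
#include "chrome/browser/sync/syncable/syncable.h"

// Returns true only when the directory opened cleanly.
bool OpenOrLogFailure(syncable::Directory* dir,
                      const PathString& file_path, const PathString& name) {
  switch (dir->Open(file_path, name)) {
    case syncable::OPENED:
      return true;
    case syncable::FAILED_NEWER_VERSION:
      LOG(ERROR) << "Sync DB was written by a newer client.";
      break;
    case syncable::FAILED_MAKE_REPOSITORY:
      LOG(ERROR) << "Could not create the backing subdirectory.";
      break;
    case syncable::FAILED_OPEN_DATABASE:
      LOG(ERROR) << "sqlite open failed for the backing database.";
      break;
    case syncable::FAILED_DISK_FULL:
      LOG(ERROR) << "Disk full while initializing the sync DB.";
      break;
  }
  return false;
}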
diff --git a/chrome/browser/sync/syncable/directory_backing_store.cc b/chrome/browser/sync/syncable/directory_backing_store.cc
new file mode 100644
index 0000000..2f13f93
--- /dev/null
+++ b/chrome/browser/sync/syncable/directory_backing_store.cc
@@ -0,0 +1,657 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/syncable/directory_backing_store.h"
+
+#ifdef OS_MACOSX
+#include <CoreFoundation/CoreFoundation.h>
+#elif defined(OS_LINUX)
+#include <glib.h>
+#endif
+
+#include <string>
+
+#include "base/hash_tables.h"
+#include "base/logging.h"
+#include "chrome/browser/sync/protocol/service_constants.h"
+#include "chrome/browser/sync/syncable/syncable-inl.h"
+#include "chrome/browser/sync/syncable/syncable_columns.h"
+#include "chrome/browser/sync/util/crypto_helpers.h"
+#include "chrome/browser/sync/util/path_helpers.h"
+#include "chrome/browser/sync/util/query_helpers.h"
+#include "third_party/sqlite/preprocessed/sqlite3.h"
+
+// If sizeof(time_t) != sizeof(int32) we need to alter or expand the sqlite
+// datatype.
+COMPILE_ASSERT(sizeof(time_t) == sizeof(int32), time_t_is_not_int32);
+
+using std::string;
+
+namespace syncable {
+
+// This just has to be big enough to hold an UPDATE or
+// INSERT statement that modifies all the columns in the entry table.
+static const string::size_type kUpdateStatementBufferSize = 2048;
+
+// Increment this version whenever updating DB tables.
+static const int32 kCurrentDBVersion = 67;
+
+// TODO(sync): remove
+static void PathNameMatch16(sqlite3_context *context, int argc,
+ sqlite3_value **argv) {
+ const PathString pathspec(reinterpret_cast<const PathChar*>
+ (sqlite3_value_text16(argv[0])), sqlite3_value_bytes16(argv[0]) / 2);
+
+ const void* name_text = sqlite3_value_text16(argv[1]);
+ int name_bytes = sqlite3_value_bytes16(argv[1]);
+ // If the text is null, we need to avoid the PathString constructor.
+ if (name_text != NULL) {
+ // Have to copy to append a terminating 0 anyway.
+ const PathString name(reinterpret_cast<const PathChar*>
+ (sqlite3_value_text16(argv[1])),
+ sqlite3_value_bytes16(argv[1]) / 2);
+ sqlite3_result_int(context, PathNameMatch(name, pathspec));
+ } else {
+ sqlite3_result_int(context, PathNameMatch(PathString(), pathspec));
+ }
+}
+
+// Sqlite allows setting of the escape character in an ESCAPE clause and
+// this character is passed in as a third character to the like function.
+// See: http://www.sqlite.org/lang_expr.html
+static void PathNameMatch16WithEscape(sqlite3_context *context,
+ int argc, sqlite3_value **argv) {
+ // Never seen this called, but just in case.
+ LOG(FATAL) << "PathNameMatch16WithEscape() not implemented";
+}
+
+static void RegisterPathNameCollate(sqlite3* dbhandle) {
+#ifdef OS_WINDOWS
+ const int collate = SQLITE_UTF16;
+#else
+ const int collate = SQLITE_UTF8;
+#endif
+ CHECK(SQLITE_OK == sqlite3_create_collation(dbhandle, "PATHNAME", collate,
+ NULL, &ComparePathNames16));
+}
+
+// Replace the LIKE operator with our own implementation that
+// does file spec matching like "*.txt".
+static void RegisterPathNameMatch(sqlite3* dbhandle) {
+ // We only register this on Windows. We use the normal sqlite
+ // matching function on mac/linux.
+  // Note that PathNameMatch() does a simple == comparison on mac, so
+  // that would have to be fixed if we really wanted to use
+  // PathNameMatch on mac/linux with the same pattern strings as we do
+  // on windows.
+#ifdef OS_WINDOWS
+ CHECK(SQLITE_OK == sqlite3_create_function(dbhandle, "like",
+ 2, SQLITE_ANY, NULL, &PathNameMatch16, NULL, NULL));
+ CHECK(SQLITE_OK == sqlite3_create_function(dbhandle, "like",
+ 3, SQLITE_ANY, NULL, &PathNameMatch16WithEscape, NULL, NULL));
+#endif // OS_WINDOWS
+}
+
+static inline bool IsSqliteErrorOurFault(int result) {
+ switch (result) {
+ case SQLITE_MISMATCH:
+ case SQLITE_CONSTRAINT:
+ case SQLITE_MISUSE:
+ case SQLITE_RANGE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+namespace {
+// This small helper class reduces the amount of code in the table upgrade code
+// below and also CHECKs as soon as there's an issue.
+class StatementExecutor {
+ public:
+ explicit StatementExecutor(sqlite3* dbhandle) : dbhandle_(dbhandle) {
+ result_ = SQLITE_DONE;
+ }
+ int Exec(const char* query) {
+ if (SQLITE_DONE != result_)
+ return result_;
+ result_ = ::Exec(dbhandle_, query);
+ CHECK(!IsSqliteErrorOurFault(result_)) << query;
+ return result_;
+ }
+ template <typename T1>
+ int Exec(const char* query, T1 arg1) {
+ if (SQLITE_DONE != result_)
+ return result_;
+ result_ = ::Exec(dbhandle_, query, arg1);
+ CHECK(!IsSqliteErrorOurFault(result_)) << query;
+ return result_;
+ }
+ int result() {
+ return result_;
+ }
+ void set_result(int result) {
+ result_ = result;
+ CHECK(!IsSqliteErrorOurFault(result_)) << result_;
+ }
+ bool healthy() const {
+ return SQLITE_DONE == result_;
+ }
+ private:
+ sqlite3* dbhandle_;
+ int result_;
+ DISALLOW_COPY_AND_ASSIGN(StatementExecutor);
+};
+
+} // namespace
+
+static string GenerateCacheGUID() {
+ return Generate128BitRandomHexString();
+}
+
+// Iterate over the fields of |entry| and bind dirty ones to |statement| for
+// updating. Returns the number of args bound.
+static int BindDirtyFields(const EntryKernel& entry, sqlite3_stmt* statement) {
+ int index = 1;
+ int i = 0;
+ for (i = BEGIN_FIELDS; i < INT64_FIELDS_END; ++i) {
+ if (entry.dirty[i])
+ BindArg(statement, entry.ref(static_cast<Int64Field>(i)), index++);
+ }
+ for ( ; i < ID_FIELDS_END; ++i) {
+ if (entry.dirty[i])
+ BindArg(statement, entry.ref(static_cast<IdField>(i)), index++);
+ }
+ for ( ; i < BIT_FIELDS_END; ++i) {
+ if (entry.dirty[i])
+ BindArg(statement, entry.ref(static_cast<BitField>(i)), index++);
+ }
+ for ( ; i < STRING_FIELDS_END; ++i) {
+ if (entry.dirty[i])
+ BindArg(statement, entry.ref(static_cast<StringField>(i)), index++);
+ }
+ for ( ; i < BLOB_FIELDS_END; ++i) {
+ if (entry.dirty[i])
+ BindArg(statement, entry.ref(static_cast<BlobField>(i)), index++);
+ }
+ return index - 1;
+}
+
+// The caller owns the returned EntryKernel*.
+static EntryKernel* UnpackEntry(sqlite3_stmt* statement) {
+ EntryKernel* result = NULL;
+ int query_result = sqlite3_step(statement);
+ if (SQLITE_ROW == query_result) {
+ result = new EntryKernel;
+ CHECK(sqlite3_column_count(statement) == static_cast<int>(FIELD_COUNT));
+ int i = 0;
+ for (i = BEGIN_FIELDS; i < INT64_FIELDS_END; ++i) {
+ result->ref(static_cast<Int64Field>(i)) =
+ sqlite3_column_int64(statement, i);
+ }
+ for ( ; i < ID_FIELDS_END; ++i) {
+ GetColumn(statement, i, &result->ref(static_cast<IdField>(i)));
+ }
+ for ( ; i < BIT_FIELDS_END; ++i) {
+ result->ref(static_cast<BitField>(i)) =
+ (0 != sqlite3_column_int(statement, i));
+ }
+ for ( ; i < STRING_FIELDS_END; ++i) {
+ GetColumn(statement, i, &result->ref(static_cast<StringField>(i)));
+ }
+ for ( ; i < BLOB_FIELDS_END; ++i) {
+ GetColumn(statement, i, &result->ref(static_cast<BlobField>(i)));
+ }
+ ZeroFields(result, i);
+ } else {
+ CHECK(SQLITE_DONE == query_result);
+ result = NULL;
+ }
+ return result;
+}
+
+static bool StepDone(sqlite3_stmt* statement, const char* failed_call) {
+ int result = sqlite3_step(statement);
+ if (SQLITE_DONE == result && SQLITE_OK == (result = sqlite3_reset(statement)))
+ return true;
+ // Some error code.
+ LOG(WARNING) << failed_call << " failed with result " << result;
+ CHECK(!IsSqliteErrorOurFault(result));
+ return false;
+}
+
+static string ComposeCreateTableColumnSpecs(const ColumnSpec* begin,
+ const ColumnSpec* end) {
+ string query;
+ query.reserve(kUpdateStatementBufferSize);
+ char separator = '(';
+ for (const ColumnSpec* column = begin; column != end; ++column) {
+ query.push_back(separator);
+ separator = ',';
+ query.append(column->name);
+ query.push_back(' ');
+ query.append(column->spec);
+ }
+ query.push_back(')');
+ return query;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// DirectoryBackingStore implementation.
+
+DirectoryBackingStore::DirectoryBackingStore(const PathString& dir_name,
+ const PathString& backing_filepath)
+ : dir_name_(dir_name), backing_filepath_(backing_filepath),
+ load_dbhandle_(NULL), save_dbhandle_(NULL) {
+}
+
+DirectoryBackingStore::~DirectoryBackingStore() {
+ if (NULL != load_dbhandle_) {
+ sqlite3_close(load_dbhandle_);
+ load_dbhandle_ = NULL;
+ }
+ if (NULL != save_dbhandle_) {
+ sqlite3_close(save_dbhandle_);
+ save_dbhandle_ = NULL;
+ }
+}
+
+bool DirectoryBackingStore::OpenAndConfigureHandleHelper(
+ sqlite3** handle) const {
+ if (SQLITE_OK == SqliteOpen(backing_filepath_.c_str(), handle)) {
+ sqlite3_busy_timeout(*handle, kDirectoryBackingStoreBusyTimeoutMs);
+ RegisterPathNameCollate(*handle);
+ RegisterPathNameMatch(*handle);
+ return true;
+ }
+ return false;
+}
+
+DirOpenResult DirectoryBackingStore::Load(MetahandlesIndex* entry_bucket,
+ ExtendedAttributes* xattrs_bucket,
+ Directory::KernelLoadInfo* kernel_load_info) {
+ DCHECK(load_dbhandle_ == NULL);
+ if (!OpenAndConfigureHandleHelper(&load_dbhandle_))
+ return FAILED_OPEN_DATABASE;
+
+ DirOpenResult result = InitializeTables();
+ if (OPENED != result)
+ return result;
+
+ DropDeletedEntries();
+ LoadEntries(entry_bucket);
+ LoadExtendedAttributes(xattrs_bucket);
+ LoadInfo(kernel_load_info);
+
+ sqlite3_close(load_dbhandle_);
+ load_dbhandle_ = NULL; // No longer used.
+
+ return OPENED;
+}
+
+bool DirectoryBackingStore::SaveChanges(
+ const Directory::SaveChangesSnapshot& snapshot) {
+ bool disk_full = false;
+ sqlite3* dbhandle = LazyGetSaveHandle();
+ {
+ {
+ ScopedStatement begin(PrepareQuery(dbhandle,
+ "BEGIN EXCLUSIVE TRANSACTION"));
+ if (!StepDone(begin.get(), "BEGIN")) {
+ disk_full = true;
+ goto DoneDBTransaction;
+ }
+ }
+
+ for (OriginalEntries::const_iterator i = snapshot.dirty_metas.begin();
+ !disk_full && i != snapshot.dirty_metas.end(); ++i) {
+ DCHECK(i->dirty.any());
+ disk_full = !SaveEntryToDB(*i);
+ }
+
+ for (ExtendedAttributes::const_iterator i = snapshot.dirty_xattrs.begin();
+ !disk_full && i != snapshot.dirty_xattrs.end(); ++i) {
+ DCHECK(i->second.dirty);
+ if (i->second.is_deleted) {
+ disk_full = !DeleteExtendedAttributeFromDB(i);
+ } else {
+ disk_full = !SaveExtendedAttributeToDB(i);
+ }
+ }
+
+ if (!disk_full && (Directory::KERNEL_SHARE_INFO_DIRTY ==
+ snapshot.kernel_info_status)) {
+ const Directory::PersistedKernelInfo& info = snapshot.kernel_info;
+ ScopedStatement update(PrepareQuery(dbhandle, "UPDATE share_info "
+ "SET last_sync_timestamp = ?, initial_sync_ended = ?, "
+ "store_birthday = ?, "
+ "next_id = ?",
+ info.last_sync_timestamp,
+ info.initial_sync_ended,
+ info.store_birthday,
+ info.next_id));
+ disk_full = !(StepDone(update.get(), "UPDATE share_info")
+ && 1 == sqlite3_changes(dbhandle));
+ }
+ if (disk_full) {
+ ExecOrDie(dbhandle, "ROLLBACK TRANSACTION");
+ } else {
+ ScopedStatement end_transaction(PrepareQuery(dbhandle,
+ "COMMIT TRANSACTION"));
+ disk_full = !StepDone(end_transaction.get(), "COMMIT TRANSACTION");
+ }
+ }
+
+ DoneDBTransaction:
+ return !disk_full;
+}
+
+DirOpenResult DirectoryBackingStore::InitializeTables() {
+ StatementExecutor se(load_dbhandle_);
+ if (SQLITE_DONE != se.Exec("BEGIN EXCLUSIVE TRANSACTION")) {
+ return FAILED_DISK_FULL;
+ }
+ int version_on_disk = 0;
+
+ if (DoesTableExist(load_dbhandle_, "share_version")) {
+ ScopedStatement version_query(
+ PrepareQuery(load_dbhandle_, "SELECT data from share_version"));
+ int query_result = sqlite3_step(version_query.get());
+ if (SQLITE_ROW == query_result) {
+ version_on_disk = sqlite3_column_int(version_query.get(), 0);
+ }
+ version_query.reset(NULL);
+ }
+ if (version_on_disk != kCurrentDBVersion) {
+ if (version_on_disk > kCurrentDBVersion) {
+ ExecOrDie(load_dbhandle_, "END TRANSACTION");
+ return FAILED_NEWER_VERSION;
+ }
+ LOG(INFO) << "Old/null sync database, version " << version_on_disk;
+    // Delete the existing database (if any), and create a fresh one.
+ if (se.healthy()) {
+ DropAllTables();
+ se.set_result(CreateTables());
+ }
+ }
+ if (SQLITE_DONE == se.result()) {
+ {
+ ScopedStatement statement(PrepareQuery(load_dbhandle_,
+ "SELECT db_create_version, db_create_time FROM share_info"));
+ CHECK(SQLITE_ROW == sqlite3_step(statement.get()));
+ PathString db_create_version;
+ int db_create_time;
+ GetColumn(statement.get(), 0, &db_create_version);
+ GetColumn(statement.get(), 1, &db_create_time);
+ statement.reset(0);
+ LOG(INFO) << "DB created at " << db_create_time << " by version " <<
+ db_create_version;
+ }
+ // COMMIT TRANSACTION rolls back on failure.
+ if (SQLITE_DONE == Exec(load_dbhandle_, "COMMIT TRANSACTION"))
+ return OPENED;
+ } else {
+ ExecOrDie(load_dbhandle_, "ROLLBACK TRANSACTION");
+ }
+ return FAILED_DISK_FULL;
+}
+
+void DirectoryBackingStore::LoadEntries(MetahandlesIndex* entry_bucket) {
+ string select;
+ select.reserve(kUpdateStatementBufferSize);
+ select.append("SELECT");
+ const char* joiner = " ";
+ // Be explicit in SELECT order to match up with UnpackEntry.
+ for (int i = BEGIN_FIELDS; i < BEGIN_FIELDS + FIELD_COUNT; ++i) {
+ select.append(joiner);
+ select.append(ColumnName(i));
+ joiner = ", ";
+ }
+ select.append(" FROM metas ");
+ ScopedStatement statement(PrepareQuery(load_dbhandle_, select.c_str()));
+ base::hash_set<int> handles;
+ while (EntryKernel* kernel = UnpackEntry(statement.get())) {
+ DCHECK(handles.insert(kernel->ref(META_HANDLE)).second); // Only in debug.
+ entry_bucket->insert(kernel);
+ }
+}
+
+void DirectoryBackingStore::LoadExtendedAttributes(
+ ExtendedAttributes* xattrs_bucket) {
+ ScopedStatement statement(PrepareQuery(load_dbhandle_,
+ "SELECT metahandle, key, value FROM extended_attributes"));
+ int step_result = sqlite3_step(statement.get());
+ while (SQLITE_ROW == step_result) {
+ int64 metahandle;
+ PathString path_string_key;
+ ExtendedAttributeValue val;
+ val.is_deleted = false;
+ GetColumn(statement.get(), 0, &metahandle);
+ GetColumn(statement.get(), 1, &path_string_key);
+ GetColumn(statement.get(), 2, &(val.value));
+ ExtendedAttributeKey key(metahandle, path_string_key);
+ xattrs_bucket->insert(std::make_pair(key, val));
+ step_result = sqlite3_step(statement.get());
+ }
+ CHECK(SQLITE_DONE == step_result);
+}
+
+void DirectoryBackingStore::LoadInfo(Directory::KernelLoadInfo* info) {
+ ScopedStatement query(PrepareQuery(load_dbhandle_,
+ "SELECT last_sync_timestamp, initial_sync_ended, "
+ "store_birthday, next_id, cache_guid "
+ "FROM share_info"));
+ CHECK(SQLITE_ROW == sqlite3_step(query.get()));
+ GetColumn(query.get(), 0, &info->kernel_info.last_sync_timestamp);
+ GetColumn(query.get(), 1, &info->kernel_info.initial_sync_ended);
+ GetColumn(query.get(), 2, &info->kernel_info.store_birthday);
+ GetColumn(query.get(), 3, &info->kernel_info.next_id);
+ GetColumn(query.get(), 4, &info->cache_guid);
+ query.reset(PrepareQuery(load_dbhandle_,
+ "SELECT MAX(metahandle) FROM metas"));
+ CHECK(SQLITE_ROW == sqlite3_step(query.get()));
+ GetColumn(query.get(), 0, &info->max_metahandle);
+}
+
+bool DirectoryBackingStore::SaveEntryToDB(const EntryKernel& entry) {
+ return entry.ref(IS_NEW) ? SaveNewEntryToDB(entry) : UpdateEntryToDB(entry);
+}
+
+bool DirectoryBackingStore::SaveNewEntryToDB(const EntryKernel& entry) {
+ DCHECK(save_dbhandle_);
+ // TODO(timsteele): Should use INSERT OR REPLACE and eliminate one of
+ // the SaveNew / UpdateEntry code paths.
+ string query;
+ query.reserve(kUpdateStatementBufferSize);
+ query.append("INSERT INTO metas ");
+ string values;
+ values.reserve(kUpdateStatementBufferSize);
+ values.append("VALUES ");
+ const char* separator = "( ";
+ int i = 0;
+ for (i = BEGIN_FIELDS; i < BLOB_FIELDS_END; ++i) {
+ if (entry.dirty[i]) {
+ query.append(separator);
+ values.append(separator);
+ separator = ", ";
+ query.append(ColumnName(i));
+ values.append("?");
+ }
+ }
+ query.append(" ) ");
+ values.append(" )");
+ query.append(values);
+ ScopedStatement const statement(PrepareQuery(save_dbhandle_, query.c_str()));
+ BindDirtyFields(entry, statement.get());
+ return StepDone(statement.get(), "SaveNewEntryToDB()") &&
+ 1 == sqlite3_changes(save_dbhandle_);
+}
+
+bool DirectoryBackingStore::UpdateEntryToDB(const EntryKernel& entry) {
+ DCHECK(save_dbhandle_);
+ string query;
+ query.reserve(kUpdateStatementBufferSize);
+ query.append("UPDATE metas ");
+ const char* separator = "SET ";
+ int i;
+ for (i = BEGIN_FIELDS; i < BLOB_FIELDS_END; ++i) {
+ if (entry.dirty[i]) {
+ query.append(separator);
+ separator = ", ";
+ query.append(ColumnName(i));
+ query.append(" = ? ");
+ }
+ }
+ query.append("WHERE metahandle = ?");
+ ScopedStatement const statement(PrepareQuery(save_dbhandle_, query.c_str()));
+ const int var_count = BindDirtyFields(entry, statement.get());
+ BindArg(statement.get(), entry.ref(META_HANDLE), var_count + 1);
+ return StepDone(statement.get(), "UpdateEntryToDB()") &&
+ 1 == sqlite3_changes(save_dbhandle_);
+}
+
+bool DirectoryBackingStore::SaveExtendedAttributeToDB(
+ ExtendedAttributes::const_iterator i) {
+ DCHECK(save_dbhandle_);
+ ScopedStatement insert(PrepareQuery(save_dbhandle_,
+ "INSERT INTO extended_attributes "
+ "(metahandle, key, value) "
+ "values ( ?, ?, ? )",
+ i->first.metahandle, i->first.key, i->second.value));
+ return StepDone(insert.get(), "SaveExtendedAttributeToDB()")
+ && 1 == sqlite3_changes(LazyGetSaveHandle());
+}
+
+bool DirectoryBackingStore::DeleteExtendedAttributeFromDB(
+ ExtendedAttributes::const_iterator i) {
+ DCHECK(save_dbhandle_);
+ ScopedStatement delete_attribute(PrepareQuery(save_dbhandle_,
+ "DELETE FROM extended_attributes "
+ "WHERE metahandle = ? AND key = ? ",
+ i->first.metahandle, i->first.key));
+ if (!StepDone(delete_attribute.get(), "DeleteExtendedAttributeFromDB()")) {
+ LOG(ERROR) << "DeleteExtendedAttributeFromDB(),StepDone() failed "
+ << "for metahandle: " << i->first.metahandle << " key: "
+ << i->first.key;
+ return false;
+ }
+ // The attribute may have never been saved to the database if it was
+ // created and then immediately deleted. So don't check that we
+ // deleted exactly 1 row.
+ return true;
+}
+
+void DirectoryBackingStore::DropDeletedEntries() {
+ static const char delete_extended_attributes[] =
+ "DELETE FROM extended_attributes WHERE metahandle IN "
+ "(SELECT metahandle from death_row)";
+ static const char delete_metas[] = "DELETE FROM metas WHERE metahandle IN "
+ "(SELECT metahandle from death_row)";
+ // Put all statements into a transaction for better performance
+ ExecOrDie(load_dbhandle_, "BEGIN TRANSACTION");
+ ExecOrDie(load_dbhandle_, "CREATE TEMP TABLE death_row (metahandle BIGINT)");
+ ExecOrDie(load_dbhandle_, "INSERT INTO death_row "
+ "SELECT metahandle from metas WHERE is_del > 0 "
+ " AND is_unsynced < 1"
+ " AND is_unapplied_update < 1");
+ StatementExecutor x(load_dbhandle_);
+ x.Exec(delete_extended_attributes);
+ x.Exec(delete_metas);
+ ExecOrDie(load_dbhandle_, "DROP TABLE death_row");
+ ExecOrDie(load_dbhandle_, "COMMIT TRANSACTION");
+}
+
+void DirectoryBackingStore::SafeDropTable(const char* table_name) {
+ string query = "DROP TABLE IF EXISTS ";
+ query.append(table_name);
+ const char* tail;
+ sqlite3_stmt* statement = NULL;
+ if (SQLITE_OK == sqlite3_prepare(load_dbhandle_, query.data(),
+ query.size(), &statement, &tail)) {
+ CHECK(SQLITE_DONE == sqlite3_step(statement));
+ }
+ sqlite3_finalize(statement);
+}
+
+int DirectoryBackingStore::CreateExtendedAttributeTable() {
+ SafeDropTable("extended_attributes");
+ LOG(INFO) << "CreateExtendedAttributeTable";
+ return Exec(load_dbhandle_, "CREATE TABLE extended_attributes("
+ "metahandle bigint, "
+ "key varchar(127), "
+ "value blob, "
+ "PRIMARY KEY(metahandle, key) ON CONFLICT REPLACE)");
+}
+
+void DirectoryBackingStore::DropAllTables() {
+ SafeDropTable("metas");
+ SafeDropTable("share_info");
+ SafeDropTable("share_version");
+ SafeDropTable("extended_attributes");
+}
+
+int DirectoryBackingStore::CreateTables() {
+ LOG(INFO) << "First run, creating tables";
+ // Create two little tables share_version and share_info
+ int result = Exec(load_dbhandle_, "CREATE TABLE share_version ("
+ "id VARCHAR(128) primary key, data INT)");
+ result = SQLITE_DONE != result ? result :
+ Exec(load_dbhandle_, "INSERT INTO share_version VALUES(?, ?)",
+ dir_name_, kCurrentDBVersion);
+ result = SQLITE_DONE != result ? result :
+ Exec(load_dbhandle_, "CREATE TABLE share_info ("
+ "id VARCHAR(128) primary key, "
+ "last_sync_timestamp INT, "
+ "name VARCHAR(128), "
+ // Gets set if the syncer ever gets updates from the
+ // server and the server returns 0. Lets us detect the
+ // end of the initial sync.
+ "initial_sync_ended BIT default 0, "
+ "store_birthday VARCHAR(256), "
+ "db_create_version VARCHAR(128), "
+ "db_create_time int, "
+ "next_id bigint default -2, "
+ "cache_guid VARCHAR(32))");
+ result = SQLITE_DONE != result ? result :
+ Exec(load_dbhandle_, "INSERT INTO share_info VALUES"
+ "(?, " // id
+ "0, " // last_sync_timestamp
+ "?, " // name
+ "?, " // initial_sync_ended
+ "?, " // store_birthday
+ "?, " // db_create_version
+ "?, " // db_create_time
+ "-2, " // next_id
+ "?)", // cache_guid
+ dir_name_, // id
+ dir_name_, // name
+ false, // initial_sync_ended
+ "", // store_birthday
+ SYNC_ENGINE_VERSION_STRING, // db_create_version
+ static_cast<int32>(time(0)), // db_create_time
+ GenerateCacheGUID()); // cache_guid
+ // Create the big metas table.
+ string query = "CREATE TABLE metas " + ComposeCreateTableColumnSpecs
+ (g_metas_columns, g_metas_columns + ARRAYSIZE(g_metas_columns));
+ result = SQLITE_DONE != result ? result : Exec(load_dbhandle_, query.c_str());
+ // Insert the entry for the root into the metas table.
+ const int64 now = Now();
+ result = SQLITE_DONE != result ? result :
+ Exec(load_dbhandle_, "INSERT INTO metas "
+ "( id, metahandle, is_dir, ctime, mtime) "
+ "VALUES ( \"r\", 1, 1, ?, ?)",
+ now, now);
+ result = SQLITE_DONE != result ? result : CreateExtendedAttributeTable();
+ return result;
+}
+
+sqlite3* DirectoryBackingStore::LazyGetSaveHandle() {
+ if (!save_dbhandle_ && !OpenAndConfigureHandleHelper(&save_dbhandle_)) {
+ DCHECK(FALSE) << "Unable to open handle for saving";
+ return NULL;
+ }
+ return save_dbhandle_;
+}
+
+} // namespace syncable
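[Reading aid, not from this patch: SaveNewEntryToDB() and UpdateEntryToDB() above assemble statement text by walking the dirty-field bitmap and switching the separator after the first hit. Below is a standalone sketch of that pattern, with a plain std::bitset and a column-name vector standing in for EntryKernel::dirty and ColumnName().]

#include <bitset>
#include <string>
#include <vector>

// Builds "UPDATE metas SET a = ? , b = ? ... WHERE metahandle = ?" for the
// columns whose dirty bit is set; the bound-parameter order follows the
// iteration order, mirroring BindDirtyFields().
std::string BuildUpdateQuery(const std::vector<std::string>& columns,
                             const std::bitset<64>& dirty) {
  std::string query("UPDATE metas ");
  const char* separator = "SET ";
  for (size_t i = 0; i < columns.size() && i < dirty.size(); ++i) {
    if (dirty[i]) {
      query.append(separator);
      separator = ", ";
      query.append(columns[i]);
      query.append(" = ? ");
    }
  }
  query.append("WHERE metahandle = ?");
  return query;
}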
diff --git a/chrome/browser/sync/syncable/directory_backing_store.h b/chrome/browser/sync/syncable/directory_backing_store.h
new file mode 100644
index 0000000..a0bf8b1
--- /dev/null
+++ b/chrome/browser/sync/syncable/directory_backing_store.h
@@ -0,0 +1,123 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_SYNCABLE_DIRECTORY_BACKING_STORE_H_
+#define CHROME_BROWSER_SYNC_SYNCABLE_DIRECTORY_BACKING_STORE_H_
+
+#include <set>
+
+#include "chrome/browser/sync/syncable/dir_open_result.h"
+#include "chrome/browser/sync/syncable/syncable.h"
+
+extern "C" {
+struct sqlite3;
+struct sqlite3_stmt;
+}
+
+namespace syncable {
+
+struct ColumnSpec;
+typedef Directory::MetahandlesIndex MetahandlesIndex;
+
+// Provides sqlite3-based persistence for a syncable::Directory object. You can
+// load all the persisted data to prime a syncable::Directory on startup by
+// invoking Load. The only other thing you (or more correctly, a Directory)
+// can do here is save any changes that have occurred since calling Load, which
+// can be done periodically as often as desired*
+//
+// * If you only ever use a DirectoryBackingStore (DBS) from a single thread
+// then you can stop reading now. This is implemented using sqlite3, which
+// requires that each thread accesses a DB via a handle (sqlite3*) opened by
+// sqlite_open for that thread and only that thread. To avoid complicated TLS
+// logic to swap handles in-and-out as different threads try to get a hold of a
+// DBS, the DBS does two things:
+// 1. Uses a separate handle for Load()ing which is closed as soon as loading
+// finishes, and
+// 2. Requires that SaveChanges *only* be called from a single thread, and that
+// thread *must* be the thread that owns / is responsible for destroying
+// the DBS.
+// This way, any thread may open a Directory (which today can be either the
+// AuthWatcherThread or SyncCoreThread) and Load its DBS. The first time
+// SaveChanges is called a new sqlite3 handle is created, and it will get closed
+// when the DBS is destroyed, which is the reason for the requirement that the
+// thread that "uses" the DBS is the thread that destroys it.
+class DirectoryBackingStore {
+ public:
+ DirectoryBackingStore(const PathString& dir_name,
+ const PathString& backing_filepath);
+
+ virtual ~DirectoryBackingStore();
+
+ // Loads and drops all currently persisted meta entries into
+ // |entry_bucket|, all currently persisted xattrs in |xattrs_bucket|,
+  // and loads appropriate persisted kernel info into |kernel_load_info|.
+ // NOTE: On success (return value of OPENED), the buckets are populated with
+ // newly allocated items, meaning ownership is bestowed upon the caller.
+ DirOpenResult Load(MetahandlesIndex* entry_bucket,
+ ExtendedAttributes* xattrs_bucket,
+ Directory::KernelLoadInfo* kernel_load_info);
+
+ // Updates the on-disk store with the input |snapshot| as a database
+ // transaction. Does NOT open any syncable transactions as this would cause
+ // opening transactions elsewhere to block on synchronous I/O.
+ // DO NOT CALL THIS FROM MORE THAN ONE THREAD EVER. Also, whichever thread
+ // calls SaveChanges *must* be the thread that owns/destroys |this|.
+ virtual bool SaveChanges(const Directory::SaveChangesSnapshot& snapshot);
+
+ private:
+ // General Directory initialization and load helpers.
+ DirOpenResult InitializeTables();
+ // Returns an sqlite return code, usually SQLITE_DONE.
+ int CreateTables();
+ int CreateExtendedAttributeTable();
+  // We don't need to load any synced and applied deleted entries; we can
+  // in fact just purge them forever on startup.
+ void DropDeletedEntries();
+ // Drops a table if it exists, harmless if the table did not already exist.
+ void SafeDropTable(const char* table_name);
+
+ // Load helpers for entries and attributes.
+ void LoadEntries(MetahandlesIndex* entry_bucket);
+ void LoadExtendedAttributes(ExtendedAttributes* xattrs_bucket);
+ void LoadInfo(Directory::KernelLoadInfo* info);
+
+ // Save/update helpers for entries. Return false if sqlite commit fails.
+ bool SaveEntryToDB(const EntryKernel& entry);
+ bool SaveNewEntryToDB(const EntryKernel& entry);
+ bool UpdateEntryToDB(const EntryKernel& entry);
+
+ // Save/update helpers for attributes. Return false if sqlite commit fails.
+ bool SaveExtendedAttributeToDB(ExtendedAttributes::const_iterator i);
+ bool DeleteExtendedAttributeFromDB(ExtendedAttributes::const_iterator i);
+
+ // Creates a new sqlite3 handle to the backing database. Sets sqlite operation
+ // timeout preferences and registers our overridden sqlite3 operators for
+ // said handle. Returns true on success, false if the sqlite open operation
+ // did not succeed.
+ bool OpenAndConfigureHandleHelper(sqlite3** handle) const;
+
+ // Lazy creation of save_dbhandle_ for use by SaveChanges code path.
+ sqlite3* LazyGetSaveHandle();
+
+ // Drop all tables in preparation for reinitialization.
+ void DropAllTables();
+
+ // The handle to our sqlite on-disk store for initialization and loading, and
+ // for saving changes periodically via SaveChanges, respectively.
+ // TODO(timsteele): We should only have one handle here. The reason we need
+ // two at the moment is because the DB can be opened by either the AuthWatcher
+ // or SyncCore threads, but SaveChanges is always called by the latter. We
+ // need to change initialization so the DB is only accessed from one thread.
+ sqlite3* load_dbhandle_;
+ sqlite3* save_dbhandle_;
+
+ PathString dir_name_;
+ PathString backing_filepath_;
+
+ DISALLOW_COPY_AND_ASSIGN(DirectoryBackingStore);
+};
+
+} // namespace syncable
+
+#endif // CHROME_BROWSER_SYNC_SYNCABLE_DIRECTORY_BACKING_STORE_H_
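[Hedged usage sketch for the Load() ownership note above, not part of this change: the caller receives newly allocated EntryKernel objects and must free them if it does not hand them to a Directory. PathString is assumed to be the typedef from sync_types.h; the function name is ours.]

#include "chrome/browser/sync/syncable/directory_backing_store.h"

// Opens the backing store once, then frees the loaded kernels rather than
// handing them to a Directory.
bool ProbeBackingStore(const PathString& dir_name, const PathString& db_path) {
  syncable::DirectoryBackingStore store(dir_name, db_path);
  syncable::MetahandlesIndex entries;
  syncable::ExtendedAttributes xattrs;
  syncable::Directory::KernelLoadInfo info;
  if (syncable::OPENED != store.Load(&entries, &xattrs, &info))
    return false;
  // Per the comment on Load(), ownership of the loaded entries is now ours.
  for (syncable::MetahandlesIndex::iterator i = entries.begin();
       i != entries.end(); ++i) {
    delete *i;
  }
  return true;
}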
diff --git a/chrome/browser/sync/syncable/directory_event.h b/chrome/browser/sync/syncable/directory_event.h
new file mode 100644
index 0000000..638ce6a
--- /dev/null
+++ b/chrome/browser/sync/syncable/directory_event.h
@@ -0,0 +1,21 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_SYNCABLE_DIRECTORY_EVENT_H_
+#define CHROME_BROWSER_SYNC_SYNCABLE_DIRECTORY_EVENT_H_
+
+namespace syncable {
+
+// This kind of Event is emitted when the state of a Directory object
+// changes somehow, such as the directory being opened or closed.
+// Don't confuse it with a DirectoryChangeEvent, which is what happens
+// when one or more of the Entry contents of a Directory have been updated.
+enum DirectoryEvent {
+ DIRECTORY_CLOSED,
+ DIRECTORY_DESTROYED,
+};
+
+} // namespace syncable
+
+#endif // CHROME_BROWSER_SYNC_SYNCABLE_DIRECTORY_EVENT_H_
diff --git a/chrome/browser/sync/syncable/directory_manager.cc b/chrome/browser/sync/syncable/directory_manager.cc
new file mode 100644
index 0000000..b044d49
--- /dev/null
+++ b/chrome/browser/sync/syncable/directory_manager.cc
@@ -0,0 +1,169 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/syncable/directory_manager.h"
+
+#include <map>
+#include <set>
+#include <iterator>
+
+#include "base/logging.h"
+#include "base/port.h"
+#include "chrome/browser/sync/syncable/syncable.h"
+#include "chrome/browser/sync/util/event_sys-inl.h"
+#include "chrome/browser/sync/util/path_helpers.h"
+
+namespace syncable {
+
+static const PSTR_CHAR kSyncDataDatabaseFilename[] = PSTR("SyncData.sqlite3");
+
+DirectoryManagerEvent DirectoryManagerShutdownEvent() {
+ DirectoryManagerEvent event;
+ event.what_happened = DirectoryManagerEvent::SHUTDOWN;
+ return event;
+}
+
+// static
+const PathString DirectoryManager::GetSyncDataDatabaseFilename() {
+ return PathString(kSyncDataDatabaseFilename);
+}
+
+const PathString DirectoryManager::GetSyncDataDatabasePath() const {
+ PathString path(root_path_);
+ path.append(kSyncDataDatabaseFilename);
+ return path;
+}
+
+DirectoryManager::DirectoryManager(const PathString& path)
+ : root_path_(AppendSlash(path)),
+ channel_(new Channel(DirectoryManagerShutdownEvent())),
+ managed_directory_(NULL) {
+ CHECK(0 == pthread_mutex_init(&mutex_, NULL));
+}
+
+DirectoryManager::~DirectoryManager() {
+ DCHECK_EQ(managed_directory_, static_cast<Directory*>(NULL))
+ << "Dir " << managed_directory_->name() << " not closed!";
+ pthread_mutex_lock(&mutex_);
+ delete channel_;
+ pthread_mutex_unlock(&mutex_);
+ CHECK(0 == pthread_mutex_destroy(&mutex_));
+}
+
+bool DirectoryManager::Open(const PathString& name) {
+ bool was_open = false;
+ const DirOpenResult result = OpenImpl(name,
+ GetSyncDataDatabasePath(), &was_open);
+ if (!was_open) {
+ DirectoryManagerEvent event;
+ event.dirname = name;
+ if (syncable::OPENED == result) {
+ event.what_happened = DirectoryManagerEvent::OPENED;
+ } else {
+ event.what_happened = DirectoryManagerEvent::OPEN_FAILED;
+ event.error = result;
+ }
+ channel_->NotifyListeners(event);
+ }
+ return syncable::OPENED == result;
+}
+
+// Opens a directory, returning the DirOpenResult of the attempt.
+DirOpenResult DirectoryManager::OpenImpl(const PathString& name,
+ const PathString& path,
+ bool* was_open) {
+ pthread_mutex_lock(&mutex_);
+ // Check to see if it's already open.
+ bool opened = false;
+ if (managed_directory_) {
+ DCHECK_EQ(ComparePathNames(name, managed_directory_->name()), 0)
+ << "Can't open more than one directory.";
+ opened = *was_open = true;
+ }
+ pthread_mutex_unlock(&mutex_);
+ if (opened)
+ return syncable::OPENED;
+ // Otherwise, open it.
+
+ Directory* dir = new Directory;
+ const DirOpenResult result = dir->Open(path, name);
+ if (syncable::OPENED == result) {
+ pthread_mutex_lock(&mutex_);
+ managed_directory_ = dir;
+ pthread_mutex_unlock(&mutex_);
+ } else {
+ delete dir;
+ }
+ return result;
+}
+
+// Marks a directory as closed. It might take a while until all the
+// file handles and resources are freed by other threads.
+void DirectoryManager::Close(const PathString& name) {
+ // Erase from mounted and opened directory lists.
+ pthread_mutex_lock(&mutex_);
+
+ if (!managed_directory_ ||
+ ComparePathNames(name, managed_directory_->name()) != 0) {
+    // It wasn't open.
+ pthread_mutex_unlock(&mutex_);
+ return;
+ }
+ pthread_mutex_unlock(&mutex_);
+
+ // Notify listeners.
+ managed_directory_->channel()->NotifyListeners(DIRECTORY_CLOSED);
+ DirectoryManagerEvent event = { DirectoryManagerEvent::CLOSED, name };
+ channel_->NotifyListeners(event);
+
+ delete managed_directory_;
+ managed_directory_ = NULL;
+}
+
+// Marks all directories as closed. It might take a while until all the
+// file handles and resources are freed by other threads.
+void DirectoryManager::CloseAllDirectories() {
+ if (managed_directory_)
+ Close(managed_directory_->name());
+}
+
+void DirectoryManager::FinalSaveChangesForAll() {
+ pthread_mutex_lock(&mutex_);
+ if (managed_directory_)
+ managed_directory_->SaveChanges();
+ pthread_mutex_unlock(&mutex_);
+}
+
+void DirectoryManager::GetOpenDirectories(DirNames* result) {
+ result->clear();
+ pthread_mutex_lock(&mutex_);
+ if (managed_directory_)
+ result->push_back(managed_directory_->name());
+ pthread_mutex_unlock(&mutex_);
+}
+
+ScopedDirLookup::ScopedDirLookup(DirectoryManager* dirman,
+ const PathString& name) : dirman_(dirman) {
+ dir_ = dirman->managed_directory_ &&
+ (ComparePathNames(name, dirman->managed_directory_->name()) == 0) ?
+ dirman->managed_directory_ : NULL;
+ good_ = dir_;
+ good_checked_ = false;
+}
+
+ScopedDirLookup::~ScopedDirLookup() { }
+
+Directory* ScopedDirLookup::operator -> () const {
+ CHECK(good_checked_);
+ DCHECK(good_);
+ return dir_;
+}
+
+ScopedDirLookup::operator Directory* () const {
+ CHECK(good_checked_);
+ DCHECK(good_);
+ return dir_;
+}
+
+} // namespace syncable
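[Minimal lifecycle sketch, not from this patch, for the single-directory model implemented above. It assumes the caller already has the user name in the common case Open() requires; the helper name is ours.]

#include "chrome/browser/sync/syncable/directory_manager.h"

// Opens the one managed directory, flushes it, and closes it again.
void OpenFlushClose(syncable::DirectoryManager* dirman,
                    const PathString& user_name) {
  if (!dirman->Open(user_name))
    return;  // An OPEN_FAILED event has already gone out on the channel.
  // ... work against the directory via ScopedDirLookup ...
  dirman->FinalSaveChangesForAll();  // Normally done once, at app exit.
  dirman->Close(user_name);          // Broadcasts DIRECTORY_CLOSED + CLOSED.
}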
diff --git a/chrome/browser/sync/syncable/directory_manager.h b/chrome/browser/sync/syncable/directory_manager.h
new file mode 100644
index 0000000..f937539
--- /dev/null
+++ b/chrome/browser/sync/syncable/directory_manager.h
@@ -0,0 +1,128 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// This used to do a lot of TLS-based management of multiple Directory objects.
+// We now can access Directory objects from any thread for general purpose
+// operations and we only ever have one Directory, so this class isn't doing
+// anything too fancy besides keeping calling and access conventions the same
+// for now.
+// TODO(timsteele): We can probably nuke this entire class and use raw
+// Directory objects everywhere.
+#ifndef CHROME_BROWSER_SYNC_SYNCABLE_DIRECTORY_MANAGER_H_
+#define CHROME_BROWSER_SYNC_SYNCABLE_DIRECTORY_MANAGER_H_
+
+#include <pthread.h>
+
+#include <vector>
+
+#include "base/atomicops.h"
+#include "base/basictypes.h"
+#include "chrome/browser/sync/syncable/dir_open_result.h"
+#include "chrome/browser/sync/syncable/path_name_cmp.h"
+#include "chrome/browser/sync/syncable/syncable.h"
+#include "chrome/browser/sync/util/event_sys.h"
+#include "chrome/browser/sync/util/sync_types.h"
+
+namespace sync_api { class BaseTransaction; }
+
+namespace syncable {
+
+struct DirectoryManagerEvent {
+ enum {
+ OPEN_FAILED,
+ OPENED,
+ CLOSED,
+ CLOSED_ALL,
+ SHUTDOWN,
+ } what_happened;
+ PathString dirname;
+ DirOpenResult error; // Only for OPEN_FAILED.
+ typedef DirectoryManagerEvent EventType;
+ static inline bool IsChannelShutdownEvent(const EventType& event) {
+ return SHUTDOWN == event.what_happened;
+ }
+};
+
+DirectoryManagerEvent DirectoryManagerShutdownEvent();
+
+class DirectoryManager {
+ public:
+ typedef EventChannel<DirectoryManagerEvent> Channel;
+
+ // root_path specifies where db is stored.
+ explicit DirectoryManager(const PathString& root_path);
+ ~DirectoryManager();
+
+ static const PathString GetSyncDataDatabaseFilename();
+ const PathString GetSyncDataDatabasePath() const;
+
+ // Opens a directory. Returns false on error.
+  // The name parameter is the user's login,
+ // MUST already have been converted to a common case.
+ bool Open(const PathString& name);
+
+ // Marks a directory as closed. It might take a while until all the
+ // file handles and resources are freed by other threads.
+ void Close(const PathString& name);
+
+ // Marks all directories as closed. It might take a while until all the
+ // file handles and resources are freed by other threads.
+ void CloseAllDirectories();
+
+ // Should be called at App exit.
+ void FinalSaveChangesForAll();
+
+ // Gets the list of currently open directory names.
+ typedef std::vector<PathString> DirNames;
+ void GetOpenDirectories(DirNames* result);
+
+ Channel* channel() const { return channel_; }
+
+ protected:
+ DirOpenResult OpenImpl(const PathString& name, const PathString& path,
+ bool* was_open);
+
+ // Helpers for friend class ScopedDirLookup:
+ friend class ScopedDirLookup;
+
+ const PathString root_path_;
+ // protects managed_directory_
+ mutable pthread_mutex_t mutex_;
+ Directory* managed_directory_;
+
+ Channel* const channel_;
+
+ private:
+
+ DISALLOW_COPY_AND_ASSIGN(DirectoryManager);
+};
+
+
+class ScopedDirLookup {
+ public:
+ ScopedDirLookup(DirectoryManager* dirman, const PathString& name);
+ ~ScopedDirLookup();
+
+ inline bool good() {
+ good_checked_ = true;
+ return good_;
+ }
+ Directory* operator -> () const;
+ operator Directory* () const;
+
+ protected: // Don't allow creation on heap, except by sync API wrapper.
+ friend class sync_api::BaseTransaction;
+ void* operator new(size_t size) { return (::operator new)(size); }
+
+ Directory* dir_;
+ bool good_;
+  // Ensures that the programmer checks good() before using the
+  // ScopedDirLookup. Can be removed if it ever shows up in profiling.
+ bool good_checked_;
+ DirectoryManager* const dirman_;
+};
+
+} // namespace syncable
+
+#endif // CHROME_BROWSER_SYNC_SYNCABLE_DIRECTORY_MANAGER_H_
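[The good_checked_ member above implies a strict calling order: operator-> and the Directory* conversion CHECK unless good() was consulted first. A small sketch of the intended pattern follows; the function name is ours, and PathString is assumed from sync_types.h.]

#include "chrome/browser/sync/syncable/directory_manager.h"

// Saves changes only if the named directory is currently open.
void SaveIfOpen(syncable::DirectoryManager* dirman, const PathString& name) {
  syncable::ScopedDirLookup dir(dirman, name);
  if (!dir.good())
    return;            // Never opened, or already closed.
  dir->SaveChanges();  // Safe: good() has been checked above.
}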
diff --git a/chrome/browser/sync/syncable/path_name_cmp.h b/chrome/browser/sync/syncable/path_name_cmp.h
new file mode 100644
index 0000000..1478a52
--- /dev/null
+++ b/chrome/browser/sync/syncable/path_name_cmp.h
@@ -0,0 +1,20 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_SYNCABLE_PATH_NAME_CMP_H_
+#define CHROME_BROWSER_SYNC_SYNCABLE_PATH_NAME_CMP_H_
+
+#include "chrome/browser/sync/util/sync_types.h"
+
+namespace syncable {
+
+struct LessPathNames {
+ bool operator() (const PathString&, const PathString&) const;
+};
+
+int ComparePathNames(const PathString& a, const PathString& b);
+
+} // namespace syncable
+
+#endif // CHROME_BROWSER_SYNC_SYNCABLE_PATH_NAME_CMP_H_
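[One plausible use of this header, illustrative rather than taken from this change: LessPathNames lets standard containers key on PathString with the same case-insensitive ordering ComparePathNames provides. The typedef and helper below are ours.]

#include <map>
#include "chrome/browser/sync/syncable/path_name_cmp.h"

typedef std::map<PathString, int, syncable::LessPathNames> NameCounts;

// "Foo" and "foo" increment the same slot, since the comparator folds case
// on all supported platforms.
void CountName(NameCounts* counts, const PathString& name) {
  ++(*counts)[name];
}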
diff --git a/chrome/browser/sync/syncable/syncable-inl.h b/chrome/browser/sync/syncable/syncable-inl.h
new file mode 100644
index 0000000..81723bf
--- /dev/null
+++ b/chrome/browser/sync/syncable/syncable-inl.h
@@ -0,0 +1,30 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_SYNCABLE_SYNCABLE_INL_H_
+#define CHROME_BROWSER_SYNC_SYNCABLE_SYNCABLE_INL_H_
+
+#include "chrome/browser/sync/util/query_helpers.h"
+#include "chrome/browser/sync/util/row_iterator.h"
+
+namespace syncable {
+
+template <typename FieldType, FieldType field_index>
+class LessField {
+ public:
+ inline bool operator() (const syncable::EntryKernel* a,
+ const syncable::EntryKernel* b) const {
+ return a->ref(field_index) < b->ref(field_index);
+ }
+};
+
+struct IdRowTraits {
+ typedef syncable::Id RowType;
+ void Extract(sqlite3_stmt* statement, syncable::Id* id) const {
+ GetColumn(statement, 0, id);
+ }
+};
+} // namespace syncable
+
+#endif // CHROME_BROWSER_SYNC_SYNCABLE_SYNCABLE_INL_H_
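[LessField above is the comparator shape behind the Directory's pointer-set indices, which are probed with a stack-allocated "needle" entry (see GetEntryByHandle later in this change). Below is a toy version of that pattern with local stand-in types, so it does not depend on the field enums defined in syncable.h; all names here are ours.]

#include <cstddef>
#include <set>

struct ToyKernel {
  long long handle;
};

// Same idea as LessField<...>: order pointers by one field of the pointee.
struct LessByHandle {
  bool operator()(const ToyKernel* a, const ToyKernel* b) const {
    return a->handle < b->handle;
  }
};

typedef std::set<ToyKernel*, LessByHandle> ToyIndex;

// Probe the index with a stack "needle", the way the real code looks up
// entries by META_HANDLE or ID.
ToyKernel* FindByHandle(ToyIndex* index, long long handle) {
  ToyKernel needle;
  needle.handle = handle;
  ToyIndex::iterator it = index->find(&needle);
  return it == index->end() ? NULL : *it;
}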
diff --git a/chrome/browser/sync/syncable/syncable.cc b/chrome/browser/sync/syncable/syncable.cc
new file mode 100644
index 0000000..b997a5b
--- /dev/null
+++ b/chrome/browser/sync/syncable/syncable.cc
@@ -0,0 +1,2002 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/syncable/syncable.h"
+
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <time.h>
+#ifdef OS_MACOSX
+#include <CoreFoundation/CoreFoundation.h>
+#elif defined(OS_LINUX)
+#include <glib.h>
+#elif defined(OS_WINDOWS)
+#include <shlwapi.h> // for PathMatchSpec
+#endif
+
+#include <algorithm>
+#include <functional>
+#include <iomanip>
+#include <iterator>
+#include <set>
+#include <string>
+
+#include "base/hash_tables.h"
+#include "base/logging.h"
+#include "base/perftimer.h"
+#include "base/scoped_ptr.h"
+#include "base/time.h"
+#include "chrome/browser/sync/engine/syncer.h"
+#include "chrome/browser/sync/engine/syncer_util.h"
+#include "chrome/browser/sync/protocol/service_constants.h"
+#include "chrome/browser/sync/syncable/directory_backing_store.h"
+#include "chrome/browser/sync/syncable/directory_manager.h"
+#include "chrome/browser/sync/syncable/syncable-inl.h"
+#include "chrome/browser/sync/syncable/syncable_changes_version.h"
+#include "chrome/browser/sync/syncable/syncable_columns.h"
+#include "chrome/browser/sync/util/character_set_converters.h"
+#include "chrome/browser/sync/util/compat-file.h"
+#include "chrome/browser/sync/util/crypto_helpers.h"
+#include "chrome/browser/sync/util/event_sys-inl.h"
+#include "chrome/browser/sync/util/fast_dump.h"
+#include "chrome/browser/sync/util/path_helpers.h"
+
+namespace {
+enum InvariantCheckLevel {
+ OFF = 0,
+ VERIFY_IN_MEMORY = 1,
+ FULL_DB_VERIFICATION = 2
+};
+
+static const InvariantCheckLevel kInvariantCheckLevel = VERIFY_IN_MEMORY;
+
+// Max number of milliseconds to spend checking syncable entry invariants
+static const int kInvariantCheckMaxMs = 50;
+} // namespace
+
+// if sizeof(time_t) != sizeof(int32) we need to alter or expand the sqlite
+// datatype.
+COMPILE_ASSERT(sizeof(time_t) == sizeof(int32), time_t_is_not_int32);
+
+using browser_sync::FastDump;
+using browser_sync::SyncerUtil;
+using std::string;
+
+
+namespace syncable {
+
+int64 Now() {
+#ifdef OS_WINDOWS
+ FILETIME filetime;
+ SYSTEMTIME systime;
+ GetSystemTime(&systime);
+ SystemTimeToFileTime(&systime, &filetime);
+ // MSDN recommends converting via memcpy like this.
+ LARGE_INTEGER n;
+ memcpy(&n, &filetime, sizeof(filetime));
+ return n.QuadPart;
+#elif (defined(OS_LINUX) || defined(OS_MACOSX))
+ struct timeval tv;
+ gettimeofday(&tv, NULL);
+ return static_cast<int64>(tv.tv_sec);
+#else
+#error NEED OS SPECIFIC Now() implementation
+#endif
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Compare functions and hashes for the indices.
+
+// Callback for sqlite3
+int ComparePathNames16(void*, int a_bytes, const void* a, int b_bytes,
+ const void* b) {
+#ifdef OS_WINDOWS
+ DCHECK_EQ(0, a_bytes % 2);
+ DCHECK_EQ(0, b_bytes % 2);
+ int result = CompareString(LOCALE_INVARIANT, NORM_IGNORECASE,
+ static_cast<const PathChar*>(a), a_bytes / 2,
+ static_cast<const PathChar*>(b), b_bytes / 2);
+ CHECK(0 != result) << "Error comparing strings: " << GetLastError();
+ return result - 2; // Convert to -1, 0, 1
+#elif defined(OS_LINUX)
+// The "16" is a misnomer on Linux: these are already 8-bit UTF-8 strings.
+ gchar *case_folded_a;
+ gchar *case_folded_b;
+ GError *err = NULL;
+ case_folded_a = g_utf8_casefold(reinterpret_cast<const gchar*>(a), a_bytes);
+ CHECK(case_folded_a != NULL) << "g_utf8_casefold failed";
+ case_folded_b = g_utf8_casefold(reinterpret_cast<const gchar*>(b), b_bytes);
+ CHECK(case_folded_b != NULL) << "g_utf8_casefold failed";
+ gint result = g_utf8_collate(case_folded_a, case_folded_b);
+ g_free(case_folded_a);
+ g_free(case_folded_b);
+ if (result < 0) return -1;
+ if (result > 0) return 1;
+ return 0;
+#elif defined(OS_MACOSX)
+ CFStringRef a_str;
+ CFStringRef b_str;
+ a_str = CFStringCreateWithBytes(NULL, reinterpret_cast<const UInt8*>(a),
+ a_bytes, kCFStringEncodingUTF8, FALSE);
+ b_str = CFStringCreateWithBytes(NULL, reinterpret_cast<const UInt8*>(b),
+ b_bytes, kCFStringEncodingUTF8, FALSE);
+ CFComparisonResult res;
+ res = CFStringCompare(a_str, b_str, kCFCompareCaseInsensitive);
+ CFRelease(a_str);
+ CFRelease(b_str);
+ return res;
+#else
+#error no ComparePathNames16() for your OS
+#endif
+}
+
+template <Int64Field field_index>
+class SameField {
+ public:
+ inline bool operator()(const syncable::EntryKernel* a,
+ const syncable::EntryKernel* b) const {
+ return a->ref(field_index) == b->ref(field_index);
+ }
+};
+
+template <Int64Field field_index>
+class HashField {
+ public:
+ inline size_t operator()(const syncable::EntryKernel* a) const {
+ return hasher_(a->ref(field_index));
+ }
+ base::hash_set<int64> hasher_;
+};
+
+// TODO(ncarter): Rename!
+int ComparePathNames(const PathString& a, const PathString& b) {
+ const size_t val_size = sizeof(PathString::value_type);
+ return ComparePathNames16(NULL, a.size() * val_size, a.data(),
+ b.size() * val_size, b.data());
+}
+
+class LessParentIdAndNames {
+ public:
+ bool operator() (const syncable::EntryKernel* a,
+ const syncable::EntryKernel* b) const {
+ if (a->ref(PARENT_ID) != b->ref(PARENT_ID))
+ return a->ref(PARENT_ID) < b->ref(PARENT_ID);
+ return ComparePathNames(a->ref(NAME), b->ref(NAME)) < 0;
+ }
+};
+
+bool LessPathNames::operator() (const PathString& a,
+ const PathString& b) const {
+ return ComparePathNames(a, b) < 0;
+}
+
+// static
+Name Name::FromEntryKernel(EntryKernel* kernel) {
+ PathString& sync_name_ref = kernel->ref(UNSANITIZED_NAME).empty() ?
+ kernel->ref(NAME) : kernel->ref(UNSANITIZED_NAME);
+ return Name(kernel->ref(NAME), sync_name_ref, kernel->ref(NON_UNIQUE_NAME));
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Directory
+
+static const DirectoryChangeEvent kShutdownChangesEvent =
+ { DirectoryChangeEvent::SHUTDOWN, 0, 0 };
+
+void DestroyThreadNodeKey(void* vnode) {
+ ThreadNode* const node = reinterpret_cast<ThreadNode*>(vnode);
+ CHECK(!node->in_list)
+ << "\nThread exited while holding the transaction mutex!\n" << *node;
+ delete node;
+}
+
+Directory::Kernel::Kernel(const PathString& db_path,
+ const PathString& name,
+ const KernelLoadInfo& info)
+: db_path(db_path),
+ refcount(1),
+ name_(name),
+ metahandles_index(new Directory::MetahandlesIndex),
+ ids_index(new Directory::IdsIndex),
+ parent_id_and_names_index(new Directory::ParentIdAndNamesIndex),
+ extended_attributes(new ExtendedAttributes),
+ unapplied_update_metahandles(new MetahandleSet),
+ unsynced_metahandles(new MetahandleSet),
+ channel(new Directory::Channel(syncable::DIRECTORY_DESTROYED)),
+ changes_channel(new Directory::ChangesChannel(kShutdownChangesEvent)),
+ last_sync_timestamp_(info.kernel_info.last_sync_timestamp),
+ initial_sync_ended_(info.kernel_info.initial_sync_ended),
+ store_birthday_(info.kernel_info.store_birthday),
+ next_id(info.kernel_info.next_id),
+ cache_guid_(info.cache_guid),
+ next_metahandle(info.max_metahandle + 1) {
+ info_status_ = Directory::KERNEL_SHARE_INFO_VALID;
+ CHECK(0 == pthread_mutex_init(&mutex, NULL));
+ CHECK(0 == pthread_key_create(&thread_node_key, &DestroyThreadNodeKey));
+}
+
+inline void DeleteEntry(EntryKernel* kernel) {
+ delete kernel;
+}
+
+void Directory::Kernel::AddRef() {
+ base::subtle::NoBarrier_AtomicIncrement(&refcount, 1);
+}
+
+void Directory::Kernel::Release() {
+ if (!base::subtle::NoBarrier_AtomicIncrement(&refcount, -1))
+ delete this;
+}
+
+Directory::Kernel::~Kernel() {
+ CHECK(0 == refcount);
+ delete channel;
+ delete changes_channel;
+ CHECK(0 == pthread_mutex_destroy(&mutex));
+ pthread_key_delete(thread_node_key);
+ delete unsynced_metahandles;
+ delete unapplied_update_metahandles;
+ delete extended_attributes;
+ delete parent_id_and_names_index;
+ delete ids_index;
+ for_each(metahandles_index->begin(), metahandles_index->end(), DeleteEntry);
+ delete metahandles_index;
+}
+
+Directory::Directory() : kernel_(NULL), store_(NULL) {
+}
+
+Directory::~Directory() {
+ Close();
+}
+
+BOOL PathNameMatch(const PathString& pathname, const PathString& pathspec) {
+#ifdef OS_WINDOWS
+ // NB If we go Vista only this is easier:
+ // http://msdn2.microsoft.com/en-us/library/ms628611.aspx
+
+ // PathMatchSpec strips spaces from the start of pathspec, so we compare those
+ // ourselves.
+ const PathChar* pathname_ptr = pathname.c_str();
+ const PathChar* pathspec_ptr = pathspec.c_str();
+
+ while (*pathname_ptr == ' ' && *pathspec_ptr == ' ')
+ ++pathname_ptr, ++pathspec_ptr;
+
+  // If we have more initial spaces in the pathspec than in the pathname,
+  // then the result from PathMatchSpec will be erroneous.
+ if (*pathspec_ptr == ' ')
+ return FALSE;
+
+  // PathMatchSpec also gets "confused" when there are ';' characters in the
+  // name or in the spec. So if we match, for instance, ";" with ";",
+  // PathMatchSpec will return FALSE (which is wrong). Luckily we can easily
+  // fix this by substituting ';' with ':', which is an illegal character in
+  // a file name, so we're not going to see it there. With ':' in the path
+  // name and spec, PathMatchSpec works fine.
+ if ((NULL == wcschr(pathname_ptr, L';')) &&
+ (NULL == wcschr(pathspec_ptr, L';'))) {
+ // No ';' in file name and in spec. Just pass it as it is.
+ return ::PathMatchSpec(pathname_ptr, pathspec_ptr);
+ }
+
+ // We need to subst ';' with ':' in both, name and spec
+ PathString name_subst(pathname_ptr);
+ PathString spec_subst(pathspec_ptr);
+
+ PathString::size_type index = name_subst.find(L';');
+ while (PathString::npos != index) {
+ name_subst[index] = L':';
+ index = name_subst.find(L';', index + 1);
+ }
+
+ index = spec_subst.find(L';');
+ while (PathString::npos != index) {
+ spec_subst[index] = L':';
+ index = spec_subst.find(L';', index + 1);
+ }
+
+ return ::PathMatchSpec(name_subst.c_str(), spec_subst.c_str());
+#else
+ return 0 == ComparePathNames(pathname, pathspec);
+#endif
+}
+
+DirOpenResult Directory::Open(const PathString& file_path,
+ const PathString& name) {
+ const DirOpenResult result = OpenImpl(file_path, name);
+ if (OPENED != result)
+ Close();
+ return result;
+}
+
+void Directory::InitializeIndices() {
+ MetahandlesIndex::iterator it = kernel_->metahandles_index->begin();
+ for (; it != kernel_->metahandles_index->end(); ++it) {
+ EntryKernel* entry = *it;
+ if (!entry->ref(IS_DEL))
+ kernel_->parent_id_and_names_index->insert(entry);
+ kernel_->ids_index->insert(entry);
+ if (entry->ref(IS_UNSYNCED))
+ kernel_->unsynced_metahandles->insert(entry->ref(META_HANDLE));
+ if (entry->ref(IS_UNAPPLIED_UPDATE))
+ kernel_->unapplied_update_metahandles->insert(entry->ref(META_HANDLE));
+ }
+}
+
+DirectoryBackingStore* Directory::CreateBackingStore(
+ const PathString& dir_name, const PathString& backing_filepath) {
+ return new DirectoryBackingStore(dir_name, backing_filepath);
+}
+
+DirOpenResult Directory::OpenImpl(const PathString& file_path,
+ const PathString& name) {
+ DCHECK_EQ(static_cast<DirectoryBackingStore*>(NULL), store_);
+ const PathString db_path = ::GetFullPath(file_path);
+ store_ = CreateBackingStore(name, db_path);
+
+ KernelLoadInfo info;
+  // Temporary indices, used before kernel_ is initialized in case Load
+  // fails. We O(1)-swap these into the kernel later.
+ MetahandlesIndex metas_bucket;
+ ExtendedAttributes xattrs_bucket;
+ DirOpenResult result = store_->Load(&metas_bucket, &xattrs_bucket, &info);
+ if (OPENED != result)
+ return result;
+
+ kernel_ = new Kernel(db_path, name, info);
+ kernel_->metahandles_index->swap(metas_bucket);
+ kernel_->extended_attributes->swap(xattrs_bucket);
+ InitializeIndices();
+ return OPENED;
+}
+
+void Directory::Close() {
+ if (store_)
+ delete store_;
+ store_ = NULL;
+ if (kernel_) {
+ bool del = !base::subtle::NoBarrier_AtomicIncrement(&kernel_->refcount, -1);
+ DCHECK(del) << "Kernel should only have a single ref";
+ if (del)
+ delete kernel_;
+ kernel_ = NULL;
+ }
+}
+
+EntryKernel* Directory::GetEntryById(const Id& id) {
+ ScopedKernelLock lock(this);
+ return GetEntryById(id, &lock);
+}
+
+EntryKernel* Directory::GetEntryById(const Id& id,
+ ScopedKernelLock* const lock) {
+ DCHECK(kernel_);
+ // First look up in memory
+ kernel_->needle.ref(ID) = id;
+ IdsIndex::iterator id_found = kernel_->ids_index->find(&kernel_->needle);
+ if (id_found != kernel_->ids_index->end()) {
+ // Found it in memory. Easy.
+ return *id_found;
+ }
+ return NULL;
+}
+
+EntryKernel* Directory::GetEntryByTag(const PathString& tag) {
+ ScopedKernelLock lock(this);
+ DCHECK(kernel_);
+ // We don't currently keep a separate index for the tags. Since tags
+ // only exist for server created items that are the first items
+ // to be created in a store, they should have small metahandles.
+ // So, we just iterate over the items in sorted metahandle order,
+ // looking for a match.
+ MetahandlesIndex& set = *kernel_->metahandles_index;
+ for (MetahandlesIndex::iterator i = set.begin(); i != set.end(); ++i) {
+ if ((*i)->ref(SINGLETON_TAG) == tag) {
+ return *i;
+ }
+ }
+ return NULL;
+}
+
+EntryKernel* Directory::GetEntryByHandle(const int64 metahandle) {
+ ScopedKernelLock lock(this);
+ return GetEntryByHandle(metahandle, &lock);
+}
+
+EntryKernel* Directory::GetEntryByHandle(const int64 metahandle,
+ ScopedKernelLock* lock) {
+ // Look up in memory
+ kernel_->needle.ref(META_HANDLE) = metahandle;
+ MetahandlesIndex::iterator found =
+ kernel_->metahandles_index->find(&kernel_->needle);
+ if (found != kernel_->metahandles_index->end()) {
+ // Found it in memory. Easy.
+ return *found;
+ }
+ return NULL;
+}
+
+EntryKernel* Directory::GetChildWithName(const Id& parent_id,
+ const PathString& name) {
+ ScopedKernelLock lock(this);
+ return GetChildWithName(parent_id, name, &lock);
+}
+
+// Returns the child entry if the folder is opened; otherwise returns NULL.
+EntryKernel* Directory::GetChildWithName(const Id& parent_id,
+ const PathString& name,
+ ScopedKernelLock* const lock) {
+ PathString dbname = name;
+ EntryKernel* parent = GetEntryById(parent_id, lock);
+ if (parent == NULL)
+ return NULL;
+ return GetChildWithNameImpl(parent_id, dbname, lock);
+}
+
+// Returns the child entry even when the folder is not opened. This is used by
+// the syncer to apply updates while the folder is closed.
+EntryKernel* Directory::GetChildWithDBName(const Id& parent_id,
+ const PathString& name) {
+ ScopedKernelLock lock(this);
+ return GetChildWithNameImpl(parent_id, name, &lock);
+}
+
+EntryKernel* Directory::GetChildWithNameImpl(const Id& parent_id,
+ const PathString& name,
+ ScopedKernelLock* const lock) {
+ // First look up in memory:
+ kernel_->needle.ref(NAME) = name;
+ kernel_->needle.ref(PARENT_ID) = parent_id;
+ ParentIdAndNamesIndex::iterator found =
+ kernel_->parent_id_and_names_index->find(&kernel_->needle);
+ if (found != kernel_->parent_id_and_names_index->end()) {
+ // Found it in memory. Easy.
+ return *found;
+ }
+ return NULL;
+}
+
+// An interface to specify the details of which children
+// GetChildHandles() is looking for.
+struct PathMatcher {
+ explicit PathMatcher(const Id& parent_id) : parent_id_(parent_id) { }
+ virtual ~PathMatcher() { }
+ enum MatchType {
+ NO_MATCH,
+ MATCH,
+ // Means we found the only entry we're looking for in
+ // memory so we don't need to check the DB.
+ EXACT_MATCH
+ };
+ virtual MatchType PathMatches(const PathString& path) = 0;
+ typedef Directory::ParentIdAndNamesIndex Index;
+ virtual Index::iterator lower_bound(Index* index) = 0;
+ virtual Index::iterator upper_bound(Index* index) = 0;
+ const Id parent_id_;
+ EntryKernel needle_;
+};
+
+// Matches all children.
+struct AllPathsMatcher : public PathMatcher {
+ explicit AllPathsMatcher(const Id& parent_id) : PathMatcher(parent_id) {
+ }
+ virtual MatchType PathMatches(const PathString& path) {
+ return MATCH;
+ }
+ virtual Index::iterator lower_bound(Index* index) {
+ needle_.ref(PARENT_ID) = parent_id_;
+ needle_.ref(NAME).clear();
+ return index->lower_bound(&needle_);
+ }
+
+ virtual Index::iterator upper_bound(Index* index) {
+ needle_.ref(PARENT_ID) = parent_id_;
+ needle_.ref(NAME).clear();
+ Index::iterator i = index->upper_bound(&needle_),
+ end = index->end();
+ while (i != end && (*i)->ref(PARENT_ID) == parent_id_)
+ ++i;
+ return i;
+ }
+};
+
+// Matches an exact filename only; no wildcards.
+struct ExactPathMatcher : public PathMatcher {
+ ExactPathMatcher(const PathString& pathspec, const Id& parent_id)
+ : PathMatcher(parent_id), pathspec_(pathspec) {
+ }
+ virtual MatchType PathMatches(const PathString& path) {
+ return 0 == ComparePathNames(path, pathspec_) ? EXACT_MATCH : NO_MATCH;
+ }
+ virtual Index::iterator lower_bound(Index* index) {
+ needle_.ref(PARENT_ID) = parent_id_;
+ needle_.ref(NAME) = pathspec_;
+ return index->lower_bound(&needle_);
+ }
+ virtual Index::iterator upper_bound(Index* index) {
+ needle_.ref(PARENT_ID) = parent_id_;
+ needle_.ref(NAME) = pathspec_;
+ return index->upper_bound(&needle_);
+ }
+ const PathString pathspec_;
+};
+
+// Matches a pathspec with wildcards.
+struct PartialPathMatcher : public PathMatcher {
+ PartialPathMatcher(const PathString& pathspec,
+ PathString::size_type wildcard, const Id& parent_id)
+ : PathMatcher(parent_id), pathspec_(pathspec) {
+ if (0 == wildcard)
+ return;
+ lesser_.assign(pathspec_.data(), wildcard);
+ greater_.assign(pathspec_.data(), wildcard);
+    // Increment the last character of greater_ so that we can use a
+    // less-than comparison against it.
+ PathString::size_type i = greater_.size() - 1;
+ do {
+ if (greater_[i] == std::numeric_limits<PathString::value_type>::max()) {
+ greater_.resize(i); // Try the preceding character.
+ if (0 == i--)
+ break;
+ } else {
+ greater_[i] += 1;
+ }
+ // Yes, there are cases where incrementing a character
+ // actually decreases its position in the sort. Example: 9 -> :
+ } while (ComparePathNames(lesser_, greater_) >= 0);
+ }
+
+ virtual MatchType PathMatches(const PathString& path) {
+ return PathNameMatch(path, pathspec_) ? MATCH : NO_MATCH;
+ }
+
+ virtual Index::iterator lower_bound(Index* index) {
+ needle_.ref(PARENT_ID) = parent_id_;
+ needle_.ref(NAME) = lesser_;
+ return index->lower_bound(&needle_);
+ }
+ virtual Index::iterator upper_bound(Index* index) {
+ if (greater_.empty()) {
+ needle_.ref(PARENT_ID) = parent_id_;
+ needle_.ref(NAME).clear();
+ Index::iterator i = index->upper_bound(&needle_),
+ end = index->end();
+ while (i != end && (*i)->ref(PARENT_ID) == parent_id_)
+ ++i;
+ return i;
+ } else {
+ needle_.ref(PARENT_ID) = parent_id_;
+ needle_.ref(NAME) = greater_;
+ return index->lower_bound(&needle_);
+ }
+ }
+
+ const PathString pathspec_;
+ PathString lesser_;
+ PathString greater_;
+};
+
+
+void Directory::GetChildHandles(BaseTransaction* trans, const Id& parent_id,
+ Directory::ChildHandles* result) {
+ AllPathsMatcher matcher(parent_id);
+ return GetChildHandlesImpl(trans, parent_id, &matcher, result);
+}
+
+void Directory::GetChildHandlesImpl(BaseTransaction* trans, const Id& parent_id,
+ PathMatcher* matcher,
+ Directory::ChildHandles* result) {
+ CHECK(this == trans->directory());
+ result->clear();
+ {
+ ScopedKernelLock lock(this);
+ ParentIdAndNamesIndex* const index =
+ kernel_->parent_id_and_names_index;
+ typedef ParentIdAndNamesIndex::iterator iterator;
+ for (iterator i = matcher->lower_bound(index),
+ end = matcher->upper_bound(index); i != end; ++i) {
+      // The root's parent_id is NULL in the db but 0 in memory, so we
+      // have to avoid listing the root as its own child.
+ if ((*i)->ref(ID) == (*i)->ref(PARENT_ID))
+ continue;
+ PathMatcher::MatchType match = matcher->PathMatches((*i)->ref(NAME));
+ if (PathMatcher::NO_MATCH == match)
+ continue;
+ result->push_back((*i)->ref(META_HANDLE));
+ if (PathMatcher::EXACT_MATCH == match)
+ return;
+ }
+ }
+}
+
+EntryKernel* Directory::GetRootEntry() {
+ return GetEntryById(Id());
+}
+
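+// Resolves a path to an entry by walking from the root, looking up one path
+// segment at a time with GetChildWithName. Returns NULL if any segment is
+// missing.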
+EntryKernel* Directory::GetEntryByPath(const PathString& path) {
+ CHECK(kernel_);
+ EntryKernel* result = GetRootEntry();
+ CHECK(result) << "There should always be a root node.";
+ for (PathSegmentIterator<PathString> i(path), end;
+ i != end && NULL != result; ++i) {
+ result = GetChildWithName(result->ref(ID), *i);
+ }
+ return result;
+}
+
+void ZeroFields(EntryKernel* entry, int first_field) {
+ int i = first_field;
+ // Note that bitset<> constructor sets all bits to zero, and strings
+ // initialize to empty.
+ for ( ; i < INT64_FIELDS_END; ++i)
+ entry->ref(static_cast<Int64Field>(i)) = 0;
+ for ( ; i < ID_FIELDS_END; ++i)
+ entry->ref(static_cast<IdField>(i)).Clear();
+ for ( ; i < BIT_FIELDS_END; ++i)
+ entry->ref(static_cast<BitField>(i)) = false;
+ if (i < BLOB_FIELDS_END)
+ i = BLOB_FIELDS_END;
+}
+
+void Directory::InsertEntry(EntryKernel* entry) {
+ ScopedKernelLock lock(this);
+ InsertEntry(entry, &lock);
+}
+
+void Directory::InsertEntry(EntryKernel* entry, ScopedKernelLock* lock) {
+ DCHECK(NULL != lock);
+ CHECK(NULL != entry);
+ static const char error[] = "Entry already in memory index.";
+ CHECK(kernel_->metahandles_index->insert(entry).second) << error;
+ if (!entry->ref(IS_DEL))
+ CHECK(kernel_->parent_id_and_names_index->insert(entry).second) << error;
+ CHECK(kernel_->ids_index->insert(entry).second) << error;
+}
+
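+// Clears IS_DEL and reinserts the entry into the (parent id, name) index.
+// Fails if another entry already occupies that (parent, name) slot.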
+bool Directory::Undelete(EntryKernel* const entry) {
+ DCHECK(entry->ref(IS_DEL));
+ ScopedKernelLock lock(this);
+ if (NULL != GetChildWithName(entry->ref(PARENT_ID), entry->ref(NAME), &lock))
+ return false; // Would have duplicated existing entry.
+ entry->ref(IS_DEL) = false;
+ entry->dirty[IS_DEL] = true;
+ CHECK(kernel_->parent_id_and_names_index->insert(entry).second);
+ return true;
+}
+
+bool Directory::Delete(EntryKernel* const entry) {
+ DCHECK(!entry->ref(IS_DEL));
+ entry->ref(IS_DEL) = true;
+ entry->dirty[IS_DEL] = true;
+ ScopedKernelLock lock(this);
+ CHECK(1 == kernel_->parent_id_and_names_index->erase(entry));
+ return true;
+}
+
+bool Directory::ReindexId(EntryKernel* const entry, const Id& new_id) {
+ ScopedKernelLock lock(this);
+ if (NULL != GetEntryById(new_id, &lock))
+ return false;
+ CHECK(1 == kernel_->ids_index->erase(entry));
+ entry->ref(ID) = new_id;
+ CHECK(kernel_->ids_index->insert(entry).second);
+ return true;
+}
+
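+// Moves the entry to a new (parent id, name) position in the index. Deleted
+// entries are not indexed, so they are simply updated in place; a case-only
+// rename reuses the existing index node. Fails on a name collision.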
+bool Directory::ReindexParentIdAndName(EntryKernel* const entry,
+ const Id& new_parent_id,
+ const PathString& new_name) {
+ ScopedKernelLock lock(this);
+ PathString new_indexed_name = new_name;
+ if (entry->ref(IS_DEL)) {
+ entry->ref(PARENT_ID) = new_parent_id;
+ entry->ref(NAME) = new_indexed_name;
+ return true;
+ }
+
+ // check for a case changing rename
+ if (entry->ref(PARENT_ID) == new_parent_id &&
+ 0 == ComparePathNames(entry->ref(NAME), new_indexed_name)) {
+ entry->ref(NAME) = new_indexed_name;
+ } else {
+ if (NULL != GetChildWithName(new_parent_id, new_indexed_name, &lock))
+ return false;
+ CHECK(1 == kernel_->parent_id_and_names_index->erase(entry));
+ entry->ref(PARENT_ID) = new_parent_id;
+ entry->ref(NAME) = new_indexed_name;
+ CHECK(kernel_->parent_id_and_names_index->insert(entry).second);
+ }
+ return true;
+}
+
+// static
+bool Directory::SafeToPurgeFromMemory(const EntryKernel* const entry) {
+ return entry->ref(IS_DEL) && !entry->dirty.any() && !entry->ref(SYNCING) &&
+ !entry->ref(IS_UNAPPLIED_UPDATE) && !entry->ref(IS_UNSYNCED);
+}
+
+void Directory::TakeSnapshotForSaveChanges(SaveChangesSnapshot* snapshot) {
+ ReadTransaction trans(this, __FILE__, __LINE__);
+ ScopedKernelLock lock(this);
+ // Deep copy dirty entries from kernel_->metahandles_index into snapshot and
+ // clear dirty flags.
+ for (MetahandlesIndex::iterator i = kernel_->metahandles_index->begin();
+ i != kernel_->metahandles_index->end(); ++i) {
+ EntryKernel* entry = *i;
+ if (!entry->dirty.any())
+ continue;
+ snapshot->dirty_metas.insert(snapshot->dirty_metas.end(), *entry);
+ entry->dirty.reset();
+    // TODO(timsteele): The previous *Windows-only* SaveChanges code path seems
+    // to have a bug in that the IS_NEW bit is not rolled back if the entire DB
+    // transaction is rolled back, due to the "recent" Windows optimization of
+    // using a ReadTransaction rather than a WriteTransaction in SaveChanges.
+    // This bit is only used to decide whether we should sqlite INSERT or
+    // UPDATE, and if we are INSERTing we make sure to dirty all the fields so
+    // as to overwrite the database default values. For now, this is rectified
+    // by flipping the bit to false here (note that the snapshot will contain
+    // the "original" value), and then resetting it on failure in
+    // HandleSaveChangesFailure, where "failure" is defined as "the DB
+    // transaction was rolled back". This is safe because the only user of this
+    // bit is in fact SaveChanges, which enforces mutually exclusive access by
+    // way of save_changes_mutex_. The TODO is to consider abolishing this bit
+    // in favor of using a sqlite INSERT OR REPLACE, which could (would?) imply
+    // that all bits need to be written rather than just the dirty ones in
+    // the BindArg helper function.
+ entry->ref(IS_NEW) = false;
+ }
+
+ // Do the same for extended attributes.
+ for (ExtendedAttributes::iterator i = kernel_->extended_attributes->begin();
+ i != kernel_->extended_attributes->end(); ++i) {
+ if (!i->second.dirty)
+ continue;
+ snapshot->dirty_xattrs[i->first] = i->second;
+ i->second.dirty = false;
+ }
+
+ // Fill kernel_info_status and kernel_info.
+ PersistedKernelInfo& info = snapshot->kernel_info;
+ info.initial_sync_ended = kernel_->initial_sync_ended_;
+ info.last_sync_timestamp = kernel_->last_sync_timestamp_;
+  // To avoid duplicates when the process crashes, we record the next_id as
+  // being of greater magnitude than could possibly be reached before the next
+  // SaveChanges. In other words, it's effectively impossible for the user to
+  // generate 65536 new bookmarks in 3 seconds.
+ info.next_id = kernel_->next_id - 65536;
+ info.store_birthday = kernel_->store_birthday_;
+ snapshot->kernel_info_status = kernel_->info_status_;
+ // This one we reset on failure.
+ kernel_->info_status_ = KERNEL_SHARE_INFO_VALID;
+}
+
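+// Flushes dirty entries, extended attributes, and kernel info to the backing
+// store. save_changes_mutex_ guarantees that only one SaveChanges runs at a
+// time; on failure the dirty state is restored by HandleSaveChangesFailure.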
+bool Directory::SaveChanges() {
+ bool success = false;
+ DCHECK(store_);
+ PThreadScopedLock<PThreadMutex> lock(&kernel_->save_changes_mutex);
+ // Snapshot and save.
+ SaveChangesSnapshot snapshot;
+ TakeSnapshotForSaveChanges(&snapshot);
+ success = store_->SaveChanges(snapshot);
+
+ // Handle success or failure.
+ if (success)
+ VacuumAfterSaveChanges(snapshot);
+ else
+ HandleSaveChangesFailure(snapshot);
+ return success;
+}
+
+void Directory::VacuumAfterSaveChanges(const SaveChangesSnapshot& snapshot) {
+ // Need a write transaction as we are about to permanently purge entries.
+ WriteTransaction trans(this, VACUUM_AFTER_SAVE, __FILE__, __LINE__);
+ ScopedKernelLock lock(this);
+ kernel_->flushed_metahandles_.Push(0); // Begin flush marker
+ // Now drop everything we can out of memory.
+ for (OriginalEntries::const_iterator i = snapshot.dirty_metas.begin();
+ i != snapshot.dirty_metas.end(); ++i) {
+ kernel_->needle.ref(META_HANDLE) = i->ref(META_HANDLE);
+ MetahandlesIndex::iterator found =
+ kernel_->metahandles_index->find(&kernel_->needle);
+ EntryKernel* entry = (found == kernel_->metahandles_index->end() ?
+ NULL : *found);
+ if (entry && SafeToPurgeFromMemory(entry)) {
+ // We now drop deleted metahandles that are up to date on both the client
+ // and the server.
+ size_t num_erased = 0;
+ kernel_->flushed_metahandles_.Push(entry->ref(META_HANDLE));
+ num_erased = kernel_->ids_index->erase(entry);
+ DCHECK_EQ(1, num_erased);
+ num_erased = kernel_->metahandles_index->erase(entry);
+ DCHECK_EQ(1, num_erased);
+ delete entry;
+ }
+ }
+
+ ExtendedAttributes::const_iterator i = snapshot.dirty_xattrs.begin();
+ while (i != snapshot.dirty_xattrs.end()) {
+ ExtendedAttributeKey key(i->first.metahandle, i->first.key);
+ ExtendedAttributes::iterator found =
+ kernel_->extended_attributes->find(key);
+ if (found == kernel_->extended_attributes->end() ||
+ found->second.dirty || !i->second.is_deleted) {
+ ++i;
+ } else {
+ kernel_->extended_attributes->erase(found);
+ }
+ }
+}
+
+void Directory::HandleSaveChangesFailure(const SaveChangesSnapshot& snapshot) {
+ ScopedKernelLock lock(this);
+ kernel_->info_status_ = KERNEL_SHARE_INFO_DIRTY;
+
+ // Because we cleared dirty bits on the real entries when taking the snapshot,
+ // we should make sure the fact that the snapshot was not persisted gets
+ // reflected in the entries. Not doing this would mean if no other changes
+ // occur to the same fields of the entries in dirty_metas some changes could
+ // end up being lost, if they also failed to be committed to the server.
+ // Setting the bits ensures that SaveChanges will at least try again later.
+ for (OriginalEntries::const_iterator i = snapshot.dirty_metas.begin();
+ i != snapshot.dirty_metas.end(); ++i) {
+ kernel_->needle.ref(META_HANDLE) = i->ref(META_HANDLE);
+ MetahandlesIndex::iterator found =
+ kernel_->metahandles_index->find(&kernel_->needle);
+ if (found != kernel_->metahandles_index->end()) {
+ (*found)->dirty |= i->dirty;
+ (*found)->ref(IS_NEW) = i->ref(IS_NEW);
+ }
+ }
+
+ for (ExtendedAttributes::const_iterator i = snapshot.dirty_xattrs.begin();
+ i != snapshot.dirty_xattrs.end(); ++i) {
+ ExtendedAttributeKey key(i->first.metahandle, i->first.key);
+ ExtendedAttributes::iterator found =
+ kernel_->extended_attributes->find(key);
+ if (found != kernel_->extended_attributes->end())
+ found->second.dirty = true;
+ }
+}
+
+int64 Directory::last_sync_timestamp() const {
+ ScopedKernelLock lock(this);
+ return kernel_->last_sync_timestamp_;
+}
+
+void Directory::set_last_sync_timestamp(int64 timestamp) {
+ ScopedKernelLock lock(this);
+ if (kernel_->last_sync_timestamp_ == timestamp)
+ return;
+ kernel_->last_sync_timestamp_ = timestamp;
+ kernel_->info_status_ = KERNEL_SHARE_INFO_DIRTY;
+}
+
+bool Directory::initial_sync_ended() const {
+ ScopedKernelLock lock(this);
+ return kernel_->initial_sync_ended_;
+}
+
+void Directory::set_initial_sync_ended(bool x) {
+ ScopedKernelLock lock(this);
+ if (kernel_->initial_sync_ended_ == x)
+ return;
+ kernel_->initial_sync_ended_ = x;
+ kernel_->info_status_ = KERNEL_SHARE_INFO_DIRTY;
+}
+
+string Directory::store_birthday() const {
+ ScopedKernelLock lock(this);
+ return kernel_->store_birthday_;
+}
+
+void Directory::set_store_birthday(string store_birthday) {
+ ScopedKernelLock lock(this);
+ if (kernel_->store_birthday_ == store_birthday)
+ return;
+ kernel_->store_birthday_ = store_birthday;
+ kernel_->info_status_ = KERNEL_SHARE_INFO_DIRTY;
+}
+
+string Directory::cache_guid() const {
+ // No need to lock since nothing ever writes to it after load.
+ return kernel_->cache_guid_;
+}
+
+void Directory::GetAllMetaHandles(BaseTransaction* trans,
+ MetahandleSet* result) {
+ result->clear();
+ ScopedKernelLock lock(this);
+ MetahandlesIndex::iterator i;
+ for (i = kernel_->metahandles_index->begin();
+ i != kernel_->metahandles_index->end();
+ ++i) {
+ result->insert((*i)->ref(META_HANDLE));
+ }
+}
+
+void Directory::GetUnsyncedMetaHandles(BaseTransaction* trans,
+ UnsyncedMetaHandles* result) {
+ result->clear();
+ ScopedKernelLock lock(this);
+ copy(kernel_->unsynced_metahandles->begin(),
+ kernel_->unsynced_metahandles->end(), back_inserter(*result));
+}
+
+void Directory::GetAllExtendedAttributes(BaseTransaction* trans,
+ int64 metahandle,
+ std::set<ExtendedAttribute>* result) {
+ AttributeKeySet keys;
+ GetExtendedAttributesList(trans, metahandle, &keys);
+ AttributeKeySet::iterator iter;
+ for (iter = keys.begin(); iter != keys.end(); ++iter) {
+ ExtendedAttributeKey key(metahandle, *iter);
+ ExtendedAttribute extended_attribute(trans, GET_BY_HANDLE, key);
+ CHECK(extended_attribute.good());
+ result->insert(extended_attribute);
+ }
+}
+
+void Directory::GetExtendedAttributesList(BaseTransaction* trans,
+ int64 metahandle, AttributeKeySet* result) {
+ ExtendedAttributes::iterator iter;
+ for (iter = kernel_->extended_attributes->begin();
+ iter != kernel_->extended_attributes->end(); ++iter) {
+ if (iter->first.metahandle == metahandle) {
+ if (!iter->second.is_deleted)
+ result->insert(iter->first.key);
+ }
+ }
+}
+
+void Directory::DeleteAllExtendedAttributes(WriteTransaction* trans,
+ int64 metahandle) {
+ AttributeKeySet keys;
+ GetExtendedAttributesList(trans, metahandle, &keys);
+ AttributeKeySet::iterator iter;
+ for (iter = keys.begin(); iter != keys.end(); ++iter) {
+ ExtendedAttributeKey key(metahandle, *iter);
+ MutableExtendedAttribute attribute(trans, GET_BY_HANDLE, key);
+ // This flags the attribute for deletion during SaveChanges. At that time
+ // any deleted attributes are purged from disk and memory.
+ attribute.delete_attribute();
+ }
+}
+
+int64 Directory::unsynced_entity_count() const {
+ ScopedKernelLock lock(this);
+ return kernel_->unsynced_metahandles->size();
+}
+
+void Directory::GetUnappliedUpdateMetaHandles(BaseTransaction* trans,
+ UnappliedUpdateMetaHandles* result) {
+ result->clear();
+ ScopedKernelLock lock(this);
+ copy(kernel_->unapplied_update_metahandles->begin(),
+ kernel_->unapplied_update_metahandles->end(),
+ back_inserter(*result));
+}
+
+
+class IdFilter {
+ public:
+ virtual ~IdFilter() { }
+ virtual bool ShouldConsider(const Id& id) const = 0;
+};
+
+
+class FullScanFilter : public IdFilter {
+ public:
+ virtual bool ShouldConsider(const Id& id) const {
+ return true;
+ }
+};
+
+class SomeIdsFilter : public IdFilter {
+ public:
+ virtual bool ShouldConsider(const Id& id) const {
+ return binary_search(ids_.begin(), ids_.end(), id);
+ }
+ std::vector<Id> ids_;
+};
+
+void Directory::CheckTreeInvariants(syncable::BaseTransaction* trans,
+ const OriginalEntries* originals) {
+ MetahandleSet handles;
+ SomeIdsFilter filter;
+ filter.ids_.reserve(originals->size());
+ for (OriginalEntries::const_iterator i = originals->begin(),
+ end = originals->end(); i != end; ++i) {
+ Entry e(trans, GET_BY_HANDLE, i->ref(META_HANDLE));
+ CHECK(e.good());
+ filter.ids_.push_back(e.Get(ID));
+ handles.insert(i->ref(META_HANDLE));
+ }
+ std::sort(filter.ids_.begin(), filter.ids_.end());
+ CheckTreeInvariants(trans, handles, filter);
+}
+
+void Directory::CheckTreeInvariants(syncable::BaseTransaction* trans,
+ bool full_scan) {
+ // TODO(timsteele): This is called every time a WriteTransaction finishes.
+ // The performance hit is substantial given that we now examine every single
+ // syncable entry. Need to redesign this.
+ MetahandleSet handles;
+ GetAllMetaHandles(trans, &handles);
+ if (full_scan) {
+ FullScanFilter fullfilter;
+ CheckTreeInvariants(trans, handles, fullfilter);
+ } else {
+ SomeIdsFilter filter;
+ MetahandleSet::iterator i;
+ for (i = handles.begin() ; i != handles.end() ; ++i) {
+ Entry e(trans, GET_BY_HANDLE, *i);
+ CHECK(e.good());
+ filter.ids_.push_back(e.Get(ID));
+ }
+ sort(filter.ids_.begin(), filter.ids_.end());
+ CheckTreeInvariants(trans, handles, filter);
+ }
+}
+
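+// Verifies tree invariants for the given metahandles: non-deleted entries
+// must have non-empty names and a chain of existing, non-deleted directory
+// ancestors (limited to ids accepted by |idfilter|), and version/ID state
+// must be self-consistent. Gives up after kInvariantCheckMaxMs.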
+void Directory::CheckTreeInvariants(syncable::BaseTransaction* trans,
+ const MetahandleSet& handles,
+ const IdFilter& idfilter) {
+ int64 max_ms = kInvariantCheckMaxMs;
+ if (max_ms < 0)
+ max_ms = std::numeric_limits<int64>::max();
+ PerfTimer check_timer;
+ MetahandleSet::const_iterator i;
+ int entries_done = 0;
+ for (i = handles.begin() ; i != handles.end() ; ++i) {
+ int64 metahandle = *i;
+ Entry e(trans, GET_BY_HANDLE, metahandle);
+ CHECK(e.good());
+ syncable::Id id = e.Get(ID);
+ syncable::Id parentid = e.Get(PARENT_ID);
+
+ if (id.IsRoot()) {
+ CHECK(e.Get(IS_DIR)) << e;
+ CHECK(parentid.IsRoot()) << e;
+ CHECK(!e.Get(IS_UNSYNCED)) << e;
+ ++entries_done;
+ continue;
+ }
+ if (!e.Get(IS_DEL)) {
+ CHECK(id != parentid) << e;
+ CHECK(!e.Get(NAME).empty()) << e;
+ int safety_count = handles.size() + 1;
+ while (!parentid.IsRoot()) {
+ if (!idfilter.ShouldConsider(parentid))
+ break;
+ Entry parent(trans, GET_BY_ID, parentid);
+ CHECK(parent.good()) << e;
+ CHECK(parent.Get(IS_DIR)) << parent << e;
+ CHECK(!parent.Get(IS_DEL)) << parent << e;
+ CHECK(handles.end() != handles.find(parent.Get(META_HANDLE)))
+ << e << parent;
+ parentid = parent.Get(PARENT_ID);
+ CHECK(--safety_count >= 0) << e << parent;
+ }
+ }
+ int64 base_version = e.Get(BASE_VERSION);
+ int64 server_version = e.Get(SERVER_VERSION);
+ if (CHANGES_VERSION == base_version || 0 == base_version) {
+ if (e.Get(IS_UNAPPLIED_UPDATE)) {
+ // Unapplied new item.
+ CHECK(e.Get(IS_DEL)) << e;
+ CHECK(id.ServerKnows()) << e;
+ } else {
+ // Uncommitted item.
+ if (!e.Get(IS_DEL)) {
+ CHECK(e.Get(IS_UNSYNCED)) << e;
+ }
+ CHECK(0 == server_version) << e;
+ CHECK(!id.ServerKnows()) << e;
+ }
+ } else {
+ CHECK(id.ServerKnows());
+ }
+ ++entries_done;
+ int64 elapsed_ms = check_timer.Elapsed().InMilliseconds();
+ if (elapsed_ms > max_ms) {
+ LOG(INFO) << "Cutting Invariant check short after " << elapsed_ms << "ms."
+ " Processed " << entries_done << "/" << handles.size() << " entries";
+ return;
+ }
+ }
+  // I had intended to add a check here to ensure that no entries had been
+  // pulled into memory by this function, but we can't guard against another
+  // ReadTransaction pulling entries into RAM.
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// ScopedKernelLocks
+
+ScopedKernelLock::ScopedKernelLock(const Directory* dir)
+ : dir_(const_cast<Directory*>(dir)) {
+ // Swap out the dbhandle to enforce the "No IO while holding kernel
+ // lock" rule.
+ // HA!! Yeah right. What about your pre-cached queries :P
+ pthread_mutex_lock(&dir->kernel_->mutex);
+}
+ScopedKernelLock::~ScopedKernelLock() {
+ pthread_mutex_unlock(&dir_->kernel_->mutex);
+}
+
+ScopedKernelUnlock::ScopedKernelUnlock(ScopedKernelLock* lock)
+ : lock_(lock) {
+ pthread_mutex_unlock(&lock->dir_->kernel_->mutex);
+}
+ScopedKernelUnlock::~ScopedKernelUnlock() {
+ pthread_mutex_lock(&lock_->dir_->kernel_->mutex);
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Transactions
+#if defined LOG_ALL || !defined NDEBUG
+static const bool kLoggingInfo = true;
+#else
+static const bool kLoggingInfo = false;
+#endif
+
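+// Returns this thread's ThreadNode (creating it on first use via thread-local
+// storage) and records where the transaction was opened. Dies if the thread
+// is already inside a transaction, since recursive locking is not allowed.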
+ThreadNode* BaseTransaction::MakeThreadNode() {
+ ThreadNode* node = reinterpret_cast<ThreadNode*>
+ (pthread_getspecific(dirkernel_->thread_node_key));
+ if (NULL == node) {
+ node = new ThreadNode;
+ node->id = GetCurrentThreadId();
+ pthread_setspecific(dirkernel_->thread_node_key, node);
+ } else if (node->in_list) {
+ logging::LogMessage(source_file_, line_, logging::LOG_FATAL).stream()
+ << " Recursive Lock attempt by thread id " << node->id << "." << std::endl
+ << "Already entered transaction at " << node->file << ":" << node->line;
+ }
+ node->file = source_file_;
+ node->line = line_;
+ node->wait_started = base::TimeTicks::Now();
+ return node;
+}
+
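+// Acquires the directory's reader/writer lock: multiple READ transactions may
+// be active at once, while a WRITE transaction requires exclusive access.
+// Waiters are queued FIFO and woken from UnlockAndLog.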
+void BaseTransaction::Lock(ThreadCounts* const thread_counts,
+ ThreadNode* node, TransactionClass tclass) {
+ ScopedTransactionLock lock(&dirkernel_->transaction_mutex);
+ // Increment the waiters count.
+ node->tclass = tclass;
+ thread_counts->waiting += 1;
+ node->Insert(&thread_counts->waiting_headtail);
+
+ // Block until we can own the reader/writer lock
+ bool ready = 1 == thread_counts->waiting;
+ while (true) {
+ if (ready) {
+ if (0 == thread_counts->active) {
+ // We can take the lock because there is no contention.
+ break;
+ } else if (READ == tclass
+ && READ == thread_counts->active_headtail.next->tclass) {
+ // We can take the lock because reads can run simultaneously.
+ break;
+ }
+ }
+ // Wait to be woken up and check again.
+ node->wake_up = false;
+ do {
+ CHECK(0 == pthread_cond_wait(&node->condvar.condvar_,
+ &dirkernel_->transaction_mutex.mutex_));
+ } while (!node->wake_up);
+ ready = true;
+ }
+
+ // Move from the list of waiters to the list of active.
+ thread_counts->waiting -= 1;
+ thread_counts->active += 1;
+ CHECK(WRITE != tclass || 1 == thread_counts->active);
+ node->Remove();
+ node->Insert(&thread_counts->active_headtail);
+ if (WRITE == tclass)
+ node->current_write_trans = static_cast<WriteTransaction*>(this);
+}
+
+void BaseTransaction::AfterLock(ThreadNode* node) {
+ time_acquired_ = base::TimeTicks::Now();
+
+ const base::TimeDelta elapsed = time_acquired_ - node->wait_started;
+ if (kLoggingInfo && elapsed.InMilliseconds() > 200) {
+ logging::LogMessage(source_file_, line_, logging::LOG_INFO).stream()
+ << name_ << " transaction waited "
+ << elapsed.InSecondsF() << " seconds.";
+ }
+}
+
+void BaseTransaction::Init(ThreadCounts* const thread_counts,
+ TransactionClass tclass) {
+ ThreadNode* const node = MakeThreadNode();
+ Lock(thread_counts, node, tclass);
+ AfterLock(node);
+}
+
+BaseTransaction::BaseTransaction(Directory* directory, const char* name,
+ const char* source_file, int line)
+ : directory_(directory), dirkernel_(directory->kernel_), name_(name),
+ source_file_(source_file), line_(line) {
+}
+
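+// Releases the transaction lock, wakes the next waiter (plus any consecutive
+// readers at the head of the FIFO queue), and, for write transactions, hands
+// the original entry snapshots to change listeners on the changes channel.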
+void BaseTransaction::UnlockAndLog(ThreadCounts* const thread_counts,
+ OriginalEntries* originals_arg) {
+ scoped_ptr<OriginalEntries> originals(originals_arg);
+ const base::TimeDelta elapsed = base::TimeTicks::Now() - time_acquired_;
+ if (kLoggingInfo && elapsed.InMilliseconds() > 50) {
+ logging::LogMessage(source_file_, line_, logging::LOG_INFO).stream()
+ << name_ << " transaction completed in " << elapsed.InSecondsF()
+ << " seconds.";
+ }
+
+ {
+ ScopedTransactionLock lock(&dirkernel_->transaction_mutex);
+ // Let go of the reader/writer lock
+ thread_counts->active -= 1;
+ ThreadNode* const node = reinterpret_cast<ThreadNode*>
+ (pthread_getspecific(dirkernel_->thread_node_key));
+ CHECK(node != NULL);
+ node->Remove();
+ node->current_write_trans = NULL;
+ if (0 == thread_counts->active) {
+ // Wake up a waiting thread, FIFO
+ if (dirkernel_->thread_counts.waiting > 0) {
+ ThreadNode* const headtail =
+ &dirkernel_->thread_counts.waiting_headtail;
+ ThreadNode* node = headtail->next;
+ node->wake_up = true;
+ CHECK(0 == pthread_cond_signal(&node->condvar.condvar_));
+ if (READ == node->tclass) do {
+ // Wake up all consecutive readers.
+ node = node->next;
+ if (node == headtail)
+ break;
+ if (READ != node->tclass)
+ break;
+ node->wake_up = true;
+ CHECK(0 == pthread_cond_signal(&node->condvar.condvar_));
+ } while (true);
+ }
+ }
+ if (NULL == originals.get() || originals->empty())
+ return;
+ dirkernel_->changes_channel_mutex.Lock();
+ // Tell listeners to calculate changes while we still have the mutex.
+ DirectoryChangeEvent event = { DirectoryChangeEvent::CALCULATE_CHANGES,
+ originals.get(), this, writer_ };
+ dirkernel_->changes_channel->NotifyListeners(event);
+ }
+ DirectoryChangeEvent event = { DirectoryChangeEvent::TRANSACTION_COMPLETE,
+ NULL, NULL, INVALID };
+ dirkernel_->changes_channel->NotifyListeners(event);
+ dirkernel_->changes_channel_mutex.Unlock();
+}
+
+ReadTransaction::ReadTransaction(Directory* directory, const char* file,
+ int line)
+ : BaseTransaction(directory, "Read", file, line) {
+ Init(&dirkernel_->thread_counts, READ);
+ writer_ = INVALID;
+}
+
+ReadTransaction::ReadTransaction(const ScopedDirLookup& scoped_dir,
+ const char* file, int line)
+ : BaseTransaction(scoped_dir.operator -> (), "Read", file, line) {
+ Init(&dirkernel_->thread_counts, READ);
+ writer_ = INVALID;
+}
+
+ReadTransaction::~ReadTransaction() {
+ UnlockAndLog(&dirkernel_->thread_counts, NULL);
+}
+
+WriteTransaction::WriteTransaction(Directory* directory, WriterTag writer,
+ const char* file, int line)
+ : BaseTransaction(directory, "Write", file, line), skip_destructor_(false),
+ originals_(new OriginalEntries) {
+ Init(&dirkernel_->thread_counts, WRITE);
+ writer_ = writer;
+}
+
+WriteTransaction::WriteTransaction(const ScopedDirLookup& scoped_dir,
+ WriterTag writer, const char* file, int line)
+ : BaseTransaction(scoped_dir.operator -> (), "Write", file, line),
+ skip_destructor_(false), originals_(new OriginalEntries) {
+ Init(&dirkernel_->thread_counts, WRITE);
+ writer_ = writer;
+}
+
+WriteTransaction::WriteTransaction(Directory* directory, const char* name,
+ WriterTag writer,
+ const char* file, int line,
+ bool skip_destructor,
+ OriginalEntries* originals)
+ : BaseTransaction(directory, name, file, line),
+ skip_destructor_(skip_destructor), originals_(originals) {
+ writer_ = writer;
+}
+
+void WriteTransaction::SaveOriginal(EntryKernel* entry) {
+ if (NULL == entry)
+ return;
+ OriginalEntries::iterator i = originals_->lower_bound(*entry);
+ if (i == originals_->end() ||
+ i->ref(META_HANDLE) != entry->ref(META_HANDLE)) {
+ originals_->insert(i, *entry);
+ }
+}
+
+WriteTransaction::~WriteTransaction() {
+ if (skip_destructor_)
+ return;
+ if (OFF != kInvariantCheckLevel) {
+ const bool full_scan = (FULL_DB_VERIFICATION == kInvariantCheckLevel);
+ if (full_scan)
+ directory()->CheckTreeInvariants(this, full_scan);
+ else
+ directory()->CheckTreeInvariants(this, originals_);
+ }
+ UnlockAndLog(&dirkernel_->thread_counts, originals_);
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Entry
+
+Entry::Entry(BaseTransaction* trans, GetById, const Id& id)
+ : basetrans_(trans) {
+ kernel_ = trans->directory()->GetEntryById(id);
+}
+
+Entry::Entry(BaseTransaction* trans, GetByTag, const PathString& tag)
+ : basetrans_(trans) {
+ kernel_ = trans->directory()->GetEntryByTag(tag);
+}
+
+Entry::Entry(BaseTransaction* trans, GetByHandle, int64 metahandle)
+ : basetrans_(trans) {
+ kernel_ = trans->directory()->GetEntryByHandle(metahandle);
+}
+
+Entry::Entry(BaseTransaction* trans, GetByPath, const PathString& path)
+ : basetrans_(trans) {
+ kernel_ = trans->directory()->GetEntryByPath(path);
+}
+
+Entry::Entry(BaseTransaction* trans, GetByParentIdAndName, const Id& parentid,
+ const PathString& name)
+ : basetrans_(trans) {
+ kernel_ = trans->directory()->GetChildWithName(parentid, name);
+}
+
+Entry::Entry(BaseTransaction* trans, GetByParentIdAndDBName, const Id& parentid,
+ const PathString& name)
+ : basetrans_(trans) {
+ kernel_ = trans->directory()->GetChildWithDBName(parentid, name);
+}
+
+
+Directory* Entry::dir() const {
+ return basetrans_->directory();
+}
+
+PathString Entry::Get(StringField field) const {
+ DCHECK(kernel_);
+ return kernel_->ref(field);
+}
+
+void Entry::GetAllExtendedAttributes(BaseTransaction* trans,
+ std::set<ExtendedAttribute> *result) {
+ dir()->GetAllExtendedAttributes(trans, kernel_->ref(META_HANDLE), result);
+}
+
+void Entry::GetExtendedAttributesList(BaseTransaction* trans,
+ AttributeKeySet* result) {
+ dir()->GetExtendedAttributesList(trans, kernel_->ref(META_HANDLE), result);
+}
+
+void Entry::DeleteAllExtendedAttributes(WriteTransaction *trans) {
+ dir()->DeleteAllExtendedAttributes(trans, kernel_->ref(META_HANDLE));
+}
+
+///////////////////////////////////////////////////////////////////////////
+// MutableEntry
+
+MutableEntry::MutableEntry(WriteTransaction* trans, Create,
+ const Id& parent_id, const PathString& name)
+ : Entry(trans) {
+ if (NULL != trans->directory()->GetChildWithName(parent_id, name)) {
+ kernel_ = NULL; // would have duplicated an existing entry.
+ return;
+ }
+ Init(trans, parent_id, name);
+}
+
+
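+// Populates a freshly allocated EntryKernel: assigns a new client ID and
+// metahandle, sets the name fields and timestamps (marking them dirty), and
+// inserts the kernel into the directory's in-memory indices.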
+void MutableEntry::Init(WriteTransaction* trans, const Id& parent_id,
+ const PathString& name) {
+ kernel_ = new EntryKernel;
+ ZeroFields(kernel_, BEGIN_FIELDS);
+ kernel_->ref(ID) = trans->directory_->NextId();
+ kernel_->dirty[ID] = true;
+ kernel_->ref(META_HANDLE) = trans->directory_->NextMetahandle();
+ kernel_->dirty[META_HANDLE] = true;
+ kernel_->ref(PARENT_ID) = parent_id;
+ kernel_->dirty[PARENT_ID] = true;
+ kernel_->ref(NAME) = name;
+ kernel_->dirty[NAME] = true;
+ kernel_->ref(NON_UNIQUE_NAME) = name;
+ kernel_->dirty[NON_UNIQUE_NAME] = true;
+ kernel_->ref(IS_NEW) = true;
+ const int64 now = Now();
+ kernel_->ref(CTIME) = now;
+ kernel_->dirty[CTIME] = true;
+ kernel_->ref(MTIME) = now;
+ kernel_->dirty[MTIME] = true;
+ // We match the database defaults here
+ kernel_->ref(BASE_VERSION) = CHANGES_VERSION;
+ trans->directory()->InsertEntry(kernel_);
+ // Because this entry is new, it was originally deleted.
+ kernel_->ref(IS_DEL) = true;
+ trans->SaveOriginal(kernel_);
+ kernel_->ref(IS_DEL) = false;
+}
+
+MutableEntry::MutableEntry(WriteTransaction* trans, CreateNewUpdateItem,
+ const Id& id)
+ : Entry(trans) {
+ Entry same_id(trans, GET_BY_ID, id);
+ if (same_id.good()) {
+ kernel_ = NULL; // already have an item with this ID.
+ return;
+ }
+ kernel_ = new EntryKernel;
+ ZeroFields(kernel_, BEGIN_FIELDS);
+ kernel_->ref(ID) = id;
+ kernel_->dirty[ID] = true;
+ kernel_->ref(META_HANDLE) = trans->directory_->NextMetahandle();
+ kernel_->dirty[META_HANDLE] = true;
+ kernel_->ref(IS_DEL) = true;
+ kernel_->dirty[IS_DEL] = true;
+ kernel_->ref(IS_NEW) = true;
+ // We match the database defaults here
+ kernel_->ref(BASE_VERSION) = CHANGES_VERSION;
+ trans->directory()->InsertEntry(kernel_);
+ trans->SaveOriginal(kernel_);
+}
+
+MutableEntry::MutableEntry(WriteTransaction* trans, GetById, const Id& id)
+ : Entry(trans, GET_BY_ID, id) {
+ trans->SaveOriginal(kernel_);
+}
+
+MutableEntry::MutableEntry(WriteTransaction* trans, GetByHandle,
+ int64 metahandle)
+ : Entry(trans, GET_BY_HANDLE, metahandle) {
+ trans->SaveOriginal(kernel_);
+}
+
+MutableEntry::MutableEntry(WriteTransaction* trans, GetByPath,
+ const PathString& path)
+ : Entry(trans, GET_BY_PATH, path) {
+ trans->SaveOriginal(kernel_);
+}
+
+MutableEntry::MutableEntry(WriteTransaction* trans, GetByParentIdAndName,
+ const Id& parentid, const PathString& name)
+ : Entry(trans, GET_BY_PARENTID_AND_NAME, parentid, name) {
+ trans->SaveOriginal(kernel_);
+}
+
+MutableEntry::MutableEntry(WriteTransaction* trans, GetByParentIdAndDBName,
+ const Id& parentid, const PathString& name)
+ : Entry(trans, GET_BY_PARENTID_AND_DBNAME, parentid, name) {
+ trans->SaveOriginal(kernel_);
+}
+
+bool MutableEntry::PutIsDel(bool is_del) {
+ DCHECK(kernel_);
+ if (is_del == kernel_->ref(IS_DEL))
+ return true;
+ if (is_del) {
+ UnlinkFromOrder();
+ if (!dir()->Delete(kernel_))
+ return false;
+ return true;
+ } else {
+ if (!dir()->Undelete(kernel_))
+ return false;
+ PutPredecessor(Id()); // Restores position to the 0th index.
+ return true;
+ }
+}
+
+bool MutableEntry::Put(Int64Field field, const int64& value) {
+ DCHECK(kernel_);
+ if (kernel_->ref(field) != value) {
+ kernel_->ref(field) = value;
+ kernel_->dirty[static_cast<int>(field)] = true;
+ }
+ return true;
+}
+
+bool MutableEntry::Put(IdField field, const Id& value) {
+ DCHECK(kernel_);
+ if (kernel_->ref(field) != value) {
+ if (ID == field) {
+ if (!dir()->ReindexId(kernel_, value))
+ return false;
+ } else if (PARENT_ID == field) {
+ if (!dir()->ReindexParentIdAndName(kernel_, value, kernel_->ref(NAME)))
+ return false;
+ } else {
+ kernel_->ref(field) = value;
+ }
+ kernel_->dirty[static_cast<int>(field)] = true;
+ }
+ return true;
+}
+
+WriteTransaction* MutableEntry::trans() const {
+ // We are in a mutable entry, so we must be in a write transaction.
+ // Maybe we could keep a pointer to the transaction in MutableEntry.
+ ThreadNode* node = reinterpret_cast<ThreadNode*>
+ (pthread_getspecific(dir()->kernel_->thread_node_key));
+ return node->current_write_trans;
+}
+
+bool MutableEntry::Put(BaseVersion field, int64 value) {
+ DCHECK(kernel_);
+ if (kernel_->ref(field) != value) {
+ kernel_->ref(field) = value;
+ kernel_->dirty[static_cast<int>(field)] = true;
+ }
+ return true;
+}
+
+bool MutableEntry::Put(StringField field, const PathString& value) {
+ return PutImpl(field, value);
+}
+
+bool MutableEntry::PutImpl(StringField field, const PathString& value) {
+ DCHECK(kernel_);
+ if (kernel_->ref(field) != value) {
+ if (NAME == field) {
+ if (!dir()->ReindexParentIdAndName(kernel_, kernel_->ref(PARENT_ID),
+ value))
+ return false;
+ } else {
+ kernel_->ref(field) = value;
+ }
+ kernel_->dirty[static_cast<int>(field)] = true;
+ }
+ return true;
+}
+
+bool MutableEntry::Put(IndexedBitField field, bool value) {
+ DCHECK(kernel_);
+ if (kernel_->ref(field) != value) {
+ MetahandleSet* index;
+ if (IS_UNSYNCED == field)
+ index = dir()->kernel_->unsynced_metahandles;
+ else
+ index = dir()->kernel_->unapplied_update_metahandles;
+
+ ScopedKernelLock lock(dir());
+ if (value)
+ CHECK(index->insert(kernel_->ref(META_HANDLE)).second);
+ else
+ CHECK(1 == index->erase(kernel_->ref(META_HANDLE)));
+ kernel_->ref(field) = value;
+ kernel_->dirty[static_cast<int>(field)] = true;
+ }
+ return true;
+}
+
+// Avoids temporary collision in index when renaming a bookmark
+// to another folder.
+bool MutableEntry::PutParentIdAndName(const Id& parent_id,
+ const Name& name) {
+ DCHECK(kernel_);
+ const bool parent_id_changes = parent_id != kernel_->ref(PARENT_ID);
+ bool db_name_changes = name.db_value() != kernel_->ref(NAME);
+ if (parent_id_changes || db_name_changes) {
+ if (!dir()->ReindexParentIdAndName(kernel_, parent_id,
+ name.db_value()))
+ return false;
+ }
+ Put(UNSANITIZED_NAME, name.GetUnsanitizedName());
+ Put(NON_UNIQUE_NAME, name.non_unique_value());
+ if (db_name_changes)
+ kernel_->dirty[NAME] = true;
+ if (parent_id_changes) {
+ kernel_->dirty[PARENT_ID] = true;
+ PutPredecessor(Id()); // Put in the 0th position.
+ }
+ return true;
+}
+
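+// Removes this entry from its sibling ordering by pointing its old neighbors
+// at each other and self-looping this entry's PREV_ID/NEXT_ID.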
+void MutableEntry::UnlinkFromOrder() {
+ Id old_previous = Get(PREV_ID);
+ Id old_next = Get(NEXT_ID);
+
+ // Self-looping signifies that this item is not in the order. If
+ // we were to set these to 0, we could get into trouble because
+ // this node might look like the first node in the ordering.
+ Put(NEXT_ID, Get(ID));
+ Put(PREV_ID, Get(ID));
+
+ if (!old_previous.IsRoot()) {
+ if (old_previous == old_next) {
+ // Note previous == next doesn't imply previous == next == Get(ID). We
+ // could have prev==next=="c-XX" and Get(ID)=="sX..." if an item was added
+ // and deleted before receiving the server ID in the commit response.
+ CHECK((old_next == Get(ID)) || !old_next.ServerKnows());
+ return; // Done if we were already self-looped (hence unlinked).
+ }
+ MutableEntry previous_entry(trans(), GET_BY_ID, old_previous);
+ CHECK(previous_entry.good());
+ previous_entry.Put(NEXT_ID, old_next);
+ }
+
+ if (!old_next.IsRoot()) {
+ MutableEntry next_entry(trans(), GET_BY_ID, old_next);
+ CHECK(next_entry.good());
+ next_entry.Put(PREV_ID, old_previous);
+ }
+}
+
+bool MutableEntry::PutPredecessor(const Id& predecessor_id) {
+ // TODO(ncarter): Maybe there should be an independent HAS_POSITION bit?
+ if (!Get(IS_BOOKMARK_OBJECT))
+ return true;
+ UnlinkFromOrder();
+
+ if (Get(IS_DEL)) {
+ DCHECK(predecessor_id.IsNull());
+ return true;
+ }
+
+ // This is classic insert-into-doubly-linked-list from CS 101 and your last
+ // job interview. An "IsRoot" Id signifies the head or tail.
+ Id successor_id;
+ if (!predecessor_id.IsRoot()) {
+ MutableEntry predecessor(trans(), GET_BY_ID, predecessor_id);
+ CHECK(predecessor.good());
+ if (predecessor.Get(PARENT_ID) != Get(PARENT_ID))
+ return false;
+ successor_id = predecessor.Get(NEXT_ID);
+ predecessor.Put(NEXT_ID, Get(ID));
+ } else {
+ syncable::Directory* dir = trans()->directory();
+ successor_id = dir->GetFirstChildId(trans(), Get(PARENT_ID));
+ }
+ if (!successor_id.IsRoot()) {
+ MutableEntry successor(trans(), GET_BY_ID, successor_id);
+ CHECK(successor.good());
+ if (successor.Get(PARENT_ID) != Get(PARENT_ID))
+ return false;
+ successor.Put(PREV_ID, Get(ID));
+ }
+ DCHECK(predecessor_id != Get(ID));
+ DCHECK(successor_id != Get(ID));
+ Put(PREV_ID, predecessor_id);
+ Put(NEXT_ID, successor_id);
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////
+// High-level functions
+
+int64 Directory::NextMetahandle() {
+ ScopedKernelLock lock(this);
+ int64 metahandle = (kernel_->next_metahandle)++;
+ return metahandle;
+}
+
+// Always returns a client ID that is the string representation of a negative
+// number.
+Id Directory::NextId() {
+ int64 result;
+ {
+ ScopedKernelLock lock(this);
+ result = (kernel_->next_id)--;
+ kernel_->info_status_ = KERNEL_SHARE_INFO_DIRTY;
+ }
+ DCHECK_LT(result, 0);
+ return Id::CreateFromClientString(Int64ToString(result));
+}
+
+Id Directory::GetChildWithNullIdField(IdField field,
+ BaseTransaction* trans,
+ const Id& parent_id) {
+ // This query is O(number of children), which should be acceptable
+ // when this method is used as the first step in enumerating the children of
+ // a node. But careless use otherwise could potentially result in
+ // O((number of children)^2) performance.
+ ChildHandles child_handles;
+ GetChildHandles(trans, parent_id, &child_handles);
+ ChildHandles::const_iterator it;
+ for (it = child_handles.begin(); it != child_handles.end(); ++it) {
+ Entry e(trans, GET_BY_HANDLE, *it);
+ CHECK(e.good());
+ if (e.Get(field).IsRoot())
+ return e.Get(ID);
+ }
+
+ return Id();
+}
+
+Id Directory::GetFirstChildId(BaseTransaction* trans,
+ const Id& parent_id) {
+ return GetChildWithNullIdField(PREV_ID, trans, parent_id);
+}
+
+Id Directory::GetLastChildId(BaseTransaction* trans,
+ const Id& parent_id) {
+ return GetChildWithNullIdField(NEXT_ID, trans, parent_id);
+}
+
+ExtendedAttribute::ExtendedAttribute(BaseTransaction* trans, GetByHandle,
+ const ExtendedAttributeKey& key) {
+ Directory::Kernel* const kernel = trans->directory()->kernel_;
+ ScopedKernelLock lock(trans->directory());
+ Init(trans, kernel, &lock, key);
+}
+
+bool ExtendedAttribute::Init(BaseTransaction* trans,
+ Directory::Kernel* const kernel,
+ ScopedKernelLock* lock,
+ const ExtendedAttributeKey& key) {
+ i_ = kernel->extended_attributes->find(key);
+ good_ = kernel->extended_attributes->end() != i_;
+ return good_;
+}
+
+MutableExtendedAttribute::MutableExtendedAttribute(
+ WriteTransaction* trans, GetByHandle,
+ const ExtendedAttributeKey& key) :
+ ExtendedAttribute(trans, GET_BY_HANDLE, key) {
+}
+
+MutableExtendedAttribute::MutableExtendedAttribute(
+ WriteTransaction* trans, Create, const ExtendedAttributeKey& key) {
+ Directory::Kernel* const kernel = trans->directory()->kernel_;
+ ScopedKernelLock lock(trans->directory());
+ if (!Init(trans, kernel, &lock, key)) {
+ ExtendedAttributeValue val;
+ val.dirty = true;
+ i_ = kernel->extended_attributes->insert(std::make_pair(key, val)).first;
+ good_ = true;
+ }
+}
+
+bool IsLegalNewParent(BaseTransaction* trans, const Id& entry_id,
+ const Id& new_parent_id) {
+ if (entry_id.IsRoot())
+ return false;
+ // we have to ensure that the entry is not an ancestor of the new parent.
+ Id ancestor_id = new_parent_id;
+ while (!ancestor_id.IsRoot()) {
+ if (entry_id == ancestor_id)
+ return false;
+ Entry new_parent(trans, GET_BY_ID, ancestor_id);
+ CHECK(new_parent.good());
+ ancestor_id = new_parent.Get(PARENT_ID);
+ }
+ return true;
+}
+
+// Returns -1 if s contains any non-[0-9] characters.
+static int PathStringToInteger(PathString s) {
+ PathString::const_iterator i = s.begin();
+ for (; i != s.end(); ++i) {
+ if (PathString::npos == PathString(PSTR("0123456789")).find(*i))
+ return -1;
+ }
+ return
+#if !PATHSTRING_IS_STD_STRING
+ _wtoi
+#else
+ atoi
+#endif
+ (s.c_str());
+}
+
+static PathString IntegerToPathString(int i) {
+ const size_t kBufSize = 25;
+ PathChar buf[kBufSize];
+#if !PATHSTRING_IS_STD_STRING
+ const int radix = 10;
+ _itow(i, buf, radix);
+#else
+ snprintf(buf, kBufSize, "%d", i);
+#endif
+ return buf;
+}
+
+// Appends ~1 to the end of 's' unless there is already a trailing ~#, in
+// which case the number is simply incremented.
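+// For example: "foo" -> "foo~1", "foo~1" -> "foo~2", and
+// "foo~bar" -> "foo~bar~1".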
+static PathString FixBasenameInCollision(const PathString s) {
+ PathString::size_type last_tilde = s.find_last_of(PSTR('~'));
+ if (PathString::npos == last_tilde) return s + PSTR("~1");
+ if (s.size() == (last_tilde + 1)) return s + PSTR("1");
+  // We have a ~, but not necessarily ~# (for some number >= 0); check for
+  // that.
+ int n;
+ if ((n = PathStringToInteger(s.substr(last_tilde + 1))) != -1) {
+ n++;
+ PathString pre_number = s.substr(0, last_tilde + 1);
+ return pre_number + IntegerToPathString(n);
+ } else {
+    // We have a ~ but no number following it, so we'll add another ~ and,
+    // this time, a number.
+ return s + PSTR("~1");
+ }
+}
+
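+// Splits the desired name into a basename and a ".extension" at the first
+// dot, then repeatedly applies FixBasenameInCollision to the basename until
+// no other entry under |parent_id| (other than |e|) uses the resulting name.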
+void DBName::MakeNoncollidingForEntry(BaseTransaction* trans,
+ const Id& parent_id,
+ Entry *e) {
+ const PathString& desired_name = *this;
+ CHECK(!desired_name.empty());
+ PathString::size_type first_dot = desired_name.find_first_of(PSTR('.'));
+ if (PathString::npos == first_dot)
+ first_dot = desired_name.size();
+ PathString basename = desired_name.substr(0, first_dot);
+ PathString dotextension = desired_name.substr(first_dot);
+ CHECK(basename + dotextension == desired_name);
+ for (;;) {
+ // check for collision
+ PathString testname = basename + dotextension;
+ Entry same_path_entry(trans, GET_BY_PARENTID_AND_DBNAME,
+ parent_id, testname);
+ if (!same_path_entry.good() || (e && same_path_entry.Get(ID) == e->Get(ID)))
+ break;
+ // there was a collision, so fix the name
+ basename = FixBasenameInCollision(basename);
+ }
+ // Set our value to the new value. This invalidates desired_name.
+ PathString new_value = basename + dotextension;
+ swap(new_value);
+}
+
+PathString GetFullPath(BaseTransaction* trans, const Entry& e) {
+ PathString result;
+#ifdef STL_MSVC
+ result.reserve(MAX_PATH);
+#endif
+ ReverseAppend(e.Get(NAME), &result);
+ Id id = e.Get(PARENT_ID);
+ while (!id.IsRoot()) {
+ result.push_back(kPathSeparator[0]);
+ Entry ancestor(trans, GET_BY_ID, id);
+ if (!ancestor.good()) {
+ // This can happen if the parent folder got deleted before the entry.
+ LOG(WARNING) << "Cannot get full path of " << e
+ << "\nbecause an ancestor folder has been deleted.";
+ result.clear();
+ return result;
+ }
+ ReverseAppend(ancestor.Get(NAME), &result);
+ id = ancestor.Get(PARENT_ID);
+ }
+ result.push_back(kPathSeparator[0]);
+ reverse(result.begin(), result.end());
+ return result;
+}
+
+const Blob* GetExtendedAttributeValue(const Entry& e,
+ const PathString& attribute_name) {
+ ExtendedAttributeKey key(e.Get(META_HANDLE), attribute_name);
+ ExtendedAttribute extended_attribute(e.trans(), GET_BY_HANDLE, key);
+ if (extended_attribute.good() && !extended_attribute.is_deleted())
+ return &extended_attribute.value();
+ return NULL;
+}
+
+// This function sets only the flags needed to get this entry to sync.
+void MarkForSyncing(syncable::MutableEntry* e) {
+ DCHECK_NE(static_cast<MutableEntry*>(NULL), e);
+ DCHECK(!e->IsRoot()) << "We shouldn't mark a permanent object for syncing.";
+ e->Put(IS_UNSYNCED, true);
+ e->Put(SYNCING, false);
+}
+
+} // namespace syncable
+
+namespace {
+ class DumpSeparator {
+ } separator;
+ class DumpColon {
+ } colon;
+} // namespace
+
+inline FastDump& operator<<(FastDump& dump, const DumpSeparator&) {
+ dump.out_->sputn(", ", 2);
+ return dump;
+}
+
+inline FastDump& operator<<(FastDump& dump, const DumpColon&) {
+ dump.out_->sputn(": ", 2);
+ return dump;
+}
+
+std::ostream& operator<<(std::ostream& stream, const syncable::Entry& entry) {
+ // Using ostreams directly here is dreadfully slow, because a mutex is
+ // acquired for every <<. Users noticed it spiking CPU.
+ using browser_sync::ToUTF8;
+ using syncable::BitField;
+ using syncable::BitTemp;
+ using syncable::BlobField;
+ using syncable::EntryKernel;
+ using syncable::g_metas_columns;
+ using syncable::IdField;
+ using syncable::Int64Field;
+ using syncable::StringField;
+ using syncable::BEGIN_FIELDS;
+ using syncable::BIT_FIELDS_END;
+ using syncable::BIT_TEMPS_BEGIN;
+ using syncable::BIT_TEMPS_END;
+ using syncable::BLOB_FIELDS_END;
+ using syncable::INT64_FIELDS_END;
+ using syncable::ID_FIELDS_END;
+ using syncable::STRING_FIELDS_END;
+
+ int i;
+ FastDump s(&stream);
+ EntryKernel* const kernel = entry.kernel_;
+ for (i = BEGIN_FIELDS; i < INT64_FIELDS_END; ++i) {
+ s << g_metas_columns[i].name << colon
+ << kernel->ref(static_cast<Int64Field>(i)) << separator;
+ }
+ for ( ; i < ID_FIELDS_END; ++i) {
+ s << g_metas_columns[i].name << colon
+ << kernel->ref(static_cast<IdField>(i)) << separator;
+ }
+ s << "Flags: ";
+ for ( ; i < BIT_FIELDS_END; ++i) {
+ if (kernel->ref(static_cast<BitField>(i)))
+ s << g_metas_columns[i].name << separator;
+ }
+ for ( ; i < STRING_FIELDS_END; ++i) {
+ ToUTF8 field(kernel->ref(static_cast<StringField>(i)));
+ s << g_metas_columns[i].name << colon << field.get_string() << separator;
+ }
+ for ( ; i < BLOB_FIELDS_END; ++i) {
+ s << g_metas_columns[i].name << colon
+ << kernel->ref(static_cast<BlobField>(i)) << separator;
+ }
+ s << "TempFlags: ";
+ for ( ; i < BIT_TEMPS_END; ++i) {
+ if (kernel->ref(static_cast<BitTemp>(i)))
+ s << "#" << i - BIT_TEMPS_BEGIN << separator;
+ }
+ return stream;
+}
+
+std::ostream& operator<<(std::ostream& s, const syncable::Blob& blob) {
+ for (syncable::Blob::const_iterator i = blob.begin(); i != blob.end(); ++i)
+ s << std::hex << std::setw(2)
+ << std::setfill('0') << static_cast<unsigned int>(*i);
+ return s << std::dec;
+}
+
+FastDump& operator<<(FastDump& dump, const syncable::Blob& blob) {
+ if (blob.empty())
+ return dump;
+ string buffer(HexEncode(&blob[0], blob.size()));
+ dump.out_->sputn(buffer.c_str(), buffer.size());
+ return dump;
+}
+
+std::ostream& operator<<(std::ostream& s, const syncable::ThreadNode& node) {
+ s << "thread id: " << std::hex << node.id << "\n"
+ << "file: " << node.file << "\n"
+ << "line: " << std::dec << node.line << "\n"
+ << "wait_started: " << node.wait_started.ToInternalValue();
+ return s;
+}
diff --git a/chrome/browser/sync/syncable/syncable.h b/chrome/browser/sync/syncable/syncable.h
new file mode 100644
index 0000000..d2e8353
--- /dev/null
+++ b/chrome/browser/sync/syncable/syncable.h
@@ -0,0 +1,1419 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_SYNCABLE_SYNCABLE_H_
+#define CHROME_BROWSER_SYNC_SYNCABLE_SYNCABLE_H_
+
+#include <algorithm>
+#include <bitset>
+#include <iosfwd>
+#include <map>
+#include <set>
+#include <string>
+#include <vector>
+
+#include "base/atomicops.h"
+#include "base/basictypes.h"
+#include "base/time.h"
+#include "chrome/browser/sync/syncable/blob.h"
+#include "chrome/browser/sync/syncable/dir_open_result.h"
+#include "chrome/browser/sync/syncable/directory_event.h"
+#include "chrome/browser/sync/syncable/path_name_cmp.h"
+#include "chrome/browser/sync/syncable/syncable_id.h"
+#include "chrome/browser/sync/util/compat-file.h"
+#include "chrome/browser/sync/util/compat-pthread.h"
+#include "chrome/browser/sync/util/dbgq.h"
+#include "chrome/browser/sync/util/event_sys.h"
+#include "chrome/browser/sync/util/path_helpers.h"
+#include "chrome/browser/sync/util/pthread_helpers.h"
+#include "chrome/browser/sync/util/row_iterator.h"
+#include "chrome/browser/sync/util/sync_types.h"
+
+struct PurgeInfo;
+
+namespace sync_api {
+class ReadTransaction;
+class WriteNode;
+class ReadNode;
+}
+
+namespace syncable {
+class Entry;
+}
+
+std::ostream& operator<<(std::ostream& s, const syncable::Entry& e);
+
+namespace syncable {
+
+class DirectoryBackingStore;
+
+static const int64 kInvalidMetaHandle = 0;
+
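+// The EntryKernel fields are laid out as contiguous index ranges -- int64,
+// id, bit, string, and blob fields, followed by in-memory-only temporaries --
+// so that generic code can walk an entry with a single integer index.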
+enum {
+ BEGIN_FIELDS = 0,
+ INT64_FIELDS_BEGIN = BEGIN_FIELDS
+};
+
+enum MetahandleField {
+ // Primary key into the table. Keep this as a handle to the meta entry
+ // across transactions.
+ META_HANDLE = INT64_FIELDS_BEGIN
+};
+
+enum BaseVersion {
+ // After initial upload, the version is controlled by the server, and is
+ // increased whenever the data or metadata changes on the server.
+ BASE_VERSION = META_HANDLE + 1,
+};
+
+enum Int64Field {
+ SERVER_VERSION = BASE_VERSION + 1,
+ MTIME,
+ SERVER_MTIME,
+ CTIME,
+ SERVER_CTIME,
+
+ // A numeric position value that indicates the relative ordering of
+ // this object among its siblings.
+ SERVER_POSITION_IN_PARENT,
+
+  LOCAL_EXTERNAL_ID, // ID of an item in the external local storage that this
+                     // entry is associated with (such as bookmarks.js).
+
+ INT64_FIELDS_END
+};
+
+enum {
+ INT64_FIELDS_COUNT = INT64_FIELDS_END,
+ ID_FIELDS_BEGIN = INT64_FIELDS_END,
+};
+
+enum IdField {
+ // Code in InitializeTables relies on ID being the first IdField value.
+ ID = ID_FIELDS_BEGIN,
+ PARENT_ID,
+ SERVER_PARENT_ID,
+
+ PREV_ID,
+ NEXT_ID,
+ ID_FIELDS_END
+};
+
+enum {
+ ID_FIELDS_COUNT = ID_FIELDS_END - ID_FIELDS_BEGIN,
+ BIT_FIELDS_BEGIN = ID_FIELDS_END
+};
+
+enum IndexedBitField {
+ IS_UNSYNCED = BIT_FIELDS_BEGIN,
+ IS_UNAPPLIED_UPDATE,
+ INDEXED_BIT_FIELDS_END,
+};
+
+enum IsDelField {
+ IS_DEL = INDEXED_BIT_FIELDS_END,
+};
+
+enum BitField {
+ IS_DIR = IS_DEL + 1,
+ IS_BOOKMARK_OBJECT,
+
+ SERVER_IS_DIR,
+ SERVER_IS_DEL,
+ SERVER_IS_BOOKMARK_OBJECT,
+
+ BIT_FIELDS_END
+};
+
+enum {
+ BIT_FIELDS_COUNT = BIT_FIELDS_END - BIT_FIELDS_BEGIN,
+ STRING_FIELDS_BEGIN = BIT_FIELDS_END
+};
+
+enum StringField {
+ // The name, transformed so as to be suitable for use as a path-element. It
+ // is unique, and legal for this client.
+ NAME = STRING_FIELDS_BEGIN,
+ // The local name, pre-sanitization. It is not necessarily unique. If this
+ // is empty, it means |NAME| did not require sanitization.
+ UNSANITIZED_NAME,
+ // If NAME/UNSANITIZED_NAME are "Foo (2)", then NON_UNIQUE_NAME may be "Foo".
+ NON_UNIQUE_NAME,
+ // The server version of |NAME|. It is uniquified, but not necessarily
+ // OS-legal.
+ SERVER_NAME,
+ // The server version of |NON_UNIQUE_NAME|. Again, if SERVER_NAME is
+ // like "Foo (2)" due to a commit-time name aside, SERVER_NON_UNIQUE_NAME
+ // may hold the value "Foo".
+ SERVER_NON_UNIQUE_NAME,
+ // For bookmark entries, the URL of the bookmark.
+ BOOKMARK_URL,
+ SERVER_BOOKMARK_URL,
+
+ // A tag string which identifies this node as a particular top-level
+ // permanent object. The tag can be thought of as a unique key that
+ // identifies a singleton instance.
+ SINGLETON_TAG,
+ STRING_FIELDS_END,
+};
+
+enum {
+ STRING_FIELDS_COUNT = STRING_FIELDS_END - STRING_FIELDS_BEGIN,
+ BLOB_FIELDS_BEGIN = STRING_FIELDS_END
+};
+
+// From looking at the sqlite3 docs, it's not directly stated, but it
+// seems the overhead for storing a NULL blob is very small.
+enum BlobField {
+ // For bookmark entries, the favicon data. These will be NULL for
+ // non-bookmark items.
+ BOOKMARK_FAVICON = BLOB_FIELDS_BEGIN,
+ SERVER_BOOKMARK_FAVICON,
+ BLOB_FIELDS_END,
+};
+
+enum {
+ BLOB_FIELDS_COUNT = BLOB_FIELDS_END - BLOB_FIELDS_BEGIN
+};
+
+enum {
+ FIELD_COUNT = BLOB_FIELDS_END,
+ // Past this point we have temporaries, stored in memory only.
+ BEGIN_TEMPS = BLOB_FIELDS_END,
+ BIT_TEMPS_BEGIN = BEGIN_TEMPS,
+};
+
+enum BitTemp {
+ SYNCING = BIT_TEMPS_BEGIN,
+ IS_NEW, // Means use INSERT instead of UPDATE to save to db.
+ DEPRECATED_DELETE_ON_CLOSE, // Set by redirector, IS_OPEN must also be set.
+ DEPRECATED_CHANGED_SINCE_LAST_OPEN, // Have we been written to since we've
+ // been opened.
+ BIT_TEMPS_END,
+};
+
+enum {
+ BIT_TEMPS_COUNT = BIT_TEMPS_END - BIT_TEMPS_BEGIN
+};
+
+class BaseTransaction;
+class WriteTransaction;
+class ReadTransaction;
+class Directory;
+class ScopedDirLookup;
+class ExtendedAttribute;
+
+// Instead of:
+// Entry e = transaction.GetById(id);
+// use:
+// Entry e(transaction, GET_BY_ID, id);
+//
+// Why? The former would require a copy constructor, and it would be difficult
+// to enforce that an entry never outlived its transaction if there were a copy
+// constructor.
+enum GetById {
+ GET_BY_ID
+};
+
+enum GetByTag {
+ GET_BY_TAG
+};
+
+enum GetByHandle {
+ GET_BY_HANDLE
+};
+
+enum GetByPath {
+ GET_BY_PATH
+};
+
+enum GetByParentIdAndName {
+ GET_BY_PARENTID_AND_NAME
+};
+
+// DBName is the name stored in the database.
+enum GetByParentIdAndDBName {
+ GET_BY_PARENTID_AND_DBNAME
+};
+
+enum Create {
+ CREATE
+};
+
+enum CreateNewUpdateItem {
+ CREATE_NEW_UPDATE_ITEM
+};
+
+typedef std::set<PathString> AttributeKeySet;
+
+// DBName is a PathString with additional transformation methods that are
+// useful when trying to derive a unique and legal database name from
+// an unsanitized sync name.
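+//
+// Illustrative sketch only (assumes a WriteTransaction |trans|, a parent id
+// |parent_id|, and a SyncName |server_name| are in scope):
+//
+//   DBName db_name(server_name.value());
+//   db_name.MakeOSLegal();
+//   db_name.MakeNoncollidingForEntry(&trans, parent_id, NULL);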
+class DBName : public PathString {
+ public:
+ explicit DBName(const PathString& database_name)
+ : PathString(database_name) { }
+
+  // TODO(ncarter): Remove these codepaths, which maintain alternate titles
+  // that are OS-legal filenames; Chrome doesn't depend on this the way some
+  // other browsers do.
+ void MakeOSLegal() {
+ PathString new_value = MakePathComponentOSLegal(*this);
+ if (!new_value.empty())
+ swap(new_value);
+ }
+
+ // Modify the value of this DBName so that it is not in use by any entry
+ // inside |parent_id|, except maybe |e|. |e| may be NULL if you are trying
+ // to compute a name for an entry which has yet to be created.
+ void MakeNoncollidingForEntry(BaseTransaction* trans,
+ const Id& parent_id,
+ Entry *e);
+};
+
+// SyncName encapsulates a canonical server name.  In general, when we need to
+// muck around with a name that the server sends us (e.g. to make it OS legal),
+// we try to preserve the original value in a SyncName, and distill the new
+// local value into a DBName.  Sometimes we need to apply transforms in the
+// other direction -- that is, to create a server-appropriate SyncName from a
+// user-updated DBName (which is an OS-legal name, but not necessarily in the
+// format that the server wants it to be).  For that, initialize a SyncName
+// from the DB name value and use the methods of SyncName to canonicalize it.
+// At other times you have a pair of canonical server values -- one (the
+// "value") which is unique in the parent, and another (the "non unique
+// value") which is not -- and you simply want a SyncName to hold them as a
+// pair.
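+//
+// A sketch of the DBName -> SyncName direction described above (illustrative
+// only; assumes |db_name| holds a user-edited, OS-legal name):
+//
+//   SyncName sync_name(db_name);  // value() and non_unique_value() == db_name.
+//   sync_name.MakeServerLegal();  // e.g. an empty name becomes " ".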
+class SyncName {
+ public:
+ // Create a SyncName with the initially specified value.
+ explicit SyncName(const PathString& sync_name)
+ : value_(sync_name), non_unique_value_(sync_name) { }
+
+ // Create a SyncName by specifying a value and a non-unique value. If
+ // you use this constructor, the values you provide should already be
+ // acceptable server names. Don't use the mutation/sanitization methods
+ // on the resulting instance -- mutation won't work if you have distinct
+ // values for the unique and non-unique fields.
+ SyncName(const PathString& unique_sync_name,
+ const PathString& non_unique_sync_name)
+ : value_(unique_sync_name), non_unique_value_(non_unique_sync_name) { }
+
+#ifdef OS_MACOSX
+ // Translate [':' -> '/'] within the sync name. Used on OSX.
+ void ConvertColonsToSlashes() {
+ DCHECK_EQ(value_, non_unique_value_)
+ << "Deriving value_ will overwrite non_unique_value_.";
+ std::string temporary_copy;
+ temporary_copy.reserve(value_.size());
+ StringReplace(value_, ":", "/", true, &temporary_copy);
+ value_.swap(temporary_copy);
+ non_unique_value_ = value_;
+ }
+#endif
+
+ // Transform |value_| so that it's a legal server name.
+ void MakeServerLegal() {
+ DCHECK_EQ(value_, non_unique_value_)
+ << "Deriving value_ will overwrite non_unique_value_.";
+ // Append a trailing space if the value is one of the server's three
+ // forbidden special cases.
+ if (value_.empty() ||
+ value_ == PSTR(".") ||
+ value_ == PSTR("..")) {
+ value_.append(PSTR(" "));
+ non_unique_value_ = value_;
+ }
+ // TODO(ncarter): Handle server's other requirement: truncation to
+ // 256 bytes in Unicode NFC.
+ }
+
+ const PathString& value() const { return value_; }
+ PathString& value() { return value_; }
+ const PathString& non_unique_value() const { return non_unique_value_; }
+ PathString& non_unique_value() { return non_unique_value_; }
+
+ inline bool operator==(const SyncName& right_hand_side) const {
+ return value_ == right_hand_side.value_ &&
+ non_unique_value_ == right_hand_side.non_unique_value_;
+ }
+ inline bool operator!=(const SyncName& right_hand_side) const {
+ return !(*this == right_hand_side);
+ }
+ private:
+ PathString value_;
+ PathString non_unique_value_;
+};
+
+// Name is a SyncName which has an additional DBName that provides a way to
+// interpolate the "unsanitized name" according to the syncable convention.
+//
+// A method might accept a Name as a parameter when the sync and database
+// names need to be set simultaneously:
+//
+// void PutName(const Name& new_name) {
+// Put(NAME, new_name.db_value());
+// Put(UNSANITIZED_NAME, new_name.GetUnsanitizedName());
+// }
+//
+// Code that needs to convert between local database names and server sync
+// names can use Name to help with the conversion:
+//
+// SyncName server_name = entry->GetServerName();
+// Name name = Name::FromSyncName(server_name); // Initially, name.value()
+// // and name.db_value() are
+// // equal to
+// // server_name.value().
+// name.db_value().MakeOSLegal(); // Updates name.db_value in-place,
+// // leaving name.value() unchanged.
+// foo->PutName(name);
+//
+class Name : public SyncName {
+ public:
+ // Create a Name with an initially specified db_value and value.
+ Name(const PathString& db_name, const PathString& sync_name)
+ : SyncName(sync_name), db_value_(db_name) { }
+
+ // Create a Name by specifying the db name, sync name, and non-unique
+ // sync name values.
+ Name(const PathString& db_name, const PathString& sync_name,
+ const PathString& non_unique_sync_name)
+ : SyncName(sync_name, non_unique_sync_name), db_value_(db_name) { }
+
+  // Create a Name with all name values initially equal to the single
+ // specified argument.
+ explicit Name(const PathString& sync_and_db_name)
+ : SyncName(sync_and_db_name), db_value_(sync_and_db_name) { }
+
+ // Create a Name using the local (non-SERVER) fields of an EntryKernel.
+ static Name FromEntryKernel(struct EntryKernel*);
+
+ // Create a Name from a SyncName. db_value is initially sync_name.value().
+ // non_unique_value() and value() are copied from |sync_name|.
+ static Name FromSyncName(const SyncName& sync_name) {
+ return Name(sync_name.value(), sync_name.value(),
+ sync_name.non_unique_value());
+ }
+
+ static Name FromDBNameAndSyncName(const PathString& db_name,
+ const SyncName& sync_name) {
+ return Name(db_name, sync_name.value(), sync_name.non_unique_value());
+ }
+
+ // Get the database name. The non-const version is useful for in-place
+ // mutation.
+ const DBName& db_value() const { return db_value_; }
+ DBName& db_value() { return db_value_; }
+
+ // Do the sync names and database names differ? This indicates that
+ // the sync name has been sanitized, and that GetUnsanitizedName() will
+ // be non-empty.
+ bool HasBeenSanitized() const { return db_value_ != value(); }
+
+ // Compute the value of the unsanitized name from the current sync and db
+ // name values. The unsanitized name is the sync name value, unless the sync
+ // name is the same as the db name value, in which case the unsanitized name
+ // is empty.
+ PathString GetUnsanitizedName() const {
+ return HasBeenSanitized() ? value() : PathString();
+ }
+
+ inline bool operator==(const Name& right_hand_side) const {
+ return this->SyncName::operator==(right_hand_side) &&
+ db_value_ == right_hand_side.db_value_;
+ }
+ inline bool operator!=(const Name& right_hand_side) const {
+ return !(*this == right_hand_side);
+ }
+
+ private:
+ // The database name, which is maintained to be a legal and unique-in-parent
+ // name.
+ DBName db_value_;
+};
+
+// Why the single-value enums?  So the code dispatches at compile time instead
+// of at run time, as it would with a single enum and an if() statement.
+
+// The EntryKernel class contains the actual data for an entry. It
+// would be a private class, except the number of required friend
+// declarations would bloat the code.
+struct EntryKernel {
+ protected:
+ PathString string_fields[STRING_FIELDS_COUNT];
+ Blob blob_fields[BLOB_FIELDS_COUNT];
+ int64 int64_fields[INT64_FIELDS_COUNT];
+ Id id_fields[ID_FIELDS_COUNT];
+ std::bitset<BIT_FIELDS_COUNT> bit_fields;
+ std::bitset<BIT_TEMPS_COUNT> bit_temps;
+
+ public:
+ std::bitset<FIELD_COUNT> dirty;
+
+ // Contain all this error-prone arithmetic in one place.
+ inline int64& ref(MetahandleField field) {
+ return int64_fields[field - INT64_FIELDS_BEGIN];
+ }
+ inline int64& ref(Int64Field field) {
+ return int64_fields[field - INT64_FIELDS_BEGIN];
+ }
+ inline Id& ref(IdField field) {
+ return id_fields[field - ID_FIELDS_BEGIN];
+ }
+ inline int64& ref(BaseVersion field) {
+ return int64_fields[field - INT64_FIELDS_BEGIN];
+ }
+ inline std::bitset<BIT_FIELDS_COUNT>::reference ref(IndexedBitField field) {
+ return bit_fields[field - BIT_FIELDS_BEGIN];
+ }
+ inline std::bitset<BIT_FIELDS_COUNT>::reference ref(IsDelField field) {
+ return bit_fields[field - BIT_FIELDS_BEGIN];
+ }
+ inline std::bitset<BIT_FIELDS_COUNT>::reference ref(BitField field) {
+ return bit_fields[field - BIT_FIELDS_BEGIN];
+ }
+ inline PathString& ref(StringField field) {
+ return string_fields[field - STRING_FIELDS_BEGIN];
+ }
+ inline Blob& ref(BlobField field) {
+ return blob_fields[field - BLOB_FIELDS_BEGIN];
+ }
+ inline std::bitset<BIT_TEMPS_COUNT>::reference ref(BitTemp field) {
+ return bit_temps[field - BIT_TEMPS_BEGIN];
+ }
+
+ inline int64 ref(MetahandleField field) const {
+ return int64_fields[field - INT64_FIELDS_BEGIN];
+ }
+ inline int64 ref(Int64Field field) const {
+ return int64_fields[field - INT64_FIELDS_BEGIN];
+ }
+ inline const Id& ref(IdField field) const {
+ return id_fields[field - ID_FIELDS_BEGIN];
+ }
+ inline int64 ref(BaseVersion field) const {
+ return int64_fields[field - INT64_FIELDS_BEGIN];
+ }
+ inline bool ref(IndexedBitField field) const {
+ return bit_fields[field - BIT_FIELDS_BEGIN];
+ }
+ inline bool ref(IsDelField field) const {
+ return bit_fields[field - BIT_FIELDS_BEGIN];
+ }
+ inline bool ref(BitField field) const {
+ return bit_fields[field - BIT_FIELDS_BEGIN];
+ }
+ inline PathString ref(StringField field) const {
+ return string_fields[field - STRING_FIELDS_BEGIN];
+ }
+ inline Blob ref(BlobField field) const {
+ return blob_fields[field - BLOB_FIELDS_BEGIN];
+ }
+ inline bool ref(BitTemp field) const {
+ return bit_temps[field - BIT_TEMPS_BEGIN];
+ }
+};
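+
+// Each ref() overload above maps its enum range onto the matching backing
+// array, so call sites can use the field enums directly.  Illustrative only
+// (|kernel| is a placeholder EntryKernel*):
+//
+//   int64 handle = kernel->ref(META_HANDLE);  // int64 storage.
+//   bool deleted = kernel->ref(IS_DEL);       // bit-field storage.
+//   PathString name = kernel->ref(NAME);      // string storage.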
+
+// A read-only meta entry.
+class Entry {
+ friend class Directory;
+ friend std::ostream& ::operator << (std::ostream& s, const Entry& e);
+
+ public:
+ // After constructing, you must check good() to test whether the Get
+  // succeeded.
+ Entry(BaseTransaction* trans, GetByHandle, int64 handle);
+ Entry(BaseTransaction* trans, GetById, const Id& id);
+ Entry(BaseTransaction* trans, GetByTag, const PathString& tag);
+ Entry(BaseTransaction* trans, GetByPath, const PathString& path);
+ Entry(BaseTransaction* trans, GetByParentIdAndName, const Id& id,
+ const PathString& name);
+ Entry(BaseTransaction* trans, GetByParentIdAndDBName, const Id& id,
+ const PathString& name);
+
+ bool good() const { return 0 != kernel_; }
+
+ BaseTransaction* trans() const { return basetrans_; }
+
+ // Field accessors.
+ inline int64 Get(MetahandleField field) const {
+ DCHECK(kernel_);
+ return kernel_->ref(field);
+ }
+ inline Id Get(IdField field) const {
+ DCHECK(kernel_);
+ return kernel_->ref(field);
+ }
+ inline int64 Get(Int64Field field) const {
+ DCHECK(kernel_);
+ return kernel_->ref(field);
+ }
+ inline int64 Get(BaseVersion field) const {
+ DCHECK(kernel_);
+ return kernel_->ref(field);
+ }
+ inline bool Get(IndexedBitField field) const {
+ DCHECK(kernel_);
+ return kernel_->ref(field);
+ }
+ inline bool Get(IsDelField field) const {
+ DCHECK(kernel_);
+ return kernel_->ref(field);
+ }
+ inline bool Get(BitField field) const {
+ DCHECK(kernel_);
+ return kernel_->ref(field);
+ }
+ PathString Get(StringField field) const;
+ inline Blob Get(BlobField field) const {
+ DCHECK(kernel_);
+ return kernel_->ref(field);
+ }
+ inline bool Get(BitTemp field) const {
+ DCHECK(kernel_);
+ return kernel_->ref(field);
+ }
+ inline Name GetName() const {
+ DCHECK(kernel_);
+ return Name::FromEntryKernel(kernel_);
+ }
+ inline SyncName GetServerName() const {
+ DCHECK(kernel_);
+ return SyncName(kernel_->ref(SERVER_NAME),
+ kernel_->ref(SERVER_NON_UNIQUE_NAME));
+ }
+ inline bool SyncNameMatchesServerName() const {
+ DCHECK(kernel_);
+ SyncName sync_name(GetName());
+ return sync_name == GetServerName();
+ }
+ inline PathString GetSyncNameValue() const {
+ DCHECK(kernel_);
+    // This should always be equal to GetName().value(), but may be faster.
+ return kernel_->ref(UNSANITIZED_NAME).empty() ? kernel_->ref(NAME) :
+ kernel_->ref(UNSANITIZED_NAME);
+ }
+ inline bool ExistsOnClientBecauseDatabaseNameIsNonEmpty() const {
+ DCHECK(kernel_);
+ return !kernel_->ref(NAME).empty();
+ }
+ inline bool IsRoot() const {
+ DCHECK(kernel_);
+ return kernel_->ref(ID).IsRoot();
+ }
+
+ void GetAllExtendedAttributes(BaseTransaction* trans,
+ std::set<ExtendedAttribute>* result);
+ void GetExtendedAttributesList(BaseTransaction* trans,
+ AttributeKeySet* result);
+ // Flags all extended attributes for deletion on the next SaveChanges.
+ void DeleteAllExtendedAttributes(WriteTransaction *trans);
+
+ Directory* dir() const;
+
+ const EntryKernel GetKernelCopy() const {
+ return *kernel_;
+ }
+
+
+ protected: // Don't allow creation on heap, except by sync API wrappers.
+ friend class sync_api::ReadNode;
+ void* operator new(size_t size) { return (::operator new)(size); }
+
+ inline Entry(BaseTransaction* trans) : basetrans_(trans) { }
+
+ protected:
+
+ BaseTransaction* const basetrans_;
+
+ EntryKernel* kernel_;
+
+ DISALLOW_COPY_AND_ASSIGN(Entry);
+};
+
+// A mutable meta entry. Changes get committed to the database when the
+// WriteTransaction is destroyed.
+class MutableEntry : public Entry {
+ friend class WriteTransaction;
+ friend class Directory;
+ void Init(WriteTransaction* trans, const Id& parent_id,
+ const PathString& name);
+ public:
+ MutableEntry(WriteTransaction* trans, Create, const Id& parent_id,
+ const PathString& name);
+ MutableEntry(WriteTransaction* trans, CreateNewUpdateItem, const Id& id);
+ MutableEntry(WriteTransaction* trans, GetByHandle, int64);
+ MutableEntry(WriteTransaction* trans, GetById, const Id&);
+ MutableEntry(WriteTransaction* trans, GetByPath, const PathString& path);
+ MutableEntry(WriteTransaction* trans, GetByParentIdAndName, const Id&,
+ const PathString& name);
+ MutableEntry(WriteTransaction* trans, GetByParentIdAndDBName,
+ const Id& parentid, const PathString& name);
+
+ WriteTransaction* trans() const;
+
+  // Field accessors.  Some of them trigger re-indexing of the entry.
+  // They return true on success and false on failure, where failure means
+  // that putting the value would have caused a duplicate in the index.
+ bool Put(Int64Field field, const int64& value);
+ bool Put(IdField field, const Id& value);
+ bool Put(StringField field, const PathString& value);
+ bool Put(BaseVersion field, int64 value);
+ inline bool PutName(const Name& name) {
+ return (Put(NAME, name.db_value()) &&
+ Put(UNSANITIZED_NAME, name.GetUnsanitizedName()) &&
+ Put(NON_UNIQUE_NAME, name.non_unique_value()));
+ }
+ inline bool PutServerName(const SyncName& server_name) {
+ return (Put(SERVER_NAME, server_name.value()) &&
+ Put(SERVER_NON_UNIQUE_NAME, server_name.non_unique_value()));
+ }
+ inline bool Put(BlobField field, const Blob& value) {
+ return PutField(field, value);
+ }
+ inline bool Put(BitField field, bool value) {
+ return PutField(field, value);
+ }
+ inline bool Put(IsDelField field, bool value) {
+ return PutIsDel(value);
+ }
+ bool Put(IndexedBitField field, bool value);
+
+ // Avoids temporary collision in index when renaming a bookmark
+ // into another folder.
+ bool PutParentIdAndName(const Id& parent_id, const Name& name);
+
+ // Sets the position of this item, and updates the entry kernels of the
+ // adjacent siblings so that list invariants are maintained. Returns false
+ // and fails if |predecessor_id| does not identify a sibling. Pass the root
+ // ID to put the node in first position.
+ bool PutPredecessor(const Id& predecessor_id);
+
+ inline bool Put(BitTemp field, bool value) {
+ return PutTemp(field, value);
+ }
+
+ protected:
+
+ template <typename FieldType, typename ValueType>
+ inline bool PutField(FieldType field, const ValueType& value) {
+ DCHECK(kernel_);
+ if (kernel_->ref(field) != value) {
+ kernel_->ref(field) = value;
+ kernel_->dirty[static_cast<int>(field)] = true;
+ }
+ return true;
+ }
+
+ template <typename TempType, typename ValueType>
+ inline bool PutTemp(TempType field, const ValueType& value) {
+ DCHECK(kernel_);
+ kernel_->ref(field) = value;
+ return true;
+ }
+
+ bool PutIsDel(bool value);
+
+ private: // Don't allow creation on heap, except by sync API wrappers.
+ friend class sync_api::WriteNode;
+ void* operator new(size_t size) { return (::operator new)(size); }
+
+ bool PutImpl(StringField field, const PathString& value);
+
+
+ // Adjusts the successor and predecessor entries so that they no longer
+ // refer to this entry.
+ void UnlinkFromOrder();
+
+ protected:
+ MutableEntry();
+
+ DISALLOW_COPY_AND_ASSIGN(MutableEntry);
+};
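+
+// Typical use inside a WriteTransaction (illustrative only; |dir| and
+// |some_id| are placeholders; mirrors the patterns in syncable_unittest.cc):
+//
+//   WriteTransaction trans(&dir, UNITTEST, __FILE__, __LINE__);
+//   MutableEntry entry(&trans, CREATE, trans.root_id(), PSTR("Example"));
+//   if (entry.good()) {
+//     entry.Put(ID, some_id);
+//     entry.Put(BASE_VERSION, 1);
+//     entry.PutPredecessor(trans.root_id());  // First among its siblings.
+//   }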
+
+template <Int64Field field_index>
+class SameField;
+template <Int64Field field_index>
+class HashField;
+class LessParentIdAndNames;
+class LessMultiIncusionTargetAndMetahandle;
+template <typename FieldType, FieldType field_index>
+class LessField;
+class LessEntryMetaHandles {
+ public:
+ inline bool operator()(const syncable::EntryKernel& a,
+ const syncable::EntryKernel& b) const {
+ return a.ref(META_HANDLE) < b.ref(META_HANDLE);
+ }
+};
+typedef std::set<EntryKernel, LessEntryMetaHandles> OriginalEntries;
+
+// A WriteTransaction has a writer tag describing which body of code is doing
+// the write. This is defined up here since DirectoryChangeEvent also contains
+// one.
+enum WriterTag {
+ INVALID, SYNCER, AUTHWATCHER, UNITTEST, VACUUM_AFTER_SAVE, SYNCAPI
+};
+
+// A separate Event type and channel for very frequent changes, caused
+// by anything, not just the user.
+struct DirectoryChangeEvent {
+ enum {
+    // Means the listener should go through the list of original entries and
+    // calculate what it needs to notify about.  It should *not* call any
+    // callbacks or attempt to lock anything, because a
+    // WriteTransaction is being held until the listener returns.
+ CALCULATE_CHANGES,
+ // Means the WriteTransaction has been released and the listener
+ // can now take action on the changes it calculated.
+ TRANSACTION_COMPLETE,
+ // Channel is closing.
+ SHUTDOWN
+ } todo;
+  // These members are only valid for CALCULATE_CHANGES.
+ const OriginalEntries* originals;
+ BaseTransaction* trans;
+ WriterTag writer;
+ typedef DirectoryChangeEvent EventType;
+ static inline bool IsChannelShutdownEvent(const EventType& e) {
+ return SHUTDOWN == e.todo;
+ }
+};
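+
+// A listener on the changes channel is expected to branch on |todo|.
+// Illustrative sketch only; the listener type and method are hypothetical:
+//
+//   void Listener::HandleDirectoryChangeEvent(const DirectoryChangeEvent& e) {
+//     if (DirectoryChangeEvent::CALCULATE_CHANGES == e.todo) {
+//       // Inspect e.originals/e.trans/e.writer only; do not lock or call out,
+//       // since a WriteTransaction is still being held.
+//     } else if (DirectoryChangeEvent::TRANSACTION_COMPLETE == e.todo) {
+//       // Safe to act on the changes calculated above.
+//     }  // SHUTDOWN: the channel is closing.
+//   }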
+
+struct ExtendedAttributeKey {
+ int64 metahandle;
+ PathString key;
+ inline bool operator < (const ExtendedAttributeKey& x) const {
+ if (metahandle != x.metahandle)
+ return metahandle < x.metahandle;
+ return key.compare(x.key) < 0;
+ }
+ ExtendedAttributeKey(int64 metahandle, PathString key) :
+ metahandle(metahandle), key(key) { }
+};
+
+struct ExtendedAttributeValue {
+ Blob value;
+ bool is_deleted;
+ bool dirty;
+};
+
+typedef std::map<ExtendedAttributeKey, ExtendedAttributeValue>
+ ExtendedAttributes;
+
+// Used to maintain our per-thread transaction state and to enforce
+// our transaction invariants (e.g. no recursive transactions).
+// Each time a thread enters a transaction by constructing a Read or a
+// WriteTransaction object, a ThreadNode object is pulled from thread-local
+// storage, or created and stored there if it doesn't yet exist.
+struct ThreadNode {
+ const char* file;
+ int line;
+ base::TimeTicks wait_started;
+ ThreadId id;
+ ThreadNode* next;
+ ThreadNode* prev;
+
+ // True when this node is in a linked list. Only accessed from
+ // owner thread so no locking necessary.
+ bool in_list;
+ WriteTransaction* current_write_trans;
+ PThreadCondVar condvar; // Mutex is the kernel's transaction mutex.
+ bool wake_up; // flag for condvar.
+  int tclass;  // Really a BaseTransaction::TransactionClass, but enums
+               // can't be forward-declared.
+
+ // Linked list operations.
+ inline ThreadNode() : in_list(false), current_write_trans(NULL),
+ wake_up(false) {
+ next = prev = this;
+ }
+ inline ThreadNode* Remove() {
+ in_list = false;
+ prev->next = next;
+ next->prev = prev;
+ return next = prev = this;
+ }
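+  // Links this node into the circular list immediately before |node|.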
+ inline void Insert(ThreadNode* node) {
+ in_list = true;
+ prev = node->prev;
+ next = node;
+ next->prev = prev->next = this;
+ }
+};
+
+struct ThreadCounts {
+ int waiting;
+ int active;
+ // Also keep a linked list of thread information.
+ ThreadNode waiting_headtail;
+ ThreadNode active_headtail;
+
+ ThreadCounts() : waiting(0), active(0) { }
+
+ DISALLOW_COPY_AND_ASSIGN(ThreadCounts);
+};
+
+typedef PThreadScopedLock<PThreadMutex> ScopedTransactionLock;
+typedef std::set<int64> MetahandleSet;
+
+// A list of metahandles whose metadata should not be purged.
+typedef std::multiset<int64> Pegs;
+
+// The name Directory in this case means the entire directory
+// structure within a single user account.
+//
+// Sqlite is a little goofy, in that each thread must access a database
+// via its own handle. So, a Directory object should only be accessed
+// from a single thread. Use DirectoryManager's Open() method to
+// always get a directory that has been properly initialized on the
+// current thread.
+//
+// The db is protected against concurrent modification by a reader/
+// writer lock, negotiated by the ReadTransaction and WriteTransaction
+// friend classes. The in-memory indices are protected against
+// concurrent modification by the kernel lock.
+//
+// All methods which require the reader/writer lock to be held either
+// are protected and only called from friends in a transaction
+// or are public and take a Transaction* argument.
+//
+// All methods which require the kernel lock to be already held take a
+// ScopedKernelLock* argument.
+//
+// To prevent deadlock, the reader writer transaction lock must always
+// be held before acquiring the kernel lock.
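+//
+// A typical single-threaded access pattern looks like the unit tests in
+// syncable_unittest.cc (illustrative only; |some_id| is a placeholder):
+//
+//   Directory dir;
+//   dir.Open(PSTR("Example.sqlite3"), PSTR("Example"));
+//   {
+//     ReadTransaction trans(&dir, __FILE__, __LINE__);
+//     Entry entry(&trans, GET_BY_ID, some_id);
+//     if (entry.good()) { /* read fields */ }
+//   }  // The reader/writer lock is released here.
+//   dir.SaveChanges();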
+class ScopedKernelLock;
+class IdFilter;
+class DirectoryManager;
+struct PathMatcher;
+
+class Directory {
+ friend class BaseTransaction;
+ friend class Entry;
+ friend class ExtendedAttribute;
+ friend class MutableEntry;
+ friend class MutableExtendedAttribute;
+ friend class ReadTransaction;
+ friend class ReadTransactionWithoutDB;
+ friend class ScopedKernelLock;
+ friend class ScopedKernelUnlock;
+ friend class WriteTransaction;
+ friend class TestUnsaveableDirectory;
+ public:
+ // Various data that the Directory::Kernel we are backing (persisting data
+  // for) needs to have saved across runs of the application.
+ struct PersistedKernelInfo {
+ int64 last_sync_timestamp;
+ bool initial_sync_ended;
+ std::string store_birthday;
+ int64 next_id;
+ PersistedKernelInfo() : last_sync_timestamp(0),
+ initial_sync_ended(false),
+ next_id(0) {
+ }
+ };
+
+ // What the Directory needs on initialization to create itself and its Kernel.
+ // Filled by DirectoryBackingStore::Load.
+ struct KernelLoadInfo {
+ PersistedKernelInfo kernel_info;
+ std::string cache_guid; // Created on first initialization, never changes.
+ int64 max_metahandle; // Computed (using sql MAX aggregate) on init.
+ KernelLoadInfo() : max_metahandle(0) {
+ }
+ };
+
+ // The dirty/clean state of kernel fields backed by the share_info table.
+ // This is public so it can be used in SaveChangesSnapshot for persistence.
+ enum KernelShareInfoStatus {
+ KERNEL_SHARE_INFO_INVALID,
+ KERNEL_SHARE_INFO_VALID,
+ KERNEL_SHARE_INFO_DIRTY
+ };
+
+ // When the Directory is told to SaveChanges, a SaveChangesSnapshot is
+ // constructed and forms a consistent snapshot of what needs to be sent to
+ // the backing store.
+ struct SaveChangesSnapshot {
+ KernelShareInfoStatus kernel_info_status;
+ PersistedKernelInfo kernel_info;
+ OriginalEntries dirty_metas;
+ ExtendedAttributes dirty_xattrs;
+ SaveChangesSnapshot() : kernel_info_status(KERNEL_SHARE_INFO_INVALID) {
+ }
+ };
+
+ Directory();
+ virtual ~Directory();
+
+ DirOpenResult Open(const PathString& file_path, const PathString& name);
+
+ void Close();
+
+ int64 NextMetahandle();
+  // Always returns a negative id.  Positive ids are generated
+  // by the server only.
+ Id NextId();
+
+ PathString file_path() const { return kernel_->db_path; }
+ bool good() const { return NULL != store_; }
+
+ // The sync timestamp is an index into the list of changes for an account.
+  // It doesn't actually map to any time scale; its name is a historical
+ // anomaly.
+ int64 last_sync_timestamp() const;
+ void set_last_sync_timestamp(int64 timestamp);
+
+ bool initial_sync_ended() const;
+ void set_initial_sync_ended(bool value);
+
+ PathString name() const { return kernel_->name_; }
+
+  // The (account) store birthday is opaque to the client, so we keep it in
+  // the same format it has in the proto buffer, in case we switch to a
+  // binary birthday later.
+ std::string store_birthday() const;
+ void set_store_birthday(std::string store_birthday);
+
+ // Unique to each account / client pair.
+ std::string cache_guid() const;
+
+ protected: // for friends, mainly used by Entry constructors
+ EntryKernel* GetChildWithName(const Id& parent_id, const PathString& name);
+ EntryKernel* GetChildWithDBName(const Id& parent_id, const PathString& name);
+ EntryKernel* GetEntryByHandle(const int64 handle);
+ EntryKernel* GetEntryByHandle(const int64 metahandle, ScopedKernelLock* lock);
+ EntryKernel* GetEntryById(const Id& id);
+ EntryKernel* GetEntryByTag(const PathString& tag);
+ EntryKernel* GetRootEntry();
+ EntryKernel* GetEntryByPath(const PathString& path);
+ bool ReindexId(EntryKernel* const entry, const Id& new_id);
+ bool ReindexParentIdAndName(EntryKernel* const entry, const Id& new_parent_id,
+ const PathString& new_name);
+ // These don't do the semantic checking that the redirector needs.
+ // The semantic checking is implemented higher up.
+ bool Undelete(EntryKernel* const entry);
+ bool Delete(EntryKernel* const entry);
+
+ // Overridden by tests.
+ virtual DirectoryBackingStore* CreateBackingStore(
+ const PathString& dir_name,
+ const PathString& backing_filepath);
+
+ private:
+ // These private versions expect the kernel lock to already be held
+ // before calling.
+ EntryKernel* GetEntryById(const Id& id, ScopedKernelLock* const lock);
+ EntryKernel* GetChildWithName(const Id& parent_id,
+ const PathString& name,
+ ScopedKernelLock* const lock);
+ EntryKernel* GetChildWithNameImpl(const Id& parent_id,
+ const PathString& name,
+ ScopedKernelLock* const lock);
+
+ DirOpenResult OpenImpl(const PathString& file_path, const PathString& name);
+
+ struct DirectoryEventTraits {
+ typedef DirectoryEvent EventType;
+ static inline bool IsChannelShutdownEvent(const DirectoryEvent& event) {
+ return DIRECTORY_DESTROYED == event;
+ }
+ };
+ public:
+ typedef EventChannel<DirectoryEventTraits, PThreadMutex> Channel;
+ typedef EventChannel<DirectoryChangeEvent, PThreadMutex> ChangesChannel;
+ typedef std::vector<int64> ChildHandles;
+
+ // Returns the child meta handles for given parent id.
+ void GetChildHandles(BaseTransaction*, const Id& parent_id,
+ const PathString& path_spec, ChildHandles* result);
+ void GetChildHandles(BaseTransaction*, const Id& parent_id,
+ ChildHandles* result);
+ void GetChildHandlesImpl(BaseTransaction* trans, const Id& parent_id,
+ PathMatcher* matcher, ChildHandles* result);
+
+ // Find the first or last child in the positional ordering under a parent,
+ // and return its id. Returns a root Id if parent has no children.
+ Id GetFirstChildId(BaseTransaction* trans, const Id& parent_id);
+ Id GetLastChildId(BaseTransaction* trans, const Id& parent_id);
+
+ // SaveChanges works by taking a consistent snapshot of the current Directory
+ // state and indices (by deep copy) under a ReadTransaction, passing this
+ // snapshot to the backing store under no transaction, and finally cleaning
+ // up by either purging entries no longer needed (this part done under a
+ // WriteTransaction) or rolling back dirty and IS_NEW bits. It also uses
+ // internal locking to enforce SaveChanges operations are mutually exclusive.
+ //
+ // WARNING: THIS METHOD PERFORMS SYNCHRONOUS I/O VIA SQLITE.
+ bool SaveChanges();
+
+ // Returns the number of entities with the unsynced bit set.
+ int64 unsynced_entity_count() const;
+
+  // GetUnsyncedMetaHandles should only be called after SaveChanges and
+ // before any new entries have been created. The intention is that the
+ // syncer should call it from its PerformSyncQueries member.
+ typedef std::vector<int64> UnsyncedMetaHandles;
+ void GetUnsyncedMetaHandles(BaseTransaction* trans,
+ UnsyncedMetaHandles* result);
+
+ // Get all the metahandles for unapplied updates
+ typedef std::vector<int64> UnappliedUpdateMetaHandles;
+ void GetUnappliedUpdateMetaHandles(BaseTransaction* trans,
+ UnappliedUpdateMetaHandles* result);
+
+ void GetAllExtendedAttributes(BaseTransaction* trans, int64 metahandle,
+ std::set<ExtendedAttribute>* result);
+ // Get all extended attribute keys associated with a metahandle
+ void GetExtendedAttributesList(BaseTransaction* trans, int64 metahandle,
+ AttributeKeySet* result);
+ // Flags all extended attributes for deletion on the next SaveChanges.
+  void DeleteAllExtendedAttributes(WriteTransaction* trans, int64 metahandle);
+
+ // Get the channel for post save notification, used by the syncer.
+ inline Channel* channel() const {
+ return kernel_->channel;
+ }
+ inline ChangesChannel* changes_channel() const {
+ return kernel_->changes_channel;
+ }
+
+ // Checks tree metadata consistency.
+  // If full_scan is false, the function avoids pulling any entries from
+  // the db and only scans entries currently in RAM.
+  // If full_scan is true, all entries are pulled from the database.
+  // No return value; CHECKs will be triggered if we're given bad
+ // information.
+ void CheckTreeInvariants(syncable::BaseTransaction* trans,
+ bool full_scan);
+
+ void CheckTreeInvariants(syncable::BaseTransaction* trans,
+ const OriginalEntries* originals);
+
+ void CheckTreeInvariants(syncable::BaseTransaction* trans,
+ const MetahandleSet& handles,
+ const IdFilter& idfilter);
+
+ private:
+ // Helper to prime ids_index, parent_id_and_names_index, unsynced_metahandles
+ // and unapplied_metahandles from metahandles_index.
+ void InitializeIndices();
+
+ // Constructs a consistent snapshot of the current Directory state and
+ // indices (by deep copy) under a ReadTransaction for use in |snapshot|.
+ // See SaveChanges() for more information.
+ void TakeSnapshotForSaveChanges(SaveChangesSnapshot* snapshot);
+
+ // Purges from memory any unused, safe to remove entries that were
+ // successfully deleted on disk as a result of the SaveChanges that processed
+ // |snapshot|. See SaveChanges() for more information.
+ void VacuumAfterSaveChanges(const SaveChangesSnapshot& snapshot);
+
+ // Rolls back dirty and IS_NEW bits in the event that the SaveChanges that
+ // processed |snapshot| failed, for ex. due to no disk space.
+ void HandleSaveChangesFailure(const SaveChangesSnapshot& snapshot);
+
+ void InsertEntry(EntryKernel* entry, ScopedKernelLock* lock);
+ void InsertEntry(EntryKernel* entry);
+
+ // Used by CheckTreeInvariants
+ void GetAllMetaHandles(BaseTransaction* trans, MetahandleSet* result);
+
+ static bool SafeToPurgeFromMemory(const EntryKernel* const entry);
+
+ // Helper method used to implement GetFirstChildId/GetLastChildId.
+ Id GetChildWithNullIdField(IdField field,
+ BaseTransaction* trans,
+ const Id& parent_id);
+
+ Directory& operator = (const Directory&);
+
+ // TODO(sync): If lookups and inserts in these sets become
+  // the bottleneck, then we can use hash sets instead.  But
+ // that will require using #ifdefs and compiler-specific code,
+ // so use standard sets for now.
+ public:
+ typedef std::set<EntryKernel*, LessField<MetahandleField, META_HANDLE> >
+ MetahandlesIndex;
+ typedef std::set<EntryKernel*, LessField<IdField, ID> > IdsIndex;
+ // All entries in memory must be in both the MetahandlesIndex and
+  // the IdsIndex, but only non-deleted entries will be in the
+ // ParentIdAndNamesIndex, because there can be multiple deleted
+ // entries with the same parent id and name.
+ typedef std::set<EntryKernel*, LessParentIdAndNames> ParentIdAndNamesIndex;
+ typedef std::vector<int64> MetahandlesToPurge;
+
+ private:
+
+ struct Kernel {
+ Kernel(const PathString& db_path, const PathString& name,
+ const KernelLoadInfo& info);
+
+ ~Kernel();
+
+ PathString const db_path;
+ // TODO(timsteele): audit use of the member and remove if possible
+ volatile base::subtle::AtomicWord refcount;
+ void AddRef(); // For convenience.
+ void Release();
+
+ // Next 3 members implement the reader/writer lock.
+ PThreadMutex transaction_mutex; // Protects next member.
+ ThreadCounts thread_counts;
+ pthread_key_t thread_node_key;
+
+    // The name of this directory, used as a key into open_files_.
+ PathString const name_;
+
+ // Protects all members below.
+ // The mutex effectively protects all the indices, but not the
+ // entries themselves. So once a pointer to an entry is pulled
+ // from the index, the mutex can be unlocked and entry read or written.
+ //
+ // Never hold the mutex and do anything with the database or any
+ // other buffered IO. Violating this rule will result in deadlock.
+ pthread_mutex_t mutex;
+ MetahandlesIndex* metahandles_index; // Entries indexed by metahandle
+ IdsIndex* ids_index; // Entries indexed by id
+ ParentIdAndNamesIndex* parent_id_and_names_index;
+ // So we don't have to create an EntryKernel every time we want to
+    // look something up in an index.  Needle-in-haystack metaphor.
+ EntryKernel needle;
+ ExtendedAttributes* const extended_attributes;
+
+ // 2 in-memory indices on bits used extremely frequently by the syncer.
+ MetahandleSet* const unapplied_update_metahandles;
+ MetahandleSet* const unsynced_metahandles;
+ // TODO(timsteele): Add a dirty_metahandles index as we now may want to
+ // optimize the SaveChanges work of scanning all entries to find dirty ones
+ // due to the entire entry domain now being in-memory.
+
+ // TODO(ncarter): Figure out what the hell this is, and comment it.
+ Channel* const channel;
+
+ // The changes channel mutex is explicit because it must be locked
+ // while holding the transaction mutex and released after
+ // releasing the transaction mutex.
+ ChangesChannel* const changes_channel;
+ PThreadMutex changes_channel_mutex;
+ KernelShareInfoStatus info_status_;
+ // These 5 members are backed in the share_info table, and
+ // their state is marked by the flag above.
+ // Last sync timestamp fetched from the server.
+ int64 last_sync_timestamp_;
+ // true iff we ever reached the end of the changelog.
+ bool initial_sync_ended_;
+ // The store birthday we were given by the server. Contents are opaque to
+ // the client.
+ std::string store_birthday_;
+ // A unique identifier for this account's cache db, used to generate
+ // unique server IDs. No need to lock, only written at init time.
+ std::string cache_guid_;
+
+ // It doesn't make sense for two threads to run SaveChanges at the same
+ // time; this mutex protects that activity.
+ PThreadMutex save_changes_mutex;
+
+ // The next metahandle and id are protected by kernel mutex.
+ int64 next_metahandle;
+ int64 next_id;
+
+ // Keep a history of recently flushed metahandles for debugging
+ // purposes. Protected by the save_changes_mutex.
+ DebugQueue<int64, 1000> flushed_metahandles_;
+ };
+
+ Kernel* kernel_;
+
+ DirectoryBackingStore* store_;
+};
+
+class ScopedKernelLock {
+ public:
+ explicit ScopedKernelLock(const Directory*);
+ ~ScopedKernelLock();
+
+ Directory* const dir_;
+ DISALLOW_COPY_AND_ASSIGN(ScopedKernelLock);
+};
+
+class ScopedKernelUnlock {
+ public:
+ explicit ScopedKernelUnlock(ScopedKernelLock* lock);
+ ~ScopedKernelUnlock();
+ ScopedKernelLock* const lock_;
+ DISALLOW_COPY_AND_ASSIGN(ScopedKernelUnlock);
+};
+
+// Transactions are now processed FIFO (with overlapping reads allowed).
+class BaseTransaction {
+ friend class Entry;
+ public:
+ enum TransactionClass { READ, WRITE };
+
+ protected:
+ explicit BaseTransaction(Directory* directory, const char* name,
+ const char* source_file, int line);
+
+ // The members below are optionally called by descendants.
+ void Lock(ThreadCounts* const thread_counts, ThreadNode* thread_node,
+ TransactionClass tclass);
+ void AfterLock(ThreadNode* thread_node);
+ void UnlockAndLog(ThreadCounts* const thread_counts, OriginalEntries*);
+ void Init(ThreadCounts* const thread_counts, TransactionClass tclass);
+ ThreadNode* MakeThreadNode();
+ public:
+
+ inline Directory* directory() const { return directory_; }
+
+ inline Id root_id() const { return Id(); }
+
+ protected:
+ Directory* const directory_;
+ Directory::Kernel* const dirkernel_; // for brevity
+ const char* const name_;
+ base::TimeTicks time_acquired_;
+ const char* const source_file_;
+ const int line_;
+ WriterTag writer_;
+
+ DISALLOW_COPY_AND_ASSIGN(BaseTransaction);
+};
+
+// Locks db in constructor, unlocks in destructor.
+class ReadTransaction : public BaseTransaction {
+ public:
+ ReadTransaction(Directory* directory, const char* source_file,
+ int line);
+ ReadTransaction(const ScopedDirLookup& scoped_dir,
+ const char* source_file, int line);
+
+ ~ReadTransaction();
+
+ protected: // Don't allow creation on heap, except by sync API wrapper.
+ friend class sync_api::ReadTransaction;
+ void* operator new(size_t size) { return (::operator new)(size); }
+
+ DISALLOW_COPY_AND_ASSIGN(ReadTransaction);
+};
+
+// Locks db in constructor, unlocks in destructor.
+class WriteTransaction : public BaseTransaction {
+ friend class MutableEntry;
+ public:
+ explicit WriteTransaction(Directory* directory, WriterTag writer,
+ const char* source_file, int line);
+ explicit WriteTransaction(const ScopedDirLookup& directory,
+ WriterTag writer, const char* source_file,
+ int line);
+ virtual ~WriteTransaction();
+
+ void SaveOriginal(EntryKernel* entry);
+
+ protected:
+ // If I had had the foresight to create a BaseWriteTransactionClass,
+ // I would not have needed this pass-through constructor and the
+ // skip_destructor flag.
+ explicit WriteTransaction(Directory* directory,
+ const char* name, WriterTag writer,
+ const char* source_file,
+ int line, bool skip_destructor,
+ OriginalEntries* originals);
+
+ const bool skip_destructor_;
+
+ // Before an entry gets modified, we copy the original into a list
+ // so that we can issue change notifications when the transaction
+ // is done.
+ OriginalEntries* const originals_;
+
+ DISALLOW_COPY_AND_ASSIGN(WriteTransaction);
+};
+
+bool IsLegalNewParent(BaseTransaction* trans, const Id& id, const Id& parentid);
+int ComparePathNames(const PathString& a, const PathString& b);
+
+// Exposed in header as this is used as a sqlite3 callback.
+int ComparePathNames16(void*, int a_bytes, const void* a, int b_bytes,
+ const void* b);
+
+int64 Now();
+
+// Does wildcard processing.
+BOOL PathNameMatch(const PathString& pathname, const PathString& pathspec);
+
+PathString GetFullPath(BaseTransaction* trans, const Entry& e);
+
+inline void ReverseAppend(const PathString& s, PathString* target) {
+ target->append(s.rbegin(), s.rend());
+}
+
+class ExtendedAttribute {
+ public:
+ ExtendedAttribute(BaseTransaction* trans, GetByHandle,
+ const ExtendedAttributeKey& key);
+ int64 metahandle() const { return i_->first.metahandle; }
+ const PathString& key() const { return i_->first.key; }
+ const Blob& value() const { return i_->second.value; }
+ bool is_deleted() const { return i_->second.is_deleted; }
+ bool good() const { return good_; }
+ bool operator < (const ExtendedAttribute& x) const {
+ return i_->first < x.i_->first;
+ }
+ protected:
+ bool Init(BaseTransaction* trans,
+ Directory::Kernel* const kernel,
+ ScopedKernelLock* lock,
+ const ExtendedAttributeKey& key);
+ ExtendedAttribute() { }
+ ExtendedAttributes::iterator i_;
+ bool good_;
+};
+
+class MutableExtendedAttribute : public ExtendedAttribute {
+ public:
+ MutableExtendedAttribute(WriteTransaction* trans, GetByHandle,
+ const ExtendedAttributeKey& key);
+ MutableExtendedAttribute(WriteTransaction* trans, Create,
+ const ExtendedAttributeKey& key);
+
+ Blob* mutable_value() {
+ i_->second.dirty = true;
+ i_->second.is_deleted = false;
+ return &(i_->second.value);
+ }
+
+ void delete_attribute() {
+ i_->second.dirty = true;
+ i_->second.is_deleted = true;
+ }
+};
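+
+// Example: creating or overwriting an attribute under a WriteTransaction
+// (illustrative only; |entry|, |trans|, |bytes|, and |length| are
+// placeholders; the same pattern appears in syncable_unittest.cc):
+//
+//   ExtendedAttributeKey key(entry.Get(META_HANDLE), PSTR("DATA"));
+//   MutableExtendedAttribute attr(&trans, CREATE, key);
+//   attr.mutable_value()->assign(bytes, bytes + length);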
+
+// Get an extended attribute from an Entry by name. Returns a pointer
+// to a const Blob containing the attribute data, or NULL if there is
+// no attribute with the given name. The pointer is valid for the
+// duration of the Entry's transaction.
+const Blob* GetExtendedAttributeValue(const Entry& e,
+ const PathString& attribute_name);
+
+// This function sets only the flags needed to get this entry to sync.
+void MarkForSyncing(syncable::MutableEntry* e);
+
+// This is not a reset. It just sets the numeric fields which are not
+// initialized by the constructor to zero.
+void ZeroFields(EntryKernel* entry, int first_field);
+
+} // namespace syncable
+
+std::ostream& operator <<(std::ostream&, const syncable::Blob&);
+
+browser_sync::FastDump& operator <<
+ (browser_sync::FastDump&, const syncable::Blob&);
+
+
+std::ostream& operator <<(std::ostream&, const syncable::ThreadNode&);
+
+#endif // CHROME_BROWSER_SYNC_SYNCABLE_SYNCABLE_H_
diff --git a/chrome/browser/sync/syncable/syncable_changes_version.h b/chrome/browser/sync/syncable/syncable_changes_version.h
new file mode 100644
index 0000000..26a5eb8
--- /dev/null
+++ b/chrome/browser/sync/syncable/syncable_changes_version.h
@@ -0,0 +1,29 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_SYNCABLE_SYNCABLE_CHANGES_VERSION_H_
+#define CHROME_BROWSER_SYNC_SYNCABLE_SYNCABLE_CHANGES_VERSION_H_
+
+namespace syncable {
+
+// For the most part, the sync engine treats version numbers as opaque values.
+// However, there are parts of our code base that break this abstraction, and
+// depend on the following two invariants:
+//
+// 1. CHANGES_VERSION is less than 0.
+// 2. The server only issues positive version numbers.
+//
+// Breaking this abstraction makes some operations 10 times
+// faster.  If either of these invariants changes, the queries that depend
+// on them must be revisited.
+
+enum {
+ CHANGES_VERSION = -1
+};
+
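+// The string form exists so the constant can be pasted into SQL text; for
+// example, the "base_version" column default in syncable_columns.h is
+// "bigint default " CHANGES_VERSION_STRING.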
+#define CHANGES_VERSION_STRING "-1"
+
+} // namespace syncable
+
+#endif // CHROME_BROWSER_SYNC_SYNCABLE_SYNCABLE_CHANGES_VERSION_H_
diff --git a/chrome/browser/sync/syncable/syncable_columns.h b/chrome/browser/sync/syncable/syncable_columns.h
new file mode 100644
index 0000000..10f7578
--- /dev/null
+++ b/chrome/browser/sync/syncable/syncable_columns.h
@@ -0,0 +1,78 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_SYNCABLE_SYNCABLE_COLUMNS_H_
+#define CHROME_BROWSER_SYNC_SYNCABLE_SYNCABLE_COLUMNS_H_
+
+#include "chrome/browser/sync/syncable/syncable.h"
+#include "chrome/browser/sync/syncable/syncable_changes_version.h"
+
+namespace syncable {
+
+struct ColumnSpec {
+ const char* name;
+ const char* spec;
+};
+
+// Must be in exactly the same order as the fields in syncable.h.
+static const ColumnSpec g_metas_columns[] = {
+ //////////////////////////////////////
+ // int64s
+ {"metahandle", "bigint primary key ON CONFLICT FAIL"},
+ {"base_version", "bigint default " CHANGES_VERSION_STRING},
+ {"server_version", "bigint default 0"},
+ // These timestamps are kept in native file timestamp format. It is
+ // up to the syncer to translate to Java time when syncing.
+ {"mtime", "bigint default 0"},
+ {"server_mtime", "bigint default 0"},
+ {"ctime", "bigint default 0"},
+ {"server_ctime", "bigint default 0"},
+ {"server_position_in_parent", "bigint default 0"},
+ // This is the item ID that we store for the embedding application.
+ {"local_external_id", "bigint default 0"},
+ //////////////////////////////////////
+ // Ids
+ {"id", "varchar(255) default \"r\""},
+ {"parent_id", "varchar(255) default \"r\""},
+ {"server_parent_id", "varchar(255) default \"r\""},
+ {"prev_id", "varchar(255) default \"r\""},
+ {"next_id", "varchar(255) default \"r\""},
+ //////////////////////////////////////
+ // bits
+ {"is_unsynced", "bit default 0"},
+ {"is_unapplied_update", "bit default 0"},
+ {"is_del", "bit default 0"},
+ {"is_dir", "bit default 0"},
+ {"is_bookmark_object", "bit default 0"},
+ {"server_is_dir", "bit default 0"},
+ {"server_is_del", "bit default 0"},
+ {"server_is_bookmark_object", "bit default 0"},
+ //////////////////////////////////////
+ // Strings
+ {"name", "varchar(255) COLLATE PATHNAME"},
+ {"unsanitized_name", "varchar(255) COLLATE PATHNAME"},
+ {"non_unique_name", "varchar"},
+ {"server_name", "varchar(255) COLLATE PATHNAME"},
+ {"server_non_unique_name", "varchar"},
+ {"bookmark_url", "varchar"},
+ {"server_bookmark_url", "varchar"},
+ {"singleton_tag", "varchar"},
+ //////////////////////////////////////
+ // Blobs.
+ {"bookmark_favicon", "blob"},
+ {"server_bookmark_favicon", "blob"},
+};
+
+// At least enforce that there are an equal number of column names and fields.
+COMPILE_ASSERT(ARRAYSIZE(g_metas_columns) >= FIELD_COUNT, missing_column_name);
+COMPILE_ASSERT(ARRAYSIZE(g_metas_columns) <= FIELD_COUNT, extra_column_names);
+
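+// For example, ColumnName(BASE_VERSION) yields "base_version", following the
+// ordering of g_metas_columns above.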
+static inline const char* ColumnName(int field) {
+ DCHECK(field < BEGIN_TEMPS);
+ return g_metas_columns[field].name;
+}
+
+} // namespace syncable
+
+#endif // CHROME_BROWSER_SYNC_SYNCABLE_SYNCABLE_COLUMNS_H_
diff --git a/chrome/browser/sync/syncable/syncable_id.cc b/chrome/browser/sync/syncable/syncable_id.cc
new file mode 100644
index 0000000..04d5afc
--- /dev/null
+++ b/chrome/browser/sync/syncable/syncable_id.cc
@@ -0,0 +1,72 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/syncable/syncable_id.h"
+
+#include <iosfwd>
+
+#include "base/string_util.h"
+#include "chrome/browser/sync/util/query_helpers.h"
+
+using std::ostream;
+using std::string;
+
+namespace syncable {
+const Id kNullId; // Currently == root.
+} // namespace syncable
+
+sqlite3_stmt* BindArg(sqlite3_stmt* statement, const syncable::Id& id,
+ int index) {
+ return BindArg(statement, id.s_.c_str(), index);
+}
+
+void GetColumn(sqlite3_stmt* statement, int index, syncable::Id* id) {
+ GetColumn(statement, index, &id->s_);
+}
+
+ostream& operator << (ostream& out, const syncable::Id& id) {
+ out << id.s_;
+ return out;
+}
+
+using browser_sync::FastDump;
+FastDump& operator << (FastDump& dump, const syncable::Id& id) {
+ dump.out_->sputn(id.s_.data(), id.s_.size());
+ return dump;
+}
+
+namespace syncable {
+
+string Id::AsQueryParam() const {
+ if ('s' == s_[0])
+ return s_.c_str() + 1;
+ return "";
+}
+
+string Id::GetServerId() const {
+ // Currently root is the string "0". We need to decide on a true value.
+ // "" would be convenient here, as the IsRoot call would not be needed.
+ if (IsRoot())
+ return "0";
+ return s_.substr(1);
+}
+
+Id Id::CreateFromServerId(const string& server_id) {
+ Id id;
+ if (server_id == "0")
+ id.s_ = "r";
+ else
+ id.s_ = string("s") + server_id;
+ return id;
+}
+
+Id Id::CreateFromClientString(const string& local_id) {
+ Id id;
+ if (local_id == "0")
+ id.s_ = "r";
+ else
+ id.s_ = string("c") + local_id;
+ return id;
+}
+} // namespace syncable
diff --git a/chrome/browser/sync/syncable/syncable_id.h b/chrome/browser/sync/syncable/syncable_id.h
new file mode 100644
index 0000000..5f2a28e
--- /dev/null
+++ b/chrome/browser/sync/syncable/syncable_id.h
@@ -0,0 +1,114 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_SYNC_SYNCABLE_SYNCABLE_ID_H_
+#define CHROME_BROWSER_SYNC_SYNCABLE_SYNCABLE_ID_H_
+
+#include <iosfwd>
+#include <limits>
+#include <sstream>
+#include <string>
+
+#include "base/hash_tables.h"
+#include "chrome/browser/sync/util/fast_dump.h"
+#include "chrome/browser/sync/util/sync_types.h"
+
+extern "C" {
+struct sqlite3;
+struct sqlite3_stmt;
+}
+
+namespace syncable {
+class Id;
+} // namespace syncable
+
+class MockConnectionManager;
+
+sqlite3_stmt* BindArg(sqlite3_stmt*, const syncable::Id&, int index);
+void GetColumn(sqlite3_stmt*, int index, syncable::Id* value);
+std::ostream& operator << (std::ostream& out, const syncable::Id& id);
+browser_sync::FastDump& operator <<
+ (browser_sync::FastDump& out, const syncable::Id& id);
+
+namespace syncable {
+
+// For historical reasons, 3 concepts got overloaded into the Id:
+// 1. A unique, opaque identifier for the object.
+// 2. A flag specifying whether the server knows about this object.
+// 3. A flag for the root.
+//
+// We originally wrapped an integer for this information, but now we use a
+// string. It will have one of three forms:
+// 1. c<client only opaque id> for client items that have not been committed.
+// 2. r for the root item.
+// 3. s<server provided opaque id> for items that the server knows about.
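+//
+// For example (see syncable_id.cc for the string forms):
+//   Id::CreateFromServerId("123")     stores "s123"; GetServerId() == "123".
+//   Id::CreateFromClientString("123") stores "c123"; ServerKnows() == false.
+//   Id()                              is the root, stored as "r".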
+class Id {
+ friend sqlite3_stmt* ::BindArg(sqlite3_stmt*, const syncable::Id&, int index);
+ friend void ::GetColumn(sqlite3_stmt*, int index, syncable::Id* value);
+ friend std::ostream& ::operator << (std::ostream& out,
+ const syncable::Id& id);
+ friend browser_sync::FastDump& ::operator <<
+ (browser_sync::FastDump& out, const syncable::Id& id);
+ friend class MockConnectionManager;
+ friend class SyncableIdTest;
+ public:
+ // This constructor will be handy even when we move away from
+ // int64s, just for unit tests.
+ inline Id() : s_("r") { }
+ inline Id(const Id& that) {
+ Copy(that);
+ }
+ inline Id& operator = (const Id& that) {
+ Copy(that);
+ return *this;
+ }
+ inline void Copy(const Id& that) {
+ this->s_ = that.s_;
+ }
+ inline bool IsRoot() const {
+ return "r" == s_;
+ }
+ inline bool ServerKnows() const {
+ return s_[0] == 's' || s_ == "r";
+ }
+
+ // TODO(sync): We could use null here, but to ease conversion we use "r".
+ // fix this, this is madness :)
+ inline bool IsNull() const {
+ return IsRoot();
+ }
+ inline void Clear() {
+ s_ = "r";
+ }
+ std::string AsQueryParam() const;
+ // Must never allow id == 0 or id < 0 to compile.
+ inline bool operator == (const Id& that) const {
+ return s_ == that.s_;
+ }
+ inline bool operator != (const Id& that) const {
+ return s_ != that.s_;
+ }
+ inline bool operator < (const Id& that) const {
+ return s_ < that.s_;
+ }
+ inline bool operator > (const Id& that) const {
+ return s_ > that.s_;
+ }
+
+ public:
+ // Three functions used to work with our proto buffers.
+ std::string GetServerId() const;
+ static Id CreateFromServerId(const std::string& server_id);
+ // This should only be used if you get back a reference to a local
+ // id from the server. Returns a client only opaque id.
+ static Id CreateFromClientString(const std::string& local_id);
+ protected:
+ std::string s_;
+};
+
+extern const Id kNullId;
+
+} // namespace syncable
+
+#endif // CHROME_BROWSER_SYNC_SYNCABLE_SYNCABLE_ID_H_
diff --git a/chrome/browser/sync/syncable/syncable_id_unittest.cc b/chrome/browser/sync/syncable/syncable_id_unittest.cc
new file mode 100644
index 0000000..b592ad5
--- /dev/null
+++ b/chrome/browser/sync/syncable/syncable_id_unittest.cc
@@ -0,0 +1,44 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/syncable/syncable_id.h"
+
+#include <vector>
+
+#include "chrome/test/sync/engine/test_id_factory.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using std::vector;
+
+namespace syncable {
+
+using browser_sync::TestIdFactory;
+
+class SyncableIdTest : public testing::Test { };
+
+TEST(SyncableIdTest, TestIDCreation) {
+ vector<Id> v;
+ v.push_back(TestIdFactory::FromNumber(5));
+ v.push_back(TestIdFactory::FromNumber(1));
+ v.push_back(TestIdFactory::FromNumber(-5));
+ v.push_back(TestIdFactory::MakeLocal("A"));
+ v.push_back(TestIdFactory::MakeLocal("B"));
+ v.push_back(TestIdFactory::MakeServer("A"));
+ v.push_back(TestIdFactory::MakeServer("B"));
+ v.push_back(Id::CreateFromServerId("-5"));
+ v.push_back(Id::CreateFromClientString("A"));
+ v.push_back(Id::CreateFromServerId("A"));
+
+ for (vector<Id>::iterator i = v.begin(); i != v.end(); ++i) {
+ for (vector<Id>::iterator j = v.begin(); j != i; ++j) {
+      ASSERT_NE(*i, *j) << "mis-equated two distinct ids";
+ }
+ ASSERT_EQ(*i, *i) << "self-equality failed";
+ Id copy1 = *i;
+ Id copy2 = *i;
+ ASSERT_EQ(copy1, copy2) << "equality after copy failed";
+ }
+}
+
+} // namespace syncable
diff --git a/chrome/browser/sync/syncable/syncable_unittest.cc b/chrome/browser/sync/syncable/syncable_unittest.cc
new file mode 100644
index 0000000..63bc153
--- /dev/null
+++ b/chrome/browser/sync/syncable/syncable_unittest.cc
@@ -0,0 +1,1554 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/sync/syncable/syncable.h"
+
+#include <sys/types.h>
+
+#include <iostream>
+#include <limits>
+#include <string>
+
+// TODO(ncarter): Winnow down the OS-specific includes from the test
+// file.
+#if defined(OS_WINDOWS)
+#include <tchar.h>
+#include <atlbase.h>
+#include <process.h>
+#endif // defined(OS_WINDOWS)
+
+#if !defined(OS_WINDOWS)
+#define MAX_PATH PATH_MAX
+#include <strstream>
+#include <ostream>
+#include <stdio.h>
+#include <sys/ipc.h>
+#include <sys/sem.h>
+#include <sys/times.h>
+#endif // !defined(OS_WINDOWS)
+
+#include "base/at_exit.h"
+#include "base/logging.h"
+#include "base/scoped_ptr.h"
+#include "chrome/browser/sync/syncable/directory_backing_store.h"
+#include "chrome/browser/sync/syncable/directory_manager.h"
+#include "chrome/browser/sync/util/character_set_converters.h"
+#include "chrome/browser/sync/util/closure.h"
+#include "chrome/browser/sync/util/compat-file.h"
+#include "chrome/browser/sync/util/event_sys-inl.h"
+#include "chrome/browser/sync/util/path_helpers.h"
+#include "chrome/browser/sync/util/pthread_helpers.h"
+#include "chrome/browser/sync/util/query_helpers.h"
+#include "chrome/test/sync/engine/test_id_factory.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/sqlite/preprocessed/sqlite3.h"
+
+using browser_sync::TestIdFactory;
+using std::cout;
+using std::endl;
+using std::string;
+
+namespace syncable {
+
+// A lot of these tests were written expecting to be able to read and
+// write object data on entries. However, the design has changed.
+void PutDataAsExtendedAttribute(WriteTransaction *wtrans,
+ MutableEntry* e,
+ const char* bytes,
+ size_t bytes_length) {
+ ExtendedAttributeKey key(e->Get(META_HANDLE), PSTR("DATA"));
+ MutableExtendedAttribute attr(wtrans, CREATE, key);
+ Blob bytes_blob(bytes, bytes + bytes_length);
+ attr.mutable_value()->swap(bytes_blob);
+}
+
+void ExpectDataFromExtendedAttributeEquals(BaseTransaction *trans,
+ Entry* e,
+ const char* bytes,
+ size_t bytes_length) {
+ Blob expected_value(bytes, bytes + bytes_length);
+ ExtendedAttributeKey key(e->Get(META_HANDLE), PSTR("DATA"));
+ ExtendedAttribute attr(trans, GET_BY_HANDLE, key);
+ EXPECT_FALSE(attr.is_deleted());
+ EXPECT_EQ(expected_value, attr.value());
+}
+
+
+TEST(Syncable, General) {
+ remove("SimpleTest.sqlite3");
+ Directory dir;
+ dir.Open(PSTR("SimpleTest.sqlite3"), PSTR("SimpleTest"));
+ bool entry_exists = false;
+ int64 metahandle;
+ const Id id = TestIdFactory::FromNumber(99);
+ // Test simple read operations.
+ {
+ ReadTransaction rtrans(&dir, __FILE__, __LINE__);
+ Entry e(&rtrans, GET_BY_ID, id);
+ if (e.good()) {
+ entry_exists = true;
+ metahandle = e.Get(META_HANDLE);
+ }
+ Directory::ChildHandles child_handles;
+ dir.GetChildHandles(&rtrans, rtrans.root_id(), &child_handles);
+ for (Directory::ChildHandles::iterator i = child_handles.begin();
+ i != child_handles.end(); ++i)
+ cout << *i << endl;
+
+ Entry e2(&rtrans, GET_BY_PATH, PSTR("/Hello\\World/"));
+ }
+
+ // Test creating a new meta entry.
+ {
+ WriteTransaction wtrans(&dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry me(&wtrans, CREATE, wtrans.root_id(), PSTR("Jeff"));
+ ASSERT_TRUE(entry_exists ? !me.good() : me.good());
+ if (me.good()) {
+ me.Put(ID, id);
+ me.Put(BASE_VERSION, 1);
+ metahandle = me.Get(META_HANDLE);
+ }
+ }
+
+ // Test writing data to an entity.
+ static const char s[] = "Hello World.";
+ {
+ WriteTransaction trans(&dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry e(&trans, GET_BY_PATH,
+ PathString(kPathSeparator) + PSTR("Jeff"));
+ ASSERT_TRUE(e.good());
+ PutDataAsExtendedAttribute(&trans, &e, s, sizeof(s));
+ }
+
+ // Test reading back the name contents that we just wrote.
+ {
+ WriteTransaction trans(&dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry e(&trans, GET_BY_PATH,
+ PathString(kPathSeparator) + PSTR("Jeff"));
+ ASSERT_TRUE(e.good());
+ ExpectDataFromExtendedAttributeEquals(&trans, &e, s, sizeof(s));
+ }
+
+ // Now delete it.
+ {
+ WriteTransaction trans(&dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry e(&trans, CREATE, trans.root_id(), PSTR("New File"));
+ e.Put(IS_DEL, true);
+ }
+
+ dir.SaveChanges();
+}
+
+TEST(Syncable, NameClassTest) {
+ const PathString foo(PSTR("foo"));
+ const PathString bar(PSTR("bar"));
+
+ Name name1(foo);
+ EXPECT_EQ(name1.value(), foo);
+ EXPECT_EQ(name1.db_value(), foo);
+ EXPECT_FALSE(name1.HasBeenSanitized());
+ EXPECT_TRUE(name1.GetUnsanitizedName().empty());
+
+ Name name2(foo, foo);
+ EXPECT_EQ(name2.value(), foo);
+ EXPECT_EQ(name2.db_value(), foo);
+ EXPECT_FALSE(name2.HasBeenSanitized());
+ EXPECT_TRUE(name2.GetUnsanitizedName().empty());
+
+ Name name3(foo, bar);
+ EXPECT_EQ(name3.value(), bar);
+ EXPECT_EQ(name3.db_value(), foo);
+ EXPECT_TRUE(name3.HasBeenSanitized());
+ EXPECT_EQ(name3.GetUnsanitizedName(), bar);
+
+ EXPECT_TRUE(name1 == name2);
+ EXPECT_FALSE(name1 != name2);
+ EXPECT_FALSE(name2 == name3);
+ EXPECT_TRUE(name2 != name3);
+}
+
+namespace {
+
+// A Directory whose backing store always fails SaveChanges by returning false.
+class TestUnsaveableDirectory : public Directory {
+ public:
+ class UnsaveableBackingStore : public DirectoryBackingStore {
+ public:
+ UnsaveableBackingStore(const PathString& dir_name,
+ const PathString& backing_filepath)
+ : DirectoryBackingStore(dir_name, backing_filepath) { }
+ virtual bool SaveChanges(const Directory::SaveChangesSnapshot& snapshot) {
+ return false;
+ }
+ };
+ virtual DirectoryBackingStore* CreateBackingStore(
+ const PathString& dir_name, const PathString& backing_filepath) {
+ return new UnsaveableBackingStore(dir_name, backing_filepath);
+ }
+};
+
+// Test suite for syncable::Directory.
+class SyncableDirectoryTest : public testing::Test {
+ protected:
+ static const PathString kFilePath;
+ static const PathString kName;
+ static const PathChar *kSqlite3File;
+ static const Id kId;
+
+ // SetUp() is called before each test case is run; it deletes any existing
+ // sqlite3 DB so every test starts from a fresh directory.
+ virtual void SetUp() {
+ PathRemove(PathString(kSqlite3File));
+ dir_.reset(new Directory());
+ ASSERT_TRUE(dir_.get());
+ ASSERT_EQ(OPENED, dir_->Open(kFilePath, kName));
+ ASSERT_TRUE(dir_->good());
+ }
+
+ virtual void TearDown() {
+ // This also closes file handles.
+ dir_->SaveChanges();
+ dir_.reset();
+ PathRemove(PathString(kSqlite3File));
+ }
+
+ scoped_ptr<Directory> dir_;
+
+ // Creates an empty entry and sets the ID field to the default kId.
+ void CreateEntry(const PathString &entryname) {
+ CreateEntry(entryname, kId);
+ }
+
+ // Creates an empty entry and sets the ID field to id.
+ void CreateEntry(const PathString &entryname, const int id) {
+ CreateEntry(entryname, TestIdFactory::FromNumber(id));
+ }
+ void CreateEntry(const PathString &entryname, Id id) {
+ WriteTransaction wtrans(dir_.get(), UNITTEST, __FILE__, __LINE__);
+ MutableEntry me(&wtrans, CREATE, wtrans.root_id(), entryname);
+ ASSERT_TRUE(me.good());
+ me.Put(ID, id);
+ me.Put(IS_UNSYNCED, true);
+ }
+
+ void ValidateEntry(BaseTransaction *trans, int64 id, bool check_name,
+ PathString name, int64 base_version, int64 server_version, bool is_del);
+ void CreateAndCheck(WriteTransaction *trans, int64 parent_id, int64 id,
+ PathString name, PathString server_name, int64 version,
+ bool set_server_fields, bool is_dir, bool add_to_lru, int64 *meta_handle);
+};
+
+const PathString SyncableDirectoryTest::kFilePath(PSTR("Test.sqlite3"));
+const PathChar* SyncableDirectoryTest::kSqlite3File(PSTR("Test.sqlite3"));
+const PathString SyncableDirectoryTest::kName(PSTR("Foo"));
+const Id SyncableDirectoryTest::kId(TestIdFactory::FromNumber(-99));
+
+TEST_F(SyncableDirectoryTest, TestBasicLookupNonExistantID) {
+ ReadTransaction rtrans(dir_.get(), __FILE__, __LINE__);
+ Entry e(&rtrans, GET_BY_ID, kId);
+ ASSERT_FALSE(e.good());
+}
+
+TEST_F(SyncableDirectoryTest, TestBasicLookupValidID) {
+ CreateEntry(PSTR("rtc"));
+ ReadTransaction rtrans(dir_.get(), __FILE__, __LINE__);
+ Entry e(&rtrans, GET_BY_ID, kId);
+ ASSERT_TRUE(e.good());
+}
+
+TEST_F(SyncableDirectoryTest, TestBasicCaseSensitivity) {
+ PathString name = PSTR("RYAN");
+ PathString conflicting_name = PSTR("ryan");
+ CreateEntry(name);
+
+ WriteTransaction wtrans(dir_.get(), UNITTEST, __FILE__, __LINE__);
+ MutableEntry me(&wtrans, CREATE, wtrans.root_id(), conflicting_name);
+ ASSERT_FALSE(me.good());
+}
+
+TEST_F(SyncableDirectoryTest, TestDelete) {
+ PathString name = PSTR("peanut butter jelly time");
+ WriteTransaction trans(dir_.get(), UNITTEST, __FILE__, __LINE__);
+ MutableEntry e1(&trans, CREATE, trans.root_id(), name);
+ ASSERT_TRUE(e1.good());
+ ASSERT_TRUE(e1.Put(IS_DEL, true));
+ MutableEntry e2(&trans, CREATE, trans.root_id(), name);
+ ASSERT_TRUE(e2.good());
+ ASSERT_TRUE(e2.Put(IS_DEL, true));
+ MutableEntry e3(&trans, CREATE, trans.root_id(), name);
+ ASSERT_TRUE(e3.good());
+ ASSERT_TRUE(e3.Put(IS_DEL, true));
+
+ ASSERT_TRUE(e3.Put(IS_DEL, false));
+ ASSERT_FALSE(e1.Put(IS_DEL, false));
+ ASSERT_FALSE(e2.Put(IS_DEL, false));
+ ASSERT_TRUE(e3.Put(IS_DEL, true));
+
+ ASSERT_TRUE(e1.Put(IS_DEL, false));
+ ASSERT_FALSE(e2.Put(IS_DEL, false));
+ ASSERT_FALSE(e3.Put(IS_DEL, false));
+ ASSERT_TRUE(e1.Put(IS_DEL, true));
+}
+
+TEST_F(SyncableDirectoryTest, TestGetFullPathNeverCrashes) {
+ PathString dirname = PSTR("honey"),
+ childname = PSTR("jelly");
+ WriteTransaction trans(dir_.get(), UNITTEST, __FILE__, __LINE__);
+ MutableEntry e1(&trans, CREATE, trans.root_id(), dirname);
+ ASSERT_TRUE(e1.good());
+ ASSERT_TRUE(e1.Put(IS_DIR, true));
+ MutableEntry e2(&trans, CREATE, e1.Get(ID), childname);
+ ASSERT_TRUE(e2.good());
+ PathString path = GetFullPath(&trans, e2);
+ ASSERT_FALSE(path.empty());
+ // Give the child a parent that doesn't exist.
+ e2.Put(PARENT_ID, TestIdFactory::FromNumber(42));
+ path = GetFullPath(&trans, e2);
+ ASSERT_TRUE(path.empty());
+ // Done testing, make sure CheckTreeInvariants doesn't choke.
+ e2.Put(PARENT_ID, e1.Get(ID));
+ e2.Put(IS_DEL, true);
+ e1.Put(IS_DEL, true);
+}
+
+TEST_F(SyncableDirectoryTest, TestGetUnsynced) {
+ Directory::UnsyncedMetaHandles handles;
+ int64 handle1, handle2;
+ {
+ WriteTransaction trans(dir_.get(), UNITTEST, __FILE__, __LINE__);
+
+ dir_->GetUnsyncedMetaHandles(&trans, &handles);
+ ASSERT_EQ(0, handles.size());
+
+ MutableEntry e1(&trans, CREATE, trans.root_id(), PSTR("abba"));
+ ASSERT_TRUE(e1.good());
+ handle1 = e1.Get(META_HANDLE);
+ e1.Put(BASE_VERSION, 1);
+ e1.Put(IS_DIR, true);
+ e1.Put(ID, TestIdFactory::FromNumber(101));
+
+ MutableEntry e2(&trans, CREATE, e1.Get(ID), PSTR("bread"));
+ ASSERT_TRUE(e2.good());
+ handle2 = e2.Get(META_HANDLE);
+ e2.Put(BASE_VERSION, 1);
+ e2.Put(ID, TestIdFactory::FromNumber(102));
+ }
+ dir_->SaveChanges();
+ {
+ WriteTransaction trans(dir_.get(), UNITTEST, __FILE__, __LINE__);
+
+ dir_->GetUnsyncedMetaHandles(&trans, &handles);
+ ASSERT_EQ(0, handles.size());
+
+ MutableEntry e3(&trans, GET_BY_HANDLE, handle1);
+ ASSERT_TRUE(e3.good());
+ e3.Put(IS_UNSYNCED, true);
+ }
+ dir_->SaveChanges();
+ {
+ WriteTransaction trans(dir_.get(), UNITTEST, __FILE__, __LINE__);
+ dir_->GetUnsyncedMetaHandles(&trans, &handles);
+ ASSERT_EQ(1, handles.size());
+ ASSERT_TRUE(handle1 == handles[0]);
+
+ MutableEntry e4(&trans, GET_BY_HANDLE, handle2);
+ ASSERT_TRUE(e4.good());
+ e4.Put(IS_UNSYNCED, true);
+ }
+ dir_->SaveChanges();
+ {
+ WriteTransaction trans(dir_.get(), UNITTEST, __FILE__, __LINE__);
+ dir_->GetUnsyncedMetaHandles(&trans, &handles);
+ ASSERT_EQ(2, handles.size());
+ if (handle1 == handles[0]) {
+ ASSERT_TRUE(handle2 == handles[1]);
+ } else {
+ ASSERT_TRUE(handle2 == handles[0]);
+ ASSERT_TRUE(handle1 == handles[1]);
+ }
+
+ MutableEntry e5(&trans, GET_BY_HANDLE, handle1);
+ ASSERT_TRUE(e5.good());
+ ASSERT_TRUE(e5.Get(IS_UNSYNCED));
+ ASSERT_TRUE(e5.Put(IS_UNSYNCED, false));
+ ASSERT_FALSE(e5.Get(IS_UNSYNCED));
+ }
+ dir_->SaveChanges();
+ {
+ WriteTransaction trans(dir_.get(), UNITTEST, __FILE__, __LINE__);
+ dir_->GetUnsyncedMetaHandles(&trans, &handles);
+ ASSERT_EQ(1, handles.size());
+ ASSERT_TRUE(handle2 == handles[0]);
+ }
+}
+
+TEST_F(SyncableDirectoryTest, TestGetUnappliedUpdates) {
+ Directory::UnappliedUpdateMetaHandles handles;
+ int64 handle1, handle2;
+ {
+ WriteTransaction trans(dir_.get(), UNITTEST, __FILE__, __LINE__);
+
+ dir_->GetUnappliedUpdateMetaHandles(&trans, &handles);
+ ASSERT_EQ(0, handles.size());
+
+ MutableEntry e1(&trans, CREATE, trans.root_id(), PSTR("abba"));
+ ASSERT_TRUE(e1.good());
+ handle1 = e1.Get(META_HANDLE);
+ e1.Put(IS_UNAPPLIED_UPDATE, false);
+ e1.Put(BASE_VERSION, 1);
+ e1.Put(ID, TestIdFactory::FromNumber(101));
+ e1.Put(IS_DIR, true);
+
+ MutableEntry e2(&trans, CREATE, e1.Get(ID), PSTR("bread"));
+ ASSERT_TRUE(e2.good());
+ handle2 = e2.Get(META_HANDLE);
+ e2.Put(IS_UNAPPLIED_UPDATE, false);
+ e2.Put(BASE_VERSION, 1);
+ e2.Put(ID, TestIdFactory::FromNumber(102));
+ }
+ dir_->SaveChanges();
+ {
+ WriteTransaction trans(dir_.get(), UNITTEST, __FILE__, __LINE__);
+
+ dir_->GetUnappliedUpdateMetaHandles(&trans, &handles);
+ ASSERT_EQ(0, handles.size());
+
+ MutableEntry e3(&trans, GET_BY_HANDLE, handle1);
+ ASSERT_TRUE(e3.good());
+ e3.Put(IS_UNAPPLIED_UPDATE, true);
+ }
+ dir_->SaveChanges();
+ {
+ WriteTransaction trans(dir_.get(), UNITTEST, __FILE__, __LINE__);
+ dir_->GetUnappliedUpdateMetaHandles(&trans, &handles);
+ ASSERT_EQ(1, handles.size());
+ ASSERT_TRUE(handle1 == handles[0]);
+
+ MutableEntry e4(&trans, GET_BY_HANDLE, handle2);
+ ASSERT_TRUE(e4.good());
+ e4.Put(IS_UNAPPLIED_UPDATE, true);
+ }
+ dir_->SaveChanges();
+ {
+ WriteTransaction trans(dir_.get(), UNITTEST, __FILE__, __LINE__);
+ dir_->GetUnappliedUpdateMetaHandles(&trans, &handles);
+ ASSERT_EQ(2, handles.size());
+ if (handle1 == handles[0]) {
+ ASSERT_TRUE(handle2 == handles[1]);
+ } else {
+ ASSERT_TRUE(handle2 == handles[0]);
+ ASSERT_TRUE(handle1 == handles[1]);
+ }
+
+ MutableEntry e5(&trans, GET_BY_HANDLE, handle1);
+ ASSERT_TRUE(e5.good());
+ e5.Put(IS_UNAPPLIED_UPDATE, false);
+ }
+ dir_->SaveChanges();
+ {
+ WriteTransaction trans(dir_.get(), UNITTEST, __FILE__, __LINE__);
+ dir_->GetUnappliedUpdateMetaHandles(&trans, &handles);
+ ASSERT_EQ(1, handles.size());
+ ASSERT_TRUE(handle2 == handles[0]);
+ }
+}
+
+
+TEST_F(SyncableDirectoryTest, DeleteBug_531383) {
+ // Try to provoke a CHECK failure...
+ TestIdFactory id_factory;
+ int64 grandchild_handle, twin_handle;
+ {
+ WriteTransaction wtrans(dir_.get(), UNITTEST, __FILE__, __LINE__);
+ MutableEntry parent(&wtrans, CREATE, id_factory.root(), PSTR("Bob"));
+ ASSERT_TRUE(parent.good());
+ parent.Put(IS_DIR, true);
+ parent.Put(ID, id_factory.NewServerId());
+ parent.Put(BASE_VERSION, 1);
+ MutableEntry child(&wtrans, CREATE, parent.Get(ID), PSTR("Bob"));
+ ASSERT_TRUE(child.good());
+ child.Put(IS_DIR, true);
+ child.Put(ID, id_factory.NewServerId());
+ child.Put(BASE_VERSION, 1);
+ MutableEntry grandchild(&wtrans, CREATE, child.Get(ID), PSTR("Bob"));
+ ASSERT_TRUE(grandchild.good());
+ grandchild.Put(ID, id_factory.NewServerId());
+ grandchild.Put(BASE_VERSION, 1);
+ ASSERT_TRUE(grandchild.Put(IS_DEL, true));
+ MutableEntry twin(&wtrans, CREATE, child.Get(ID), PSTR("Bob"));
+ ASSERT_TRUE(twin.good());
+ ASSERT_TRUE(twin.Put(IS_DEL, true));
+ ASSERT_TRUE(grandchild.Put(IS_DEL, false));
+ ASSERT_FALSE(twin.Put(IS_DEL, false));
+ grandchild_handle = grandchild.Get(META_HANDLE);
+ twin_handle = twin.Get(META_HANDLE);
+ }
+ dir_->SaveChanges();
+ {
+ WriteTransaction wtrans(dir_.get(), UNITTEST, __FILE__, __LINE__);
+ MutableEntry grandchild(&wtrans, GET_BY_HANDLE, grandchild_handle);
+ grandchild.Put(IS_DEL, true); // Used to CHECK fail here.
+ }
+}
+
+static inline bool IsLegalNewParent(const Entry& a, const Entry& b) {
+ return IsLegalNewParent(a.trans(), a.Get(ID), b.Get(ID));
+}
+
+TEST_F(SyncableDirectoryTest, TestIsLegalNewParent) {
+ TestIdFactory id_factory;
+ WriteTransaction wtrans(dir_.get(), UNITTEST, __FILE__, __LINE__);
+ Entry root(&wtrans, GET_BY_ID, id_factory.root());
+ ASSERT_TRUE(root.good());
+ MutableEntry parent(&wtrans, CREATE, root.Get(ID), PSTR("Bob"));
+ ASSERT_TRUE(parent.good());
+ parent.Put(IS_DIR, true);
+ parent.Put(ID, id_factory.NewServerId());
+ parent.Put(BASE_VERSION, 1);
+ MutableEntry child(&wtrans, CREATE, parent.Get(ID), PSTR("Bob"));
+ ASSERT_TRUE(child.good());
+ child.Put(IS_DIR, true);
+ child.Put(ID, id_factory.NewServerId());
+ child.Put(BASE_VERSION, 1);
+ MutableEntry grandchild(&wtrans, CREATE, child.Get(ID), PSTR("Bob"));
+ ASSERT_TRUE(grandchild.good());
+ grandchild.Put(ID, id_factory.NewServerId());
+ grandchild.Put(BASE_VERSION, 1);
+
+ MutableEntry parent2(&wtrans, CREATE, root.Get(ID), PSTR("Pete"));
+ ASSERT_TRUE(parent2.good());
+ parent2.Put(IS_DIR, true);
+ parent2.Put(ID, id_factory.NewServerId());
+ parent2.Put(BASE_VERSION, 1);
+ MutableEntry child2(&wtrans, CREATE, parent2.Get(ID), PSTR("Pete"));
+ ASSERT_TRUE(child2.good());
+ child2.Put(IS_DIR, true);
+ child2.Put(ID, id_factory.NewServerId());
+ child2.Put(BASE_VERSION, 1);
+ MutableEntry grandchild2(&wtrans, CREATE, child2.Get(ID), PSTR("Pete"));
+ ASSERT_TRUE(grandchild2.good());
+ grandchild2.Put(ID, id_factory.NewServerId());
+ grandchild2.Put(BASE_VERSION, 1);
+ // resulting tree
+ // root
+ // / \
+ // parent parent2
+ // | |
+ // child child2
+ // | |
+ // grandchild grandchild2
+ ASSERT_TRUE(IsLegalNewParent(child, root));
+ ASSERT_TRUE(IsLegalNewParent(child, parent));
+ ASSERT_FALSE(IsLegalNewParent(child, child));
+ ASSERT_FALSE(IsLegalNewParent(child, grandchild));
+ ASSERT_TRUE(IsLegalNewParent(child, parent2));
+ ASSERT_TRUE(IsLegalNewParent(child, grandchild2));
+ ASSERT_FALSE(IsLegalNewParent(parent, grandchild));
+ ASSERT_FALSE(IsLegalNewParent(root, grandchild));
+ ASSERT_FALSE(IsLegalNewParent(parent, grandchild));
+}
+
+TEST_F(SyncableDirectoryTest, TestFindEntryInFolder) {
+ // Create a subdir and an entry.
+ int64 entry_handle;
+ {
+ WriteTransaction trans(dir_.get(), UNITTEST, __FILE__, __LINE__);
+ MutableEntry folder(&trans, CREATE, trans.root_id(), PSTR("folder"));
+ ASSERT_TRUE(folder.good());
+ EXPECT_TRUE(folder.Put(IS_DIR, true));
+ EXPECT_TRUE(folder.Put(IS_UNSYNCED, true));
+ MutableEntry entry(&trans, CREATE, folder.Get(ID), PSTR("entry"));
+ ASSERT_TRUE(entry.good());
+ entry_handle = entry.Get(META_HANDLE);
+ entry.Put(IS_UNSYNCED, true);
+ }
+
+ // Make sure we can find the entry in the folder.
+ {
+ ReadTransaction trans(dir_.get(), __FILE__, __LINE__);
+ Entry entry(&trans, GET_BY_PATH, PathString(kPathSeparator) +
+ PSTR("folder") +
+ kPathSeparator + PSTR("entry"));
+ ASSERT_TRUE(entry.good());
+ ASSERT_EQ(entry.Get(META_HANDLE), entry_handle);
+ }
+}
+
+TEST_F(SyncableDirectoryTest, TestGetByParentIdAndName) {
+ PathString name = PSTR("Bob");
+ Id id = TestIdFactory::MakeServer("ID for Bob");
+ {
+ WriteTransaction wtrans(dir_.get(), UNITTEST, __FILE__, __LINE__);
+ MutableEntry entry(&wtrans, CREATE, wtrans.root_id() /*entry id*/, name);
+ ASSERT_TRUE(entry.good());
+ entry.Put(IS_DIR, true);
+ entry.Put(ID, id);
+ entry.Put(BASE_VERSION, 1);
+ entry.Put(IS_UNSYNCED, true);
+ }
+ {
+ WriteTransaction wtrans(dir_.get(), UNITTEST, __FILE__, __LINE__);
+ MutableEntry entry(&wtrans, GET_BY_PARENTID_AND_NAME, wtrans.root_id(),
+ name);
+ ASSERT_TRUE(entry.good());
+ ASSERT_EQ(id, entry.Get(ID));
+ }
+ {
+ ReadTransaction trans(dir_.get(), __FILE__, __LINE__);
+ Entry entry(&trans, GET_BY_PARENTID_AND_NAME, trans.root_id(), name);
+ ASSERT_TRUE(entry.good());
+ ASSERT_EQ(id, entry.Get(ID));
+ }
+}
+
+TEST_F(SyncableDirectoryTest, TestParentIDIndexUpdate) {
+ WriteTransaction wt(dir_.get(), UNITTEST, __FILE__, __LINE__);
+ MutableEntry folder(&wt, CREATE, wt.root_id(), PSTR("oldname"));
+ folder.Put(NAME, PSTR("newname"));
+ folder.Put(IS_UNSYNCED, true);
+ Entry entry(&wt, GET_BY_PATH, PSTR("newname"));
+ ASSERT_TRUE(entry.good());
+}
+
+TEST_F(SyncableDirectoryTest, TestNoReindexDeletedItems) {
+ WriteTransaction trans(dir_.get(), UNITTEST, __FILE__, __LINE__);
+ MutableEntry folder(&trans, CREATE, trans.root_id(), PSTR("folder"));
+ ASSERT_TRUE(folder.good());
+ ASSERT_TRUE(folder.Put(IS_DIR, true));
+ ASSERT_TRUE(folder.Put(IS_DEL, true));
+ Entry gone(&trans, GET_BY_PARENTID_AND_NAME, trans.root_id(), PSTR("folder"));
+ ASSERT_FALSE(gone.good());
+ ASSERT_TRUE(folder.PutParentIdAndName(trans.root_id(),
+ Name(PSTR("new_name"))));
+}
+
+TEST_F(SyncableDirectoryTest, TestCaseChangeRename) {
+ WriteTransaction trans(dir_.get(), UNITTEST, __FILE__, __LINE__);
+ MutableEntry folder(&trans, CREATE, trans.root_id(), PSTR("CaseChange"));
+ ASSERT_TRUE(folder.good());
+ EXPECT_TRUE(folder.PutParentIdAndName(trans.root_id(),
+ Name(PSTR("CASECHANGE"))));
+ EXPECT_TRUE(folder.Put(IS_DEL, true));
+}
+
+TEST_F(SyncableDirectoryTest, TestShareInfo) {
+ dir_->set_last_sync_timestamp(100);
+ dir_->set_store_birthday("Jan 31st");
+ {
+ ReadTransaction trans(dir_.get(), __FILE__, __LINE__);
+ EXPECT_EQ(100, dir_->last_sync_timestamp());
+ EXPECT_EQ("Jan 31st", dir_->store_birthday());
+ }
+ dir_->set_last_sync_timestamp(200);
+ dir_->set_store_birthday("April 10th");
+ dir_->SaveChanges();
+ {
+ ReadTransaction trans(dir_.get(), __FILE__, __LINE__);
+ EXPECT_EQ(200, dir_->last_sync_timestamp());
+ EXPECT_EQ("April 10th", dir_->store_birthday());
+ }
+}
+
+TEST_F(SyncableDirectoryTest, TestSimpleFieldsPreservedDuringSaveChanges) {
+ Id id = TestIdFactory::FromNumber(1);
+ EntryKernel create_pre_save, update_pre_save;
+ EntryKernel create_post_save, update_post_save;
+ {
+ WriteTransaction trans(dir_.get(), UNITTEST, __FILE__, __LINE__);
+ MutableEntry create(&trans, CREATE, trans.root_id(), PSTR("Create"));
+ MutableEntry update(&trans, CREATE_NEW_UPDATE_ITEM, id);
+ create.Put(IS_UNSYNCED, true);
+ update.Put(IS_UNAPPLIED_UPDATE, true);
+ create_pre_save = create.GetKernelCopy();
+ update_pre_save = update.GetKernelCopy();
+ }
+ dir_->SaveChanges();
+ {
+ ReadTransaction trans(dir_.get(), __FILE__, __LINE__);
+ Entry create(&trans, GET_BY_PARENTID_AND_NAME, trans.root_id(),
+ PSTR("Create"));
+ Entry update(&trans, GET_BY_ID, id);
+ create_post_save = create.GetKernelCopy();
+ update_post_save = update.GetKernelCopy();
+ }
+ int i = BEGIN_FIELDS;
+ for ( ; i < INT64_FIELDS_END ; ++i) {
+ EXPECT_EQ(create_pre_save.ref((Int64Field)i),
+ create_post_save.ref((Int64Field)i))
+ << "int64 field #" << i << " changed during save/load";
+ EXPECT_EQ(update_pre_save.ref((Int64Field)i),
+ update_post_save.ref((Int64Field)i))
+ << "int64 field #" << i << " changed during save/load";
+ }
+ for ( ; i < ID_FIELDS_END ; ++i) {
+ EXPECT_EQ(create_pre_save.ref((IdField)i),
+ create_post_save.ref((IdField)i))
+ << "id field #" << i << " changed during save/load";
+ EXPECT_EQ(update_pre_save.ref((IdField)i),
+ update_pre_save.ref((IdField)i))
+ << "id field #" << i << " changed during save/load";
+ }
+ for ( ; i < BIT_FIELDS_END ; ++i) {
+ EXPECT_EQ(create_pre_save.ref((BitField)i),
+ create_post_save.ref((BitField)i))
+ << "Bit field #" << i << " changed during save/load";
+ EXPECT_EQ(update_pre_save.ref((BitField)i),
+ update_post_save.ref((BitField)i))
+ << "Bit field #" << i << " changed during save/load";
+ }
+ for ( ; i < STRING_FIELDS_END ; ++i) {
+ EXPECT_EQ(create_pre_save.ref((StringField)i),
+ create_post_save.ref((StringField)i))
+ << "String field #" << i << " changed during save/load";
+ EXPECT_EQ(update_pre_save.ref((StringField)i),
+ update_post_save.ref((StringField)i))
+ << "String field #" << i << " changed during save/load";
+ }
+}
+
+TEST_F(SyncableDirectoryTest, TestSaveChangesFailure) {
+ int64 handle1 = 0;
+ {
+ WriteTransaction trans(dir_.get(), UNITTEST, __FILE__, __LINE__);
+
+ MutableEntry e1(&trans, CREATE, trans.root_id(), PSTR("aguilera"));
+ ASSERT_TRUE(e1.good());
+ handle1 = e1.Get(META_HANDLE);
+ e1.Put(BASE_VERSION, 1);
+ e1.Put(IS_DIR, true);
+ e1.Put(ID, TestIdFactory::FromNumber(101));
+ }
+ ASSERT_TRUE(dir_->SaveChanges());
+
+ dir_.reset(new TestUnsaveableDirectory());
+ ASSERT_TRUE(dir_.get());
+ ASSERT_EQ(OPENED, dir_->Open(kFilePath, kName));
+ ASSERT_TRUE(dir_->good());
+ int64 handle2 = 0;
+ {
+ WriteTransaction trans(dir_.get(), UNITTEST, __FILE__, __LINE__);
+
+ MutableEntry aguilera(&trans, GET_BY_HANDLE, handle1);
+ ASSERT_TRUE(aguilera.good());
+ aguilera.Put(NAME, PSTR("christina"));
+ ASSERT_TRUE(aguilera.GetKernelCopy().dirty[NAME]);
+
+ MutableEntry kids_on_block(&trans, CREATE, trans.root_id(), PSTR("kids"));
+ ASSERT_TRUE(kids_on_block.good());
+ handle2 = kids_on_block.Get(META_HANDLE);
+ kids_on_block.Put(BASE_VERSION, 1);
+ kids_on_block.Put(IS_DIR, true);
+ kids_on_block.Put(ID, TestIdFactory::FromNumber(102));
+ EXPECT_TRUE(kids_on_block.Get(IS_NEW));
+ }
+
+ // We are using an unsaveable directory, so this can't succeed. However,
+ // the HandleSaveChangesFailure code path should have been triggered.
+ ASSERT_FALSE(dir_->SaveChanges());
+
+ // Make sure things were rolled back and the world is as it was before the
+ // call.
+ {
+ ReadTransaction trans(dir_.get(), __FILE__, __LINE__);
+ Entry e1(&trans, GET_BY_HANDLE, handle1);
+ ASSERT_TRUE(e1.good());
+ const EntryKernel& aguilera = e1.GetKernelCopy();
+ Entry kids_on_block(&trans, GET_BY_HANDLE, handle2);
+ ASSERT_TRUE(kids_on_block.good());
+
+ EXPECT_TRUE(aguilera.dirty[NAME]);
+ EXPECT_TRUE(kids_on_block.Get(IS_NEW));
+ }
+}
+
+
+void SyncableDirectoryTest::ValidateEntry(BaseTransaction *trans, int64 id,
+ bool check_name, PathString name, int64 base_version, int64 server_version,
+ bool is_del) {
+ Entry e(trans, GET_BY_ID, TestIdFactory::FromNumber(id));
+ ASSERT_TRUE(e.good());
+ if (check_name)
+ ASSERT_EQ(name, e.Get(NAME));
+ ASSERT_EQ(base_version, e.Get(BASE_VERSION));
+ ASSERT_EQ(server_version, e.Get(SERVER_VERSION));
+ ASSERT_EQ(is_del, e.Get(IS_DEL));
+}
+
+TEST(SyncableDirectoryManager, TestFileRelease) {
+ DirectoryManager dm(PSTR("."));
+ ASSERT_TRUE(dm.Open(PSTR("ScopeTest")));
+ {
+ ScopedDirLookup(&dm, PSTR("ScopeTest"));
+ }
+ dm.Close(PSTR("ScopeTest"));
+ ASSERT_EQ(0, PathRemove(dm.GetSyncDataDatabasePath()));
+}
+
+static void* OpenTestThreadMain(void* arg) {
+ DirectoryManager* const dm = reinterpret_cast<DirectoryManager*>(arg);
+ CHECK(dm->Open(PSTR("Open")));
+ return 0;
+}
+
+TEST(SyncableDirectoryManager, ThreadOpenTest) {
+ DirectoryManager dm(PSTR("."));
+ pthread_t thread;
+ ASSERT_EQ(0, pthread_create(&thread, 0, OpenTestThreadMain, &dm));
+ void* result;
+ ASSERT_EQ(0, pthread_join(thread, &result));
+ {
+ ScopedDirLookup dir(&dm, PSTR("Open"));
+ ASSERT_TRUE(dir.good());
+ }
+ dm.Close(PSTR("Open"));
+ ScopedDirLookup dir(&dm, PSTR("Open"));
+ ASSERT_FALSE(dir.good());
+}
+
+namespace ThreadBug1 {
+ struct Step {
+ PThreadMutex mutex;
+ PThreadCondVar condvar;
+ int number;
+ int64 metahandle;
+ };
+ struct ThreadArg {
+ int role; // 0 or 1, meaning this thread does the odd or even steps.
+ Step* step;
+ DirectoryManager* dirman;
+ };
+
+ void* ThreadMain(void* arg) {
+ ThreadArg* const args = reinterpret_cast<ThreadArg*>(arg);
+ const int role = args->role;
+ Step* const step = args->step;
+ DirectoryManager* const dirman = args->dirman;
+ const PathString dirname = PSTR("ThreadBug1");
+ PThreadScopedLock<PThreadMutex> lock(&step->mutex);
+ while (step->number < 3) {
+ while (step->number % 2 != role)
+ pthread_cond_wait(&step->condvar.condvar_, &step->mutex.mutex_);
+ switch (step->number) {
+ case 0:
+ dirman->Open(dirname);
+ break;
+ case 1:
+ {
+ dirman->Close(dirname);
+ dirman->Open(dirname);
+ ScopedDirLookup dir(dirman, dirname);
+ CHECK(dir.good());
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry me(&trans, CREATE, trans.root_id(), PSTR("Jeff"));
+ step->metahandle = me.Get(META_HANDLE);
+ me.Put(IS_UNSYNCED, true);
+ }
+ break;
+ case 2:
+ {
+ ScopedDirLookup dir(dirman, dirname);
+ CHECK(dir.good());
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry e(&trans, GET_BY_HANDLE, step->metahandle);
+ CHECK(e.good()); // Failed due to ThreadBug1
+ }
+ dirman->Close(dirname);
+ break;
+ }
+ step->number += 1;
+ pthread_cond_signal(&step->condvar.condvar_);
+ }
+ return 0;
+ }
+}
+
+TEST(SyncableDirectoryManager, ThreadBug1) {
+ using ThreadBug1::Step;
+ using ThreadBug1::ThreadArg;
+ using ThreadBug1::ThreadMain;
+
+ Step step;
+ step.number = 0;
+ DirectoryManager dirman(PSTR("."));
+ ThreadArg arg1 = { 0, &step, &dirman };
+ ThreadArg arg2 = { 1, &step, &dirman };
+ pthread_t thread1, thread2;
+ ASSERT_EQ(0, pthread_create(&thread1, NULL, &ThreadMain, &arg1));
+ ASSERT_EQ(0, pthread_create(&thread2, NULL, &ThreadMain, &arg2));
+ void* retval;
+ ASSERT_EQ(0, pthread_join(thread1, &retval));
+ ASSERT_EQ(0, pthread_join(thread2, &retval));
+}
+
+namespace DirectoryKernelStalenessBug {
+ // The in-memory information would get out of sync because a
+ // directory would be closed and re-opened, and then an old
+ // Directory::Kernel with stale information would get saved to the db.
+ typedef ThreadBug1::Step Step;
+ typedef ThreadBug1::ThreadArg ThreadArg;
+
+ void* ThreadMain(void* arg) {
+ const char test_bytes[] = "test data";
+ ThreadArg* const args = reinterpret_cast<ThreadArg*>(arg);
+ const int role = args->role;
+ Step* const step = args->step;
+ DirectoryManager* const dirman = args->dirman;
+ const PathString dirname = PSTR("DirectoryKernelStalenessBug");
+ PThreadScopedLock<PThreadMutex> lock(&step->mutex);
+ while (step->number < 4) {
+ while (step->number % 2 != role)
+ pthread_cond_wait(&step->condvar.condvar_, &step->mutex.mutex_);
+ switch (step->number) {
+ case 0:
+ {
+ // Clean up remnants of earlier test runs.
+ PathRemove(dirman->GetSyncDataDatabasePath());
+ // Test.
+ dirman->Open(dirname);
+ ScopedDirLookup dir(dirman, dirname);
+ CHECK(dir.good());
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry me(&trans, CREATE, trans.root_id(), PSTR("Jeff"));
+ me.Put(BASE_VERSION, 1);
+ me.Put(ID, TestIdFactory::FromNumber(100));
+ PutDataAsExtendedAttribute(&trans, &me, test_bytes,
+ sizeof(test_bytes));
+ }
+ {
+ ScopedDirLookup dir(dirman, dirname);
+ CHECK(dir.good());
+ dir->SaveChanges();
+ }
+ dirman->CloseAllDirectories();
+ break;
+ case 1:
+ {
+ dirman->Open(dirname);
+ ScopedDirLookup dir(dirman, dirname);
+ CHECK(dir.good());
+ }
+ break;
+ case 2:
+ {
+ ScopedDirLookup dir(dirman, dirname);
+ CHECK(dir.good());
+ }
+ break;
+ case 3:
+ {
+ ScopedDirLookup dir(dirman, dirname);
+ CHECK(dir.good());
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry e(&trans, GET_BY_PATH, PSTR("Jeff"));
+ ExpectDataFromExtendedAttributeEquals(&trans, &e, test_bytes,
+ sizeof(test_bytes));
+ }
+ // Same result as CloseAllDirectories, but more code coverage.
+ dirman->Close(dirname);
+ break;
+ }
+ step->number += 1;
+ pthread_cond_signal(&step->condvar.condvar_);
+ }
+ return 0;
+ }
+}
+
+TEST(SyncableDirectoryManager, DirectoryKernelStalenessBug) {
+ using DirectoryKernelStalenessBug::Step;
+ using DirectoryKernelStalenessBug::ThreadArg;
+ using DirectoryKernelStalenessBug::ThreadMain;
+
+ Step step;
+ step.number = 0;
+ DirectoryManager dirman(PSTR("."));
+ ThreadArg arg1 = { 0, &step, &dirman };
+ ThreadArg arg2 = { 1, &step, &dirman };
+ pthread_t thread1, thread2;
+ ASSERT_EQ(0, pthread_create(&thread1, NULL, &ThreadMain, &arg1));
+ ASSERT_EQ(0, pthread_create(&thread2, NULL, &ThreadMain, &arg2));
+ void* retval;
+ ASSERT_EQ(0, pthread_join(thread1, &retval));
+ ASSERT_EQ(0, pthread_join(thread2, &retval));
+}
+
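+// Adds two timespecs, carrying any nanosecond overflow into the seconds
+// field. For example, {1s, 900000000ns} + {0s, 200000000ns} yields
+// {2s, 100000000ns} (illustrative values only).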
+timespec operator + (const timespec& a, const timespec& b) {
+ const long nanos = a.tv_nsec + b.tv_nsec;
+ static const long nanos_per_second = 1000000000;
+ timespec r = { a.tv_sec + b.tv_sec + (nanos / nanos_per_second),
+ nanos % nanos_per_second };
+ return r;
+}
+
+void SleepMs(int milliseconds) {
+#ifdef OS_WINDOWS
+ Sleep(milliseconds);
+#else
+ usleep(milliseconds * 1000);
+#endif
+}
+
+namespace StressTransaction {
+ struct Globals {
+ DirectoryManager* dirman;
+ PathString dirname;
+ };
+
+ struct ThreadArg {
+ Globals* globals;
+ int thread_number;
+ };
+
+ void* ThreadMain(void* arg) {
+ ThreadArg* const args = reinterpret_cast<ThreadArg*>(arg);
+ Globals* const globals = args->globals;
+ ScopedDirLookup dir(globals->dirman, globals->dirname);
+ CHECK(dir.good());
+ int entry_count = 0;
+ PathString path_name;
+ for (int i = 0; i < 20; ++i) {
+ const int rand_action = rand() % 10;
+ if (rand_action < 4 && !path_name.empty()) {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry e(&trans, GET_BY_PARENTID_AND_NAME, trans.root_id(), path_name);
+ SleepMs(rand() % 10);
+ CHECK(e.good());
+ } else {
+ string unique_name = StringPrintf("%d.%d", args->thread_number,
+ entry_count++);
+ path_name.assign(unique_name.begin(), unique_name.end());
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry e(&trans, CREATE, trans.root_id(), path_name);
+ CHECK(e.good());
+ SleepMs(rand() % 20);
+ e.Put(IS_UNSYNCED, true);
+ if (e.Put(ID, TestIdFactory::FromNumber(rand())) &&
+ e.Get(ID).ServerKnows() && !e.Get(ID).IsRoot())
+ e.Put(BASE_VERSION, 1);
+ }
+ }
+ return 0;
+ }
+}
+
+TEST(SyncableDirectory, StressTransactions) {
+ using StressTransaction::Globals;
+ using StressTransaction::ThreadArg;
+ using StressTransaction::ThreadMain;
+
+ DirectoryManager dirman(PSTR("."));
+ Globals globals;
+ globals.dirname = PSTR("stress");
+ globals.dirman = &dirman;
+ PathRemove(dirman.GetSyncDataDatabasePath());
+ dirman.Open(globals.dirname);
+ const int kThreadCount = 7;
+ pthread_t threads[kThreadCount];
+ ThreadArg thread_args[kThreadCount];
+ for (int i = 0; i < kThreadCount; ++i) {
+ thread_args[i].thread_number = i;
+ thread_args[i].globals = &globals;
+ ASSERT_EQ(0, pthread_create(threads + i, NULL, &ThreadMain,
+ thread_args + i));
+ }
+ void* retval;
+ for (pthread_t* i = threads; i < threads + kThreadCount; ++i)
+ ASSERT_EQ(0, pthread_join(*i, &retval));
+ dirman.Close(globals.dirname);
+ PathRemove(dirman.GetSyncDataDatabasePath());
+}
+
+static PathString UTF8ToPathStringQuick(const char *str) {
+ PathString ret;
+ CHECK(browser_sync::UTF8ToPathString(str, strlen(str), &ret));
+ return ret;
+}
+
+// Returns the number of bytes used; the maximum possible is 4.
+// This algorithm was coded from the table at
+// http://en.wikipedia.org/w/index.php?title=UTF-8&oldid=153391259
+// There are no endianness issues.
+static int UTF32ToUTF8(uint32 incode, unsigned char *out) {
+ if (incode <= 0x7f) {
+ out[0] = incode;
+ return 1;
+ }
+ if (incode <= 0x7ff) {
+ out[0] = 0xC0;
+ out[0] |= (incode >> 6);
+ out[1] = 0x80;
+ out[1] |= (incode & 0x3F);
+ return 2;
+ }
+ if (incode <= 0xFFFF) {
+ if ((incode > 0xD7FF) && (incode < 0xE000))
+ return 0;
+ out[0] = 0xE0;
+ out[0] |= (incode >> 12);
+ out[1] = 0x80;
+ out[1] |= (incode >> 6) & 0x3F;
+ out[2] = 0x80;
+ out[2] |= incode & 0x3F;
+ return 3;
+ }
+ if (incode <= 0x10FFFF) {
+ out[0] = 0xF0;
+ out[0] |= incode >> 18;
+ out[1] = 0x80;
+ out[1] |= (incode >> 12) & 0x3F;
+ out[2] = 0x80;
+ out[2] |= (incode >> 6) & 0x3F;
+ out[3] = 0x80;
+ out[3] |= incode & 0x3F;
+ return 4;
+ }
+ return 0;
+}
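+
+// A few spot checks of the encoder above. The expected byte sequences follow
+// from the standard UTF-8 bit layout; the values are illustrative, not
+// exhaustive.
+TEST(Syncable, UTF32ToUTF8SpotCheck) {
+ unsigned char buf[4];
+ EXPECT_EQ(1, UTF32ToUTF8(0x00041, buf)); // 'A' -> 0x41
+ EXPECT_EQ(0x41, buf[0]);
+ EXPECT_EQ(2, UTF32ToUTF8(0x000E9, buf)); // U+00E9 -> 0xC3 0xA9
+ EXPECT_EQ(0xC3, buf[0]);
+ EXPECT_EQ(0xA9, buf[1]);
+ EXPECT_EQ(3, UTF32ToUTF8(0x020AC, buf)); // U+20AC -> 0xE2 0x82 0xAC
+ EXPECT_EQ(0xE2, buf[0]);
+ EXPECT_EQ(0x82, buf[1]);
+ EXPECT_EQ(0xAC, buf[2]);
+ EXPECT_EQ(0, UTF32ToUTF8(0x0D800, buf)); // surrogates are rejected
+}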
+
+TEST(Syncable, ComparePathNames) {
+ struct {
+ char a;
+ char b;
+ int expected_result;
+ } tests[] = {
+ { 'A', 'A', 0 },
+ { 'A', 'a', 0 },
+ { 'a', 'A', 0 },
+ { 'a', 'a', 0 },
+ { 'A', 'B', -1 },
+ { 'A', 'b', -1 },
+ { 'a', 'B', -1 },
+ { 'a', 'b', -1 },
+ { 'B', 'A', 1 },
+ { 'B', 'a', 1 },
+ { 'b', 'A', 1 },
+ { 'b', 'a', 1 } };
+ for (int i = 0; i < ARRAYSIZE(tests); ++i) {
+ PathString a(1, tests[i].a);
+ PathString b(1, tests[i].b);
+ const int result = ComparePathNames(a, b);
+ if (result != tests[i].expected_result) {
+ ADD_FAILURE() << "ComparePathNames(" << tests[i].a << ", " << tests[i].b
+ << ") returned " << result << "; expected "
+ << tests[i].expected_result;
+ }
+ }
+
+#ifndef OS_WINDOWS
+ // This table lists (to the best of my knowledge) every pair of characters
+ // in Unicode such that:
+ // for all i: tolower(kUpperToLowerMap[i].upper) == kUpperToLowerMap[i].lower
+ // It is then used to test that case-insensitive comparison of each pair
+ // returns 0 (that is, that they compare equal). After running the test on
+ // Mac OS X with the CFString API for comparison, the failing cases were
+ // commented out.
+ //
+ // Map of upper to lower case characters taken from
+ // ftp://ftp.unicode.org/Public/UNIDATA/UnicodeData.txt
+ typedef struct {
+ uint32 upper; // the upper case character
+ uint32 lower; // the lower case character that upper maps to
+ } UpperToLowerMapEntry;
+ static const UpperToLowerMapEntry kUpperToLowerMap[] = {
+ // { UPPER, lower }, { UPPER, lower }, etc...
+ // some of these are commented out because they fail on some OS.
+ { 0x00041, 0x00061 }, { 0x00042, 0x00062 }, { 0x00043, 0x00063 },
+ { 0x00044, 0x00064 }, { 0x00045, 0x00065 }, { 0x00046, 0x00066 },
+ { 0x00047, 0x00067 }, { 0x00048, 0x00068 }, { 0x00049, 0x00069 },
+ { 0x0004A, 0x0006A }, { 0x0004B, 0x0006B }, { 0x0004C, 0x0006C },
+ { 0x0004D, 0x0006D }, { 0x0004E, 0x0006E }, { 0x0004F, 0x0006F },
+ { 0x00050, 0x00070 }, { 0x00051, 0x00071 }, { 0x00052, 0x00072 },
+ { 0x00053, 0x00073 }, { 0x00054, 0x00074 }, { 0x00055, 0x00075 },
+ { 0x00056, 0x00076 }, { 0x00057, 0x00077 }, { 0x00058, 0x00078 },
+ { 0x00059, 0x00079 }, { 0x0005A, 0x0007A }, { 0x000C0, 0x000E0 },
+ { 0x000C1, 0x000E1 }, { 0x000C2, 0x000E2 }, { 0x000C3, 0x000E3 },
+ { 0x000C4, 0x000E4 }, { 0x000C5, 0x000E5 }, { 0x000C6, 0x000E6 },
+ { 0x000C7, 0x000E7 }, { 0x000C8, 0x000E8 }, { 0x000C9, 0x000E9 },
+ { 0x000CA, 0x000EA }, { 0x000CB, 0x000EB }, { 0x000CC, 0x000EC },
+ { 0x000CD, 0x000ED }, { 0x000CE, 0x000EE }, { 0x000CF, 0x000EF },
+ { 0x000D0, 0x000F0 }, { 0x000D1, 0x000F1 }, { 0x000D2, 0x000F2 },
+ { 0x000D3, 0x000F3 }, { 0x000D4, 0x000F4 }, { 0x000D5, 0x000F5 },
+ { 0x000D6, 0x000F6 }, { 0x000D8, 0x000F8 }, { 0x000D9, 0x000F9 },
+ { 0x000DA, 0x000FA }, { 0x000DB, 0x000FB }, { 0x000DC, 0x000FC },
+ { 0x000DD, 0x000FD }, { 0x000DE, 0x000FE },
+ { 0x00100, 0x00101 }, { 0x00102, 0x00103 }, { 0x00104, 0x00105 },
+ { 0x00106, 0x00107 }, { 0x00108, 0x00109 }, { 0x0010A, 0x0010B },
+ { 0x0010C, 0x0010D }, { 0x0010E, 0x0010F }, { 0x00110, 0x00111 },
+ { 0x00112, 0x00113 }, { 0x00114, 0x00115 }, { 0x00116, 0x00117 },
+ { 0x00118, 0x00119 }, { 0x0011A, 0x0011B }, { 0x0011C, 0x0011D },
+ { 0x0011E, 0x0011F }, { 0x00120, 0x00121 }, { 0x00122, 0x00123 },
+ { 0x00124, 0x00125 }, { 0x00126, 0x00127 }, { 0x00128, 0x00129 },
+ { 0x0012A, 0x0012B }, { 0x0012C, 0x0012D }, { 0x0012E, 0x0012F },
+ /*{ 0x00130, 0x00069 },*/ { 0x00132, 0x00133 }, { 0x00134, 0x00135 },
+ { 0x00136, 0x00137 }, { 0x00139, 0x0013A }, { 0x0013B, 0x0013C },
+ { 0x0013D, 0x0013E }, { 0x0013F, 0x00140 }, { 0x00141, 0x00142 },
+ { 0x00143, 0x00144 }, { 0x00145, 0x00146 }, { 0x00147, 0x00148 },
+ { 0x0014A, 0x0014B }, { 0x0014C, 0x0014D }, { 0x0014E, 0x0014F },
+ { 0x00150, 0x00151 }, { 0x00152, 0x00153 }, { 0x00154, 0x00155 },
+ { 0x00156, 0x00157 }, { 0x00158, 0x00159 }, { 0x0015A, 0x0015B },
+ { 0x0015C, 0x0015D }, { 0x0015E, 0x0015F }, { 0x00160, 0x00161 },
+ { 0x00162, 0x00163 }, { 0x00164, 0x00165 }, { 0x00166, 0x00167 },
+ { 0x00168, 0x00169 }, { 0x0016A, 0x0016B }, { 0x0016C, 0x0016D },
+ { 0x0016E, 0x0016F }, { 0x00170, 0x00171 }, { 0x00172, 0x00173 },
+ { 0x00174, 0x00175 }, { 0x00176, 0x00177 }, { 0x00178, 0x000FF },
+ { 0x00179, 0x0017A }, { 0x0017B, 0x0017C }, { 0x0017D, 0x0017E },
+ { 0x00181, 0x00253 }, { 0x00182, 0x00183 }, { 0x00184, 0x00185 },
+ { 0x00186, 0x00254 }, { 0x00187, 0x00188 }, { 0x00189, 0x00256 },
+ { 0x0018A, 0x00257 }, { 0x0018B, 0x0018C }, { 0x0018E, 0x001DD },
+ { 0x0018F, 0x00259 }, { 0x00190, 0x0025B }, { 0x00191, 0x00192 },
+ { 0x00193, 0x00260 }, { 0x00194, 0x00263 }, { 0x00196, 0x00269 },
+ { 0x00197, 0x00268 }, { 0x00198, 0x00199 }, { 0x0019C, 0x0026F },
+ { 0x0019D, 0x00272 }, { 0x0019F, 0x00275 }, { 0x001A0, 0x001A1 },
+ { 0x001A2, 0x001A3 }, { 0x001A4, 0x001A5 }, { 0x001A6, 0x00280 },
+ { 0x001A7, 0x001A8 }, { 0x001A9, 0x00283 }, { 0x001AC, 0x001AD },
+ { 0x001AE, 0x00288 }, { 0x001AF, 0x001B0 }, { 0x001B1, 0x0028A },
+ { 0x001B2, 0x0028B }, { 0x001B3, 0x001B4 }, { 0x001B5, 0x001B6 },
+ { 0x001B7, 0x00292 }, { 0x001B8, 0x001B9 }, { 0x001BC, 0x001BD },
+ { 0x001C4, 0x001C6 }, { 0x001C7, 0x001C9 }, { 0x001CA, 0x001CC },
+ { 0x001CD, 0x001CE }, { 0x001CF, 0x001D0 }, { 0x001D1, 0x001D2 },
+ { 0x001D3, 0x001D4 }, { 0x001D5, 0x001D6 }, { 0x001D7, 0x001D8 },
+ { 0x001D9, 0x001DA }, { 0x001DB, 0x001DC }, { 0x001DE, 0x001DF },
+ { 0x001E0, 0x001E1 }, { 0x001E2, 0x001E3 }, { 0x001E4, 0x001E5 },
+ { 0x001E6, 0x001E7 }, { 0x001E8, 0x001E9 }, { 0x001EA, 0x001EB },
+ { 0x001EC, 0x001ED }, { 0x001EE, 0x001EF }, { 0x001F1, 0x001F3 },
+ { 0x001F4, 0x001F5 }, { 0x001F6, 0x00195 }, { 0x001F7, 0x001BF },
+ { 0x001F8, 0x001F9 }, { 0x001FA, 0x001FB }, { 0x001FC, 0x001FD },
+ { 0x001FE, 0x001FF }, { 0x00200, 0x00201 }, { 0x00202, 0x00203 },
+ { 0x00204, 0x00205 }, { 0x00206, 0x00207 }, { 0x00208, 0x00209 },
+ { 0x0020A, 0x0020B }, { 0x0020C, 0x0020D }, { 0x0020E, 0x0020F },
+ { 0x00210, 0x00211 }, { 0x00212, 0x00213 }, { 0x00214, 0x00215 },
+ { 0x00216, 0x00217 }, { 0x00218, 0x00219 }, { 0x0021A, 0x0021B },
+ { 0x0021C, 0x0021D }, { 0x0021E, 0x0021F }, { 0x00220, 0x0019E },
+ { 0x00222, 0x00223 }, { 0x00224, 0x00225 }, { 0x00226, 0x00227 },
+ { 0x00228, 0x00229 }, { 0x0022A, 0x0022B }, { 0x0022C, 0x0022D },
+ { 0x0022E, 0x0022F }, { 0x00230, 0x00231 }, { 0x00232, 0x00233 },
+ /*{ 0x0023B, 0x0023C }, { 0x0023D, 0x0019A }, { 0x00241, 0x00294 }, */
+ { 0x00386, 0x003AC }, { 0x00388, 0x003AD }, { 0x00389, 0x003AE },
+ { 0x0038A, 0x003AF }, { 0x0038C, 0x003CC }, { 0x0038E, 0x003CD },
+ { 0x0038F, 0x003CE }, { 0x00391, 0x003B1 }, { 0x00392, 0x003B2 },
+ { 0x00393, 0x003B3 }, { 0x00394, 0x003B4 }, { 0x00395, 0x003B5 },
+ { 0x00396, 0x003B6 }, { 0x00397, 0x003B7 }, { 0x00398, 0x003B8 },
+ { 0x00399, 0x003B9 }, { 0x0039A, 0x003BA }, { 0x0039B, 0x003BB },
+ { 0x0039C, 0x003BC }, { 0x0039D, 0x003BD }, { 0x0039E, 0x003BE },
+ { 0x0039F, 0x003BF }, { 0x003A0, 0x003C0 }, { 0x003A1, 0x003C1 },
+ { 0x003A3, 0x003C3 }, { 0x003A4, 0x003C4 }, { 0x003A5, 0x003C5 },
+ { 0x003A6, 0x003C6 }, { 0x003A7, 0x003C7 }, { 0x003A8, 0x003C8 },
+ { 0x003A9, 0x003C9 }, { 0x003AA, 0x003CA }, { 0x003AB, 0x003CB },
+ { 0x003D8, 0x003D9 }, { 0x003DA, 0x003DB }, { 0x003DC, 0x003DD },
+ { 0x003DE, 0x003DF }, { 0x003E0, 0x003E1 }, { 0x003E2, 0x003E3 },
+ { 0x003E4, 0x003E5 }, { 0x003E6, 0x003E7 }, { 0x003E8, 0x003E9 },
+ { 0x003EA, 0x003EB }, { 0x003EC, 0x003ED }, { 0x003EE, 0x003EF },
+ { 0x003F4, 0x003B8 }, { 0x003F7, 0x003F8 }, { 0x003F9, 0x003F2 },
+ { 0x003FA, 0x003FB }, { 0x00400, 0x00450 }, { 0x00401, 0x00451 },
+ { 0x00402, 0x00452 }, { 0x00403, 0x00453 }, { 0x00404, 0x00454 },
+ { 0x00405, 0x00455 }, { 0x00406, 0x00456 }, { 0x00407, 0x00457 },
+ { 0x00408, 0x00458 }, { 0x00409, 0x00459 }, { 0x0040A, 0x0045A },
+ { 0x0040B, 0x0045B }, { 0x0040C, 0x0045C }, { 0x0040D, 0x0045D },
+ { 0x0040E, 0x0045E }, { 0x0040F, 0x0045F }, { 0x00410, 0x00430 },
+ { 0x00411, 0x00431 }, { 0x00412, 0x00432 }, { 0x00413, 0x00433 },
+ { 0x00414, 0x00434 }, { 0x00415, 0x00435 }, { 0x00416, 0x00436 },
+ { 0x00417, 0x00437 }, { 0x00418, 0x00438 }, { 0x00419, 0x00439 },
+ { 0x0041A, 0x0043A }, { 0x0041B, 0x0043B }, { 0x0041C, 0x0043C },
+ { 0x0041D, 0x0043D }, { 0x0041E, 0x0043E }, { 0x0041F, 0x0043F },
+ { 0x00420, 0x00440 }, { 0x00421, 0x00441 }, { 0x00422, 0x00442 },
+ { 0x00423, 0x00443 }, { 0x00424, 0x00444 }, { 0x00425, 0x00445 },
+ { 0x00426, 0x00446 }, { 0x00427, 0x00447 }, { 0x00428, 0x00448 },
+ { 0x00429, 0x00449 }, { 0x0042A, 0x0044A }, { 0x0042B, 0x0044B },
+ { 0x0042C, 0x0044C }, { 0x0042D, 0x0044D }, { 0x0042E, 0x0044E },
+ { 0x0042F, 0x0044F }, { 0x00460, 0x00461 }, { 0x00462, 0x00463 },
+ { 0x00464, 0x00465 }, { 0x00466, 0x00467 }, { 0x00468, 0x00469 },
+ { 0x0046A, 0x0046B }, { 0x0046C, 0x0046D }, { 0x0046E, 0x0046F },
+ { 0x00470, 0x00471 }, { 0x00472, 0x00473 }, { 0x00474, 0x00475 },
+ { 0x00476, 0x00477 }, { 0x00478, 0x00479 }, { 0x0047A, 0x0047B },
+ { 0x0047C, 0x0047D }, { 0x0047E, 0x0047F }, { 0x00480, 0x00481 },
+ { 0x0048A, 0x0048B }, { 0x0048C, 0x0048D }, { 0x0048E, 0x0048F },
+ { 0x00490, 0x00491 }, { 0x00492, 0x00493 }, { 0x00494, 0x00495 },
+ { 0x00496, 0x00497 }, { 0x00498, 0x00499 }, { 0x0049A, 0x0049B },
+ { 0x0049C, 0x0049D }, { 0x0049E, 0x0049F }, { 0x004A0, 0x004A1 },
+ { 0x004A2, 0x004A3 }, { 0x004A4, 0x004A5 }, { 0x004A6, 0x004A7 },
+ { 0x004A8, 0x004A9 }, { 0x004AA, 0x004AB }, { 0x004AC, 0x004AD },
+ { 0x004AE, 0x004AF }, { 0x004B0, 0x004B1 }, { 0x004B2, 0x004B3 },
+ { 0x004B4, 0x004B5 }, { 0x004B6, 0x004B7 }, { 0x004B8, 0x004B9 },
+ { 0x004BA, 0x004BB }, { 0x004BC, 0x004BD }, { 0x004BE, 0x004BF },
+ { 0x004C1, 0x004C2 }, { 0x004C3, 0x004C4 }, { 0x004C5, 0x004C6 },
+ { 0x004C7, 0x004C8 }, { 0x004C9, 0x004CA }, { 0x004CB, 0x004CC },
+ { 0x004CD, 0x004CE }, { 0x004D0, 0x004D1 }, { 0x004D2, 0x004D3 },
+ { 0x004D4, 0x004D5 }, { 0x004D6, 0x004D7 }, { 0x004D8, 0x004D9 },
+ { 0x004DA, 0x004DB }, { 0x004DC, 0x004DD }, { 0x004DE, 0x004DF },
+ { 0x004E0, 0x004E1 }, { 0x004E2, 0x004E3 }, { 0x004E4, 0x004E5 },
+ { 0x004E6, 0x004E7 }, { 0x004E8, 0x004E9 }, { 0x004EA, 0x004EB },
+ { 0x004EC, 0x004ED }, { 0x004EE, 0x004EF }, { 0x004F0, 0x004F1 },
+ { 0x004F2, 0x004F3 }, { 0x004F4, 0x004F5 }, /*{ 0x004F6, 0x004F7 }, */
+ { 0x004F8, 0x004F9 }, { 0x00500, 0x00501 }, { 0x00502, 0x00503 },
+ { 0x00504, 0x00505 }, { 0x00506, 0x00507 }, { 0x00508, 0x00509 },
+ { 0x0050A, 0x0050B }, { 0x0050C, 0x0050D }, { 0x0050E, 0x0050F },
+ { 0x00531, 0x00561 }, { 0x00532, 0x00562 }, { 0x00533, 0x00563 },
+ { 0x00534, 0x00564 }, { 0x00535, 0x00565 }, { 0x00536, 0x00566 },
+ { 0x00537, 0x00567 }, { 0x00538, 0x00568 }, { 0x00539, 0x00569 },
+ { 0x0053A, 0x0056A }, { 0x0053B, 0x0056B }, { 0x0053C, 0x0056C },
+ { 0x0053D, 0x0056D }, { 0x0053E, 0x0056E }, { 0x0053F, 0x0056F },
+ { 0x00540, 0x00570 }, { 0x00541, 0x00571 }, { 0x00542, 0x00572 },
+ { 0x00543, 0x00573 }, { 0x00544, 0x00574 }, { 0x00545, 0x00575 },
+ { 0x00546, 0x00576 }, { 0x00547, 0x00577 }, { 0x00548, 0x00578 },
+ { 0x00549, 0x00579 }, { 0x0054A, 0x0057A }, { 0x0054B, 0x0057B },
+ { 0x0054C, 0x0057C }, { 0x0054D, 0x0057D }, { 0x0054E, 0x0057E },
+ { 0x0054F, 0x0057F }, { 0x00550, 0x00580 }, { 0x00551, 0x00581 },
+ { 0x00552, 0x00582 }, { 0x00553, 0x00583 }, { 0x00554, 0x00584 },
+ { 0x00555, 0x00585 }, { 0x00556, 0x00586 }, /*{ 0x010A0, 0x02D00 },
+ { 0x010A1, 0x02D01 }, { 0x010A2, 0x02D02 }, { 0x010A3, 0x02D03 },
+ { 0x010A4, 0x02D04 }, { 0x010A5, 0x02D05 }, { 0x010A6, 0x02D06 },
+ { 0x010A7, 0x02D07 }, { 0x010A8, 0x02D08 }, { 0x010A9, 0x02D09 },
+ { 0x010AA, 0x02D0A }, { 0x010AB, 0x02D0B }, { 0x010AC, 0x02D0C },
+ { 0x010AD, 0x02D0D }, { 0x010AE, 0x02D0E }, { 0x010AF, 0x02D0F },
+ { 0x010B0, 0x02D10 }, { 0x010B1, 0x02D11 }, { 0x010B2, 0x02D12 },
+ { 0x010B3, 0x02D13 }, { 0x010B4, 0x02D14 }, { 0x010B5, 0x02D15 },
+ { 0x010B6, 0x02D16 }, { 0x010B7, 0x02D17 }, { 0x010B8, 0x02D18 },
+ { 0x010B9, 0x02D19 }, { 0x010BA, 0x02D1A }, { 0x010BB, 0x02D1B },
+ { 0x010BC, 0x02D1C }, { 0x010BD, 0x02D1D }, { 0x010BE, 0x02D1E },
+ { 0x010BF, 0x02D1F }, { 0x010C0, 0x02D20 }, { 0x010C1, 0x02D21 },
+ { 0x010C2, 0x02D22 }, { 0x010C3, 0x02D23 }, { 0x010C4, 0x02D24 },
+ { 0x010C5, 0x02D25 },*/ { 0x01E00, 0x01E01 }, { 0x01E02, 0x01E03 },
+ { 0x01E04, 0x01E05 }, { 0x01E06, 0x01E07 }, { 0x01E08, 0x01E09 },
+ { 0x01E0A, 0x01E0B }, { 0x01E0C, 0x01E0D }, { 0x01E0E, 0x01E0F },
+ { 0x01E10, 0x01E11 }, { 0x01E12, 0x01E13 }, { 0x01E14, 0x01E15 },
+ { 0x01E16, 0x01E17 }, { 0x01E18, 0x01E19 }, { 0x01E1A, 0x01E1B },
+ { 0x01E1C, 0x01E1D }, { 0x01E1E, 0x01E1F }, { 0x01E20, 0x01E21 },
+ { 0x01E22, 0x01E23 }, { 0x01E24, 0x01E25 }, { 0x01E26, 0x01E27 },
+ { 0x01E28, 0x01E29 }, { 0x01E2A, 0x01E2B }, { 0x01E2C, 0x01E2D },
+ { 0x01E2E, 0x01E2F }, { 0x01E30, 0x01E31 }, { 0x01E32, 0x01E33 },
+ { 0x01E34, 0x01E35 }, { 0x01E36, 0x01E37 }, { 0x01E38, 0x01E39 },
+ { 0x01E3A, 0x01E3B }, { 0x01E3C, 0x01E3D }, { 0x01E3E, 0x01E3F },
+ { 0x01E40, 0x01E41 }, { 0x01E42, 0x01E43 }, { 0x01E44, 0x01E45 },
+ { 0x01E46, 0x01E47 }, { 0x01E48, 0x01E49 }, { 0x01E4A, 0x01E4B },
+ { 0x01E4C, 0x01E4D }, { 0x01E4E, 0x01E4F }, { 0x01E50, 0x01E51 },
+ { 0x01E52, 0x01E53 }, { 0x01E54, 0x01E55 }, { 0x01E56, 0x01E57 },
+ { 0x01E58, 0x01E59 }, { 0x01E5A, 0x01E5B }, { 0x01E5C, 0x01E5D },
+ { 0x01E5E, 0x01E5F }, { 0x01E60, 0x01E61 }, { 0x01E62, 0x01E63 },
+ { 0x01E64, 0x01E65 }, { 0x01E66, 0x01E67 }, { 0x01E68, 0x01E69 },
+ { 0x01E6A, 0x01E6B }, { 0x01E6C, 0x01E6D }, { 0x01E6E, 0x01E6F },
+ { 0x01E70, 0x01E71 }, { 0x01E72, 0x01E73 }, { 0x01E74, 0x01E75 },
+ { 0x01E76, 0x01E77 }, { 0x01E78, 0x01E79 }, { 0x01E7A, 0x01E7B },
+ { 0x01E7C, 0x01E7D }, { 0x01E7E, 0x01E7F }, { 0x01E80, 0x01E81 },
+ { 0x01E82, 0x01E83 }, { 0x01E84, 0x01E85 }, { 0x01E86, 0x01E87 },
+ { 0x01E88, 0x01E89 }, { 0x01E8A, 0x01E8B }, { 0x01E8C, 0x01E8D },
+ { 0x01E8E, 0x01E8F }, { 0x01E90, 0x01E91 }, { 0x01E92, 0x01E93 },
+ { 0x01E94, 0x01E95 }, { 0x01EA0, 0x01EA1 }, { 0x01EA2, 0x01EA3 },
+ { 0x01EA4, 0x01EA5 }, { 0x01EA6, 0x01EA7 }, { 0x01EA8, 0x01EA9 },
+ { 0x01EAA, 0x01EAB }, { 0x01EAC, 0x01EAD }, { 0x01EAE, 0x01EAF },
+ { 0x01EB0, 0x01EB1 }, { 0x01EB2, 0x01EB3 }, { 0x01EB4, 0x01EB5 },
+ { 0x01EB6, 0x01EB7 }, { 0x01EB8, 0x01EB9 }, { 0x01EBA, 0x01EBB },
+ { 0x01EBC, 0x01EBD }, { 0x01EBE, 0x01EBF }, { 0x01EC0, 0x01EC1 },
+ { 0x01EC2, 0x01EC3 }, { 0x01EC4, 0x01EC5 }, { 0x01EC6, 0x01EC7 },
+ { 0x01EC8, 0x01EC9 }, { 0x01ECA, 0x01ECB }, { 0x01ECC, 0x01ECD },
+ { 0x01ECE, 0x01ECF }, { 0x01ED0, 0x01ED1 }, { 0x01ED2, 0x01ED3 },
+ { 0x01ED4, 0x01ED5 }, { 0x01ED6, 0x01ED7 }, { 0x01ED8, 0x01ED9 },
+ { 0x01EDA, 0x01EDB }, { 0x01EDC, 0x01EDD }, { 0x01EDE, 0x01EDF },
+ { 0x01EE0, 0x01EE1 }, { 0x01EE2, 0x01EE3 }, { 0x01EE4, 0x01EE5 },
+ { 0x01EE6, 0x01EE7 }, { 0x01EE8, 0x01EE9 }, { 0x01EEA, 0x01EEB },
+ { 0x01EEC, 0x01EED }, { 0x01EEE, 0x01EEF }, { 0x01EF0, 0x01EF1 },
+ { 0x01EF2, 0x01EF3 }, { 0x01EF4, 0x01EF5 }, { 0x01EF6, 0x01EF7 },
+ { 0x01EF8, 0x01EF9 }, { 0x01F08, 0x01F00 }, { 0x01F09, 0x01F01 },
+ { 0x01F0A, 0x01F02 }, { 0x01F0B, 0x01F03 }, { 0x01F0C, 0x01F04 },
+ { 0x01F0D, 0x01F05 }, { 0x01F0E, 0x01F06 }, { 0x01F0F, 0x01F07 },
+ { 0x01F18, 0x01F10 }, { 0x01F19, 0x01F11 }, { 0x01F1A, 0x01F12 },
+ { 0x01F1B, 0x01F13 }, { 0x01F1C, 0x01F14 }, { 0x01F1D, 0x01F15 },
+ { 0x01F28, 0x01F20 }, { 0x01F29, 0x01F21 }, { 0x01F2A, 0x01F22 },
+ { 0x01F2B, 0x01F23 }, { 0x01F2C, 0x01F24 }, { 0x01F2D, 0x01F25 },
+ { 0x01F2E, 0x01F26 }, { 0x01F2F, 0x01F27 }, { 0x01F38, 0x01F30 },
+ { 0x01F39, 0x01F31 }, { 0x01F3A, 0x01F32 }, { 0x01F3B, 0x01F33 },
+ { 0x01F3C, 0x01F34 }, { 0x01F3D, 0x01F35 }, { 0x01F3E, 0x01F36 },
+ { 0x01F3F, 0x01F37 }, { 0x01F48, 0x01F40 }, { 0x01F49, 0x01F41 },
+ { 0x01F4A, 0x01F42 }, { 0x01F4B, 0x01F43 }, { 0x01F4C, 0x01F44 },
+ { 0x01F4D, 0x01F45 }, { 0x01F59, 0x01F51 }, { 0x01F5B, 0x01F53 },
+ { 0x01F5D, 0x01F55 }, { 0x01F5F, 0x01F57 }, { 0x01F68, 0x01F60 },
+ { 0x01F69, 0x01F61 }, { 0x01F6A, 0x01F62 }, { 0x01F6B, 0x01F63 },
+ { 0x01F6C, 0x01F64 }, { 0x01F6D, 0x01F65 }, { 0x01F6E, 0x01F66 },
+ { 0x01F6F, 0x01F67 }, { 0x01F88, 0x01F80 }, { 0x01F89, 0x01F81 },
+ { 0x01F8A, 0x01F82 }, { 0x01F8B, 0x01F83 }, { 0x01F8C, 0x01F84 },
+ { 0x01F8D, 0x01F85 }, { 0x01F8E, 0x01F86 }, { 0x01F8F, 0x01F87 },
+ { 0x01F98, 0x01F90 }, { 0x01F99, 0x01F91 }, { 0x01F9A, 0x01F92 },
+ { 0x01F9B, 0x01F93 }, { 0x01F9C, 0x01F94 }, { 0x01F9D, 0x01F95 },
+ { 0x01F9E, 0x01F96 }, { 0x01F9F, 0x01F97 }, { 0x01FA8, 0x01FA0 },
+ { 0x01FA9, 0x01FA1 }, { 0x01FAA, 0x01FA2 }, { 0x01FAB, 0x01FA3 },
+ { 0x01FAC, 0x01FA4 }, { 0x01FAD, 0x01FA5 }, { 0x01FAE, 0x01FA6 },
+ { 0x01FAF, 0x01FA7 }, { 0x01FB8, 0x01FB0 }, { 0x01FB9, 0x01FB1 },
+ { 0x01FBA, 0x01F70 }, { 0x01FBB, 0x01F71 }, { 0x01FBC, 0x01FB3 },
+ { 0x01FC8, 0x01F72 }, { 0x01FC9, 0x01F73 }, { 0x01FCA, 0x01F74 },
+ { 0x01FCB, 0x01F75 }, { 0x01FCC, 0x01FC3 }, { 0x01FD8, 0x01FD0 },
+ { 0x01FD9, 0x01FD1 }, { 0x01FDA, 0x01F76 }, { 0x01FDB, 0x01F77 },
+ { 0x01FE8, 0x01FE0 }, { 0x01FE9, 0x01FE1 }, { 0x01FEA, 0x01F7A },
+ { 0x01FEB, 0x01F7B }, { 0x01FEC, 0x01FE5 }, { 0x01FF8, 0x01F78 },
+ { 0x01FF9, 0x01F79 }, { 0x01FFA, 0x01F7C }, { 0x01FFB, 0x01F7D },
+ { 0x01FFC, 0x01FF3 }, { 0x02126, 0x003C9 }, { 0x0212A, 0x0006B },
+ { 0x0212B, 0x000E5 }, { 0x02160, 0x02170 }, { 0x02161, 0x02171 },
+ { 0x02162, 0x02172 }, { 0x02163, 0x02173 }, { 0x02164, 0x02174 },
+ { 0x02165, 0x02175 }, { 0x02166, 0x02176 }, { 0x02167, 0x02177 },
+ { 0x02168, 0x02178 }, { 0x02169, 0x02179 }, { 0x0216A, 0x0217A },
+ { 0x0216B, 0x0217B }, { 0x0216C, 0x0217C }, { 0x0216D, 0x0217D },
+ { 0x0216E, 0x0217E }, { 0x0216F, 0x0217F }, { 0x024B6, 0x024D0 },
+ { 0x024B7, 0x024D1 }, { 0x024B8, 0x024D2 }, { 0x024B9, 0x024D3 },
+ { 0x024BA, 0x024D4 }, { 0x024BB, 0x024D5 }, { 0x024BC, 0x024D6 },
+ { 0x024BD, 0x024D7 }, { 0x024BE, 0x024D8 }, { 0x024BF, 0x024D9 },
+ { 0x024C0, 0x024DA }, { 0x024C1, 0x024DB }, { 0x024C2, 0x024DC },
+ { 0x024C3, 0x024DD }, { 0x024C4, 0x024DE }, { 0x024C5, 0x024DF },
+ { 0x024C6, 0x024E0 }, { 0x024C7, 0x024E1 }, { 0x024C8, 0x024E2 },
+ { 0x024C9, 0x024E3 }, { 0x024CA, 0x024E4 }, { 0x024CB, 0x024E5 },
+ { 0x024CC, 0x024E6 }, { 0x024CD, 0x024E7 }, { 0x024CE, 0x024E8 },
+ { 0x024CF, 0x024E9 }, /*{ 0x02C00, 0x02C30 }, { 0x02C01, 0x02C31 },
+ { 0x02C02, 0x02C32 }, { 0x02C03, 0x02C33 }, { 0x02C04, 0x02C34 },
+ { 0x02C05, 0x02C35 }, { 0x02C06, 0x02C36 }, { 0x02C07, 0x02C37 },
+ { 0x02C08, 0x02C38 }, { 0x02C09, 0x02C39 }, { 0x02C0A, 0x02C3A },
+ { 0x02C0B, 0x02C3B }, { 0x02C0C, 0x02C3C }, { 0x02C0D, 0x02C3D },
+ { 0x02C0E, 0x02C3E }, { 0x02C0F, 0x02C3F }, { 0x02C10, 0x02C40 },
+ { 0x02C11, 0x02C41 }, { 0x02C12, 0x02C42 }, { 0x02C13, 0x02C43 },
+ { 0x02C14, 0x02C44 }, { 0x02C15, 0x02C45 }, { 0x02C16, 0x02C46 },
+ { 0x02C17, 0x02C47 }, { 0x02C18, 0x02C48 }, { 0x02C19, 0x02C49 },
+ { 0x02C1A, 0x02C4A }, { 0x02C1B, 0x02C4B }, { 0x02C1C, 0x02C4C },
+ { 0x02C1D, 0x02C4D }, { 0x02C1E, 0x02C4E }, { 0x02C1F, 0x02C4F },
+ { 0x02C20, 0x02C50 }, { 0x02C21, 0x02C51 }, { 0x02C22, 0x02C52 },
+ { 0x02C23, 0x02C53 }, { 0x02C24, 0x02C54 }, { 0x02C25, 0x02C55 },
+ { 0x02C26, 0x02C56 }, { 0x02C27, 0x02C57 }, { 0x02C28, 0x02C58 },
+ { 0x02C29, 0x02C59 }, { 0x02C2A, 0x02C5A }, { 0x02C2B, 0x02C5B },
+ { 0x02C2C, 0x02C5C }, { 0x02C2D, 0x02C5D }, { 0x02C2E, 0x02C5E },
+ { 0x02C80, 0x02C81 }, { 0x02C82, 0x02C83 }, { 0x02C84, 0x02C85 },
+ { 0x02C86, 0x02C87 }, { 0x02C88, 0x02C89 }, { 0x02C8A, 0x02C8B },
+ { 0x02C8C, 0x02C8D }, { 0x02C8E, 0x02C8F }, { 0x02C90, 0x02C91 },
+ { 0x02C92, 0x02C93 }, { 0x02C94, 0x02C95 }, { 0x02C96, 0x02C97 },
+ { 0x02C98, 0x02C99 }, { 0x02C9A, 0x02C9B }, { 0x02C9C, 0x02C9D },
+ { 0x02C9E, 0x02C9F }, { 0x02CA0, 0x02CA1 }, { 0x02CA2, 0x02CA3 },
+ { 0x02CA4, 0x02CA5 }, { 0x02CA6, 0x02CA7 }, { 0x02CA8, 0x02CA9 },
+ { 0x02CAA, 0x02CAB }, { 0x02CAC, 0x02CAD }, { 0x02CAE, 0x02CAF },
+ { 0x02CB0, 0x02CB1 }, { 0x02CB2, 0x02CB3 }, { 0x02CB4, 0x02CB5 },
+ { 0x02CB6, 0x02CB7 }, { 0x02CB8, 0x02CB9 }, { 0x02CBA, 0x02CBB },
+ { 0x02CBC, 0x02CBD }, { 0x02CBE, 0x02CBF }, { 0x02CC0, 0x02CC1 },
+ { 0x02CC2, 0x02CC3 }, { 0x02CC4, 0x02CC5 }, { 0x02CC6, 0x02CC7 },
+ { 0x02CC8, 0x02CC9 }, { 0x02CCA, 0x02CCB }, { 0x02CCC, 0x02CCD },
+ { 0x02CCE, 0x02CCF }, { 0x02CD0, 0x02CD1 }, { 0x02CD2, 0x02CD3 },
+ { 0x02CD4, 0x02CD5 }, { 0x02CD6, 0x02CD7 }, { 0x02CD8, 0x02CD9 },
+ { 0x02CDA, 0x02CDB }, { 0x02CDC, 0x02CDD }, { 0x02CDE, 0x02CDF },
+ { 0x02CE0, 0x02CE1 }, { 0x02CE2, 0x02CE3 },*/ { 0x0FF21, 0x0FF41 },
+ { 0x0FF22, 0x0FF42 }, { 0x0FF23, 0x0FF43 }, { 0x0FF24, 0x0FF44 },
+ { 0x0FF25, 0x0FF45 }, { 0x0FF26, 0x0FF46 }, { 0x0FF27, 0x0FF47 },
+ { 0x0FF28, 0x0FF48 }, { 0x0FF29, 0x0FF49 }, { 0x0FF2A, 0x0FF4A },
+ { 0x0FF2B, 0x0FF4B }, { 0x0FF2C, 0x0FF4C }, { 0x0FF2D, 0x0FF4D },
+ { 0x0FF2E, 0x0FF4E }, { 0x0FF2F, 0x0FF4F }, { 0x0FF30, 0x0FF50 },
+ { 0x0FF31, 0x0FF51 }, { 0x0FF32, 0x0FF52 }, { 0x0FF33, 0x0FF53 },
+ { 0x0FF34, 0x0FF54 }, { 0x0FF35, 0x0FF55 }, { 0x0FF36, 0x0FF56 },
+ { 0x0FF37, 0x0FF57 }, { 0x0FF38, 0x0FF58 }, { 0x0FF39, 0x0FF59 },
+ // the following commented out ones fail on OS X 10.5 Leopard
+ { 0x0FF3A, 0x0FF5A }/*, { 0x10400, 0x10428 }, { 0x10401, 0x10429 },
+ { 0x10402, 0x1042A }, { 0x10403, 0x1042B }, { 0x10404, 0x1042C },
+ { 0x10405, 0x1042D }, { 0x10406, 0x1042E }, { 0x10407, 0x1042F },
+ { 0x10408, 0x10430 }, { 0x10409, 0x10431 }, { 0x1040A, 0x10432 },
+ { 0x1040B, 0x10433 }, { 0x1040C, 0x10434 }, { 0x1040D, 0x10435 },
+ { 0x1040E, 0x10436 }, { 0x1040F, 0x10437 }, { 0x10410, 0x10438 },
+ { 0x10411, 0x10439 }, { 0x10412, 0x1043A }, { 0x10413, 0x1043B },
+ { 0x10414, 0x1043C }, { 0x10415, 0x1043D }, { 0x10416, 0x1043E },
+ { 0x10417, 0x1043F }, { 0x10418, 0x10440 }, { 0x10419, 0x10441 },
+ { 0x1041A, 0x10442 }, { 0x1041B, 0x10443 }, { 0x1041C, 0x10444 },
+ { 0x1041D, 0x10445 }, { 0x1041E, 0x10446 }, { 0x1041F, 0x10447 },
+ { 0x10420, 0x10448 }, { 0x10421, 0x10449 }, { 0x10422, 0x1044A },
+ { 0x10423, 0x1044B }, { 0x10424, 0x1044C }, { 0x10425, 0x1044D },
+ { 0x10426, 0x1044E }, { 0x10427, 0x1044F } */
+ };
+ unsigned char utf8str_upper[5];
+ unsigned char utf8str_lower[5];
+ for (int i = 0; i < ARRAYSIZE(kUpperToLowerMap); i++) {
+ int len;
+ len = UTF32ToUTF8(kUpperToLowerMap[i].upper, utf8str_upper);
+ CHECK_NE(0, len);
+ utf8str_upper[len] = '\0';
+ len = UTF32ToUTF8(kUpperToLowerMap[i].lower, utf8str_lower);
+ CHECK_NE(0, len);
+ utf8str_lower[len] = '\0';
+ int result = ComparePathNames(
+ UTF8ToPathStringQuick(reinterpret_cast<char*>(utf8str_upper)),
+ UTF8ToPathStringQuick(reinterpret_cast<char*>(utf8str_lower)));
+ if (0 != result) {
+ // This ugly strstream works around an issue where using << hex on the
+ // stream for ADD_FAILURE produces "true" and "false" in the output.
+ strstream msg;
+ msg << "ComparePathNames(0x" << hex << kUpperToLowerMap[i].upper
+ << ", 0x" << hex << kUpperToLowerMap[i].lower
+ << ") returned " << dec << result << "; expected 0" << '\0';
+ ADD_FAILURE() << msg.str();
+ }
+ }
+#endif // not defined OS_WINDOWS
+}
+
+#ifdef OS_WINDOWS
+TEST(Syncable, PathNameMatch) {
+ // Basic cases; not too many, otherwise we would be testing the OS.
+ EXPECT_TRUE(PathNameMatch(PSTR("bob"), PSTR("bob")));
+ EXPECT_FALSE(PathNameMatch(PSTR("bob"), PSTR("fred")));
+ // Test our ; extension.
+ EXPECT_TRUE(PathNameMatch(PSTR("bo;b"), PSTR("bo;b")));
+ EXPECT_TRUE(PathNameMatch(PSTR("bo;b"), PSTR("bo*")));
+ EXPECT_FALSE(PathNameMatch(PSTR("bo;b"), PSTR("co;b")));
+ EXPECT_FALSE(PathNameMatch(PSTR("bo;b"), PSTR("co*")));
+ // Test our fixes for prepended spaces.
+ EXPECT_TRUE(PathNameMatch(PSTR(" bob"), PSTR(" bo*")));
+ EXPECT_TRUE(PathNameMatch(PSTR(" bob"), PSTR(" bob")));
+ EXPECT_FALSE(PathNameMatch(PSTR("bob"), PSTR(" bob")));
+ EXPECT_FALSE(PathNameMatch(PSTR(" bob"), PSTR("bob")));
+ // Combo test
+ EXPECT_TRUE(PathNameMatch(PSTR(" b;ob"), PSTR(" b;o*")));
+ EXPECT_TRUE(PathNameMatch(PSTR(" b;ob"), PSTR(" b;ob")));
+ EXPECT_FALSE(PathNameMatch(PSTR("b;ob"), PSTR(" b;ob")));
+ EXPECT_FALSE(PathNameMatch(PSTR(" b;ob"), PSTR("b;ob")));
+ // Other whitespace should not match.
+ EXPECT_FALSE(PathNameMatch(PSTR("bob"), PSTR("\tbob")));
+}
+#endif // OS_WINDOWS
+
+} // namespace
+
+void FakeSync(MutableEntry* e, const char* fake_id) {
+ e->Put(IS_UNSYNCED, false);
+ e->Put(BASE_VERSION, 2);
+ e->Put(ID, Id::CreateFromServerId(fake_id));
+}
+
+TEST_F(SyncableDirectoryTest, Bug1509232) {
+ const PathString a = PSTR("alpha");
+
+ CreateEntry(a, dir_.get()->NextId());
+ {
+ WriteTransaction trans(dir_.get(), UNITTEST, __FILE__, __LINE__);
+ MutableEntry e(&trans, GET_BY_PATH, a);
+ ASSERT_TRUE(e.good());
+ ExtendedAttributeKey key(e.Get(META_HANDLE), PSTR("resourcefork"));
+ MutableExtendedAttribute ext(&trans, CREATE, key);
+ ASSERT_TRUE(ext.good());
+ const char value[] = "stuff";
+ Blob value_blob(value, value + ARRAYSIZE(value));
+ ext.mutable_value()->swap(value_blob);
+ ext.delete_attribute();
+ }
+ // This call to SaveChanges used to CHECK fail.
+ dir_.get()->SaveChanges();
+}
+
+} // namespace syncable
+
+#ifdef OS_WINDOWS
+class LocalModule : public CAtlExeModuleT<LocalModule> { };
+LocalModule module_;
+
+int main(int argc, char* argv[]) {
+ testing::InitGoogleTest(&argc, argv);
+
+ // TODO(chron) Add method to change random seed.
+ const int32 test_random_seed = time(NULL);
+ cout << "Random seed: " << test_random_seed << endl;
+ LOG(INFO) << "Random seed: " << test_random_seed << endl;
+ srand(test_random_seed);
+
+ // Necessary for NewCallback, scoped to main
+ base::AtExitManager at_exit_manager;
+
+ int result = RUN_ALL_TESTS();
+ return result;
+}
+#endif