summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authormaniscalco <maniscalco@chromium.org>2015-04-03 16:44:50 -0700
committerCommit bot <commit-bot@chromium.org>2015-04-03 23:46:02 +0000
commit089b9366f4cb360bda3245d8c831e3362bfc839c (patch)
tree00c031bf2942e5a6954edbd0171088a6e81eb7b9
parent458cabc6e4595b530a2f2ac11d2e17587e3faf71 (diff)
downloadchromium_src-089b9366f4cb360bda3245d8c831e3362bfc839c.zip
chromium_src-089b9366f4cb360bda3245d8c831e3362bfc839c.tar.gz
chromium_src-089b9366f4cb360bda3245d8c831e3362bfc839c.tar.bz2
[Sync] Eliminate friends from Directory by exposing kernel via accessor
This is a minor refactoring of Directory that eliminates non-test friends and provides access to Directory's kernel using public accessor methods. Make Directory::Close private as it's automatically called from the dtor. Make Directory::Kernel public instead of having Directory befriend several classes. Make Directory::Kernel::mutex mutable to eliminate the need for a const_cast in ScopedKernelLock. Make OnDiskDirectoryBackingStore::TryLoad private. Review URL: https://codereview.chromium.org/1057663002 Cr-Commit-Position: refs/heads/master@{#323850}
-rw-r--r--sync/internal_api/sync_rollback_manager_base.cc1
-rw-r--r--sync/syncable/directory.cc9
-rw-r--r--sync/syncable/directory.h258
-rw-r--r--sync/syncable/directory_unittest.cc16
-rw-r--r--sync/syncable/model_neutral_mutable_entry.cc66
-rw-r--r--sync/syncable/mutable_entry.cc24
-rw-r--r--sync/syncable/on_disk_directory_backing_store.h12
-rw-r--r--sync/syncable/scoped_kernel_lock.cc2
-rw-r--r--sync/syncable/scoped_kernel_lock.h2
-rw-r--r--sync/syncable/syncable_base_transaction.cc4
-rw-r--r--sync/syncable/syncable_unittest.cc20
-rw-r--r--sync/syncable/syncable_write_transaction.cc14
12 files changed, 217 insertions, 211 deletions
diff --git a/sync/internal_api/sync_rollback_manager_base.cc b/sync/internal_api/sync_rollback_manager_base.cc
index 587ea56..80a63c5 100644
--- a/sync/internal_api/sync_rollback_manager_base.cc
+++ b/sync/internal_api/sync_rollback_manager_base.cc
@@ -137,7 +137,6 @@ void SyncRollbackManagerBase::SaveChanges() {
void SyncRollbackManagerBase::ShutdownOnSyncThread(ShutdownReason reason) {
if (initialized_) {
- share_.directory->Close();
share_.directory.reset();
initialized_ = false;
}
diff --git a/sync/syncable/directory.cc b/sync/syncable/directory.cc
index e603e4e..45e0798 100644
--- a/sync/syncable/directory.cc
+++ b/sync/syncable/directory.cc
@@ -189,6 +189,7 @@ DirOpenResult Directory::OpenImpl(
if (OPENED != result)
return result;
+ DCHECK(!kernel_);
kernel_ = new Kernel(name, info, delegate, transaction_observer);
delete_journal_.reset(new DeleteJournal(&delete_journals));
InitializeIndices(&tmp_handles_map);
@@ -1548,5 +1549,13 @@ void Directory::GetAttachmentIdsToUpload(BaseTransaction* trans,
std::back_inserter(*ids));
}
+Directory::Kernel* Directory::kernel() {
+ return kernel_;
+}
+
+const Directory::Kernel* Directory::kernel() const {
+ return kernel_;
+}
+
} // namespace syncable
} // namespace syncer
diff --git a/sync/syncable/directory.h b/sync/syncable/directory.h
index b256117..9bf90c1 100644
--- a/sync/syncable/directory.h
+++ b/sync/syncable/directory.h
@@ -51,15 +51,10 @@ enum InvariantCheckLevel {
// Directory stores and manages EntryKernels.
//
-// This class is tightly coupled to several other classes (see friends).
+// This class is tightly coupled to several other classes via Directory::Kernel.
+// Although Directory's kernel_ is exposed via public accessor it should be
+// treated as pseudo-private.
class SYNC_EXPORT Directory {
- friend class BaseTransaction;
- friend class Entry;
- friend class ModelNeutralMutableEntry;
- friend class MutableEntry;
- friend class ReadTransaction;
- friend class ScopedKernelLock;
- friend class WriteTransaction;
friend class SyncableDirectoryTest;
friend class syncer::TestUserShare;
FRIEND_TEST_ALL_PREFIXES(SyncableDirectoryTest, ManageDeleteJournals);
@@ -160,6 +155,105 @@ class SYNC_EXPORT Directory {
MetahandleSet delete_journals_to_purge;
};
+ struct Kernel {
+ // |delegate| must not be NULL. |transaction_observer| must be
+ // initialized.
+ Kernel(const std::string& name, const KernelLoadInfo& info,
+ DirectoryChangeDelegate* delegate,
+ const WeakHandle<TransactionObserver>& transaction_observer);
+
+ ~Kernel();
+
+ // Implements ReadTransaction / WriteTransaction using a simple lock.
+ base::Lock transaction_mutex;
+
+ // Protected by transaction_mutex. Used by WriteTransactions.
+ int64 next_write_transaction_id;
+
+ // The name of this directory.
+ std::string const name;
+
+ // Protects all members below.
+ // The mutex effectively protects all the indices, but not the
+ // entries themselves. So once a pointer to an entry is pulled
+ // from the index, the mutex can be unlocked and entry read or written.
+ //
+ // Never hold the mutex and do anything with the database or any
+ // other buffered IO. Violating this rule will result in deadlock.
+ mutable base::Lock mutex;
+
+ // Entries indexed by metahandle. This container is considered to be the
+ // owner of all EntryKernels, which may be referened by the other
+ // containers. If you remove an EntryKernel from this map, you probably
+ // want to remove it from all other containers and delete it, too.
+ MetahandlesMap metahandles_map;
+
+ // Entries indexed by id
+ IdsMap ids_map;
+
+ // Entries indexed by server tag.
+ // This map does not include any entries with non-existent server tags.
+ TagsMap server_tags_map;
+
+ // Entries indexed by client tag.
+ // This map does not include any entries with non-existent client tags.
+ // IS_DEL items are included.
+ TagsMap client_tags_map;
+
+ // Contains non-deleted items, indexed according to parent and position
+ // within parent. Protected by the ScopedKernelLock.
+ ParentChildIndex parent_child_index;
+
+ // This index keeps track of which metahandles refer to a given attachment.
+ // Think of it as the inverse of EntryKernel's AttachmentMetadata Records.
+ //
+ // Because entries can be undeleted (e.g. PutIsDel(false)), entries should
+ // not removed from the index until they are actually deleted from memory.
+ //
+ // All access should go through IsAttachmentLinked,
+ // RemoveFromAttachmentIndex, AddToAttachmentIndex, and
+ // UpdateAttachmentIndex methods to avoid iterator invalidation errors.
+ IndexByAttachmentId index_by_attachment_id;
+
+ // 3 in-memory indices on bits used extremely frequently by the syncer.
+ // |unapplied_update_metahandles| is keyed by the server model type.
+ MetahandleSet unapplied_update_metahandles[MODEL_TYPE_COUNT];
+ MetahandleSet unsynced_metahandles;
+ // Contains metahandles that are most likely dirty (though not
+ // necessarily). Dirtyness is confirmed in TakeSnapshotForSaveChanges().
+ MetahandleSet dirty_metahandles;
+
+ // When a purge takes place, we remove items from all our indices and stash
+ // them in here so that SaveChanges can persist their permanent deletion.
+ MetahandleSet metahandles_to_purge;
+
+ KernelShareInfoStatus info_status;
+
+ // These 3 members are backed in the share_info table, and
+ // their state is marked by the flag above.
+
+ // A structure containing the Directory state that is written back into the
+ // database on SaveChanges.
+ PersistedKernelInfo persisted_info;
+
+ // A unique identifier for this account's cache db, used to generate
+ // unique server IDs. No need to lock, only written at init time.
+ const std::string cache_guid;
+
+ // It doesn't make sense for two threads to run SaveChanges at the same
+ // time; this mutex protects that activity.
+ base::Lock save_changes_mutex;
+
+ // The next metahandle is protected by kernel mutex.
+ int64 next_metahandle;
+
+ // The delegate for directory change events. Must not be NULL.
+ DirectoryChangeDelegate* const delegate;
+
+ // The transaction observer.
+ const WeakHandle<TransactionObserver> transaction_observer;
+ };
+
// Does not take ownership of |encryptor|.
// |report_unrecoverable_error_function| may be NULL.
// Takes ownership of |store|.
@@ -181,10 +275,6 @@ class SYNC_EXPORT Directory {
const WeakHandle<TransactionObserver>&
transaction_observer);
- // Stops sending events to the delegate and the transaction
- // observer.
- void Close();
-
int64 NextMetahandle();
// Returns a negative integer unique to this client.
syncable::Id NextId();
@@ -423,139 +513,47 @@ class SYNC_EXPORT Directory {
ModelType type,
AttachmentIdList* ids);
- private:
- struct Kernel {
- // |delegate| must not be NULL. |transaction_observer| must be
- // initialized.
- Kernel(const std::string& name, const KernelLoadInfo& info,
- DirectoryChangeDelegate* delegate,
- const WeakHandle<TransactionObserver>& transaction_observer);
-
- ~Kernel();
-
- // Implements ReadTransaction / WriteTransaction using a simple lock.
- base::Lock transaction_mutex;
-
- // Protected by transaction_mutex. Used by WriteTransactions.
- int64 next_write_transaction_id;
-
- // The name of this directory.
- std::string const name;
-
- // Protects all members below.
- // The mutex effectively protects all the indices, but not the
- // entries themselves. So once a pointer to an entry is pulled
- // from the index, the mutex can be unlocked and entry read or written.
- //
- // Never hold the mutex and do anything with the database or any
- // other buffered IO. Violating this rule will result in deadlock.
- base::Lock mutex;
-
- // Entries indexed by metahandle. This container is considered to be the
- // owner of all EntryKernels, which may be referened by the other
- // containers. If you remove an EntryKernel from this map, you probably
- // want to remove it from all other containers and delete it, too.
- MetahandlesMap metahandles_map;
-
- // Entries indexed by id
- IdsMap ids_map;
-
- // Entries indexed by server tag.
- // This map does not include any entries with non-existent server tags.
- TagsMap server_tags_map;
-
- // Entries indexed by client tag.
- // This map does not include any entries with non-existent client tags.
- // IS_DEL items are included.
- TagsMap client_tags_map;
-
- // Contains non-deleted items, indexed according to parent and position
- // within parent. Protected by the ScopedKernelLock.
- ParentChildIndex parent_child_index;
-
- // This index keeps track of which metahandles refer to a given attachment.
- // Think of it as the inverse of EntryKernel's AttachmentMetadata Records.
- //
- // Because entries can be undeleted (e.g. PutIsDel(false)), entries should
- // not removed from the index until they are actually deleted from memory.
- //
- // All access should go through IsAttachmentLinked,
- // RemoveFromAttachmentIndex, AddToAttachmentIndex, and
- // UpdateAttachmentIndex methods to avoid iterator invalidation errors.
- IndexByAttachmentId index_by_attachment_id;
-
- // 3 in-memory indices on bits used extremely frequently by the syncer.
- // |unapplied_update_metahandles| is keyed by the server model type.
- MetahandleSet unapplied_update_metahandles[MODEL_TYPE_COUNT];
- MetahandleSet unsynced_metahandles;
- // Contains metahandles that are most likely dirty (though not
- // necessarily). Dirtyness is confirmed in TakeSnapshotForSaveChanges().
- MetahandleSet dirty_metahandles;
-
- // When a purge takes place, we remove items from all our indices and stash
- // them in here so that SaveChanges can persist their permanent deletion.
- MetahandleSet metahandles_to_purge;
-
- KernelShareInfoStatus info_status;
-
- // These 3 members are backed in the share_info table, and
- // their state is marked by the flag above.
+ // For new entry creation only.
+ bool InsertEntry(BaseWriteTransaction* trans, EntryKernel* entry);
- // A structure containing the Directory state that is written back into the
- // database on SaveChanges.
- PersistedKernelInfo persisted_info;
+ // Update the attachment index for |metahandle| removing it from the index
+ // under |old_metadata| entries and add it under |new_metadata| entries.
+ void UpdateAttachmentIndex(const int64 metahandle,
+ const sync_pb::AttachmentMetadata& old_metadata,
+ const sync_pb::AttachmentMetadata& new_metadata);
- // A unique identifier for this account's cache db, used to generate
- // unique server IDs. No need to lock, only written at init time.
- const std::string cache_guid;
+ virtual EntryKernel* GetEntryById(const Id& id);
+ virtual EntryKernel* GetEntryByClientTag(const std::string& tag);
+ EntryKernel* GetEntryByServerTag(const std::string& tag);
- // It doesn't make sense for two threads to run SaveChanges at the same
- // time; this mutex protects that activity.
- base::Lock save_changes_mutex;
+ virtual EntryKernel* GetEntryByHandle(int64 handle);
- // The next metahandle is protected by kernel mutex.
- int64 next_metahandle;
+ bool ReindexId(BaseWriteTransaction* trans, EntryKernel* const entry,
+ const Id& new_id);
- // The delegate for directory change events. Must not be NULL.
- DirectoryChangeDelegate* const delegate;
+ bool ReindexParentId(BaseWriteTransaction* trans, EntryKernel* const entry,
+ const Id& new_parent_id);
- // The transaction observer.
- const WeakHandle<TransactionObserver> transaction_observer;
- };
+ // Accessors for the underlying Kernel. Although these are public methods, the
+ // number of classes that call these should be limited.
+ Kernel* kernel();
+ const Kernel* kernel() const;
- // You'll notice that some of these methods have two forms. One that takes a
- // ScopedKernelLock and one that doesn't. The general pattern is that those
- // without a ScopedKernelLock parameter construct one internally before
- // calling the form that takes one.
+ private:
+ // You'll notice that some of the methods below are private overloads of the
+ // public ones declared above. The general pattern is that the public overload
+ // constructs a ScopedKernelLock before calling the corresponding private
+ // overload with the held ScopedKernelLock.
- virtual EntryKernel* GetEntryByHandle(int64 handle);
virtual EntryKernel* GetEntryByHandle(const ScopedKernelLock& lock,
int64 metahandle);
- virtual EntryKernel* GetEntryById(const Id& id);
virtual EntryKernel* GetEntryById(const ScopedKernelLock& lock, const Id& id);
- EntryKernel* GetEntryByServerTag(const std::string& tag);
- virtual EntryKernel* GetEntryByClientTag(const std::string& tag);
-
- // For new entry creation only
- bool InsertEntry(BaseWriteTransaction* trans, EntryKernel* entry);
bool InsertEntry(const ScopedKernelLock& lock,
BaseWriteTransaction* trans,
EntryKernel* entry);
- bool ReindexId(BaseWriteTransaction* trans, EntryKernel* const entry,
- const Id& new_id);
-
- bool ReindexParentId(BaseWriteTransaction* trans, EntryKernel* const entry,
- const Id& new_parent_id);
-
- // Update the attachment index for |metahandle| removing it from the index
- // under |old_metadata| entries and add it under |new_metadata| entries.
- void UpdateAttachmentIndex(const int64 metahandle,
- const sync_pb::AttachmentMetadata& old_metadata,
- const sync_pb::AttachmentMetadata& new_metadata);
-
// Remove each of |metahandle|'s attachment ids from index_by_attachment_id.
void RemoveFromAttachmentIndex(
const ScopedKernelLock& lock,
@@ -630,6 +628,10 @@ class SYNC_EXPORT Directory {
ModelType type,
std::vector<int64>* result);
+ // Stops sending events to the delegate and the transaction
+ // observer.
+ void Close();
+
Kernel* kernel_;
scoped_ptr<DirectoryBackingStore> store_;
diff --git a/sync/syncable/directory_unittest.cc b/sync/syncable/directory_unittest.cc
index 1d04c3a..3737ca0 100644
--- a/sync/syncable/directory_unittest.cc
+++ b/sync/syncable/directory_unittest.cc
@@ -155,7 +155,7 @@ void SyncableDirectoryTest::CheckPurgeEntriesWithTypeInSucceeded(
dir_->GetAllMetaHandles(&trans, &all_set);
EXPECT_EQ(4U, all_set.size());
if (before_reload)
- EXPECT_EQ(6U, dir_->kernel_->metahandles_to_purge.size());
+ EXPECT_EQ(6U, dir_->kernel()->metahandles_to_purge.size());
for (MetahandleSet::iterator iter = all_set.begin(); iter != all_set.end();
++iter) {
Entry e(&trans, GET_BY_HANDLE, *iter);
@@ -187,11 +187,11 @@ void SyncableDirectoryTest::CheckPurgeEntriesWithTypeInSucceeded(
}
bool SyncableDirectoryTest::IsInDirtyMetahandles(int64 metahandle) {
- return 1 == dir_->kernel_->dirty_metahandles.count(metahandle);
+ return 1 == dir_->kernel()->dirty_metahandles.count(metahandle);
}
bool SyncableDirectoryTest::IsInMetahandlesToPurge(int64 metahandle) {
- return 1 == dir_->kernel_->metahandles_to_purge.count(metahandle);
+ return 1 == dir_->kernel()->metahandles_to_purge.count(metahandle);
}
scoped_ptr<Directory>& SyncableDirectoryTest::dir() {
@@ -256,7 +256,7 @@ TEST_F(SyncableDirectoryTest, TakeSnapshotGetsMetahandlesToPurge) {
dir()->PurgeEntriesWithTypeIn(to_purge, ModelTypeSet(), ModelTypeSet());
Directory::SaveChangesSnapshot snapshot1;
- base::AutoLock scoped_lock(dir()->kernel_->save_changes_mutex);
+ base::AutoLock scoped_lock(dir()->kernel()->save_changes_mutex);
dir()->TakeSnapshotForSaveChanges(&snapshot1);
EXPECT_TRUE(expected_purges == snapshot1.metahandles_to_purge);
@@ -285,7 +285,7 @@ TEST_F(SyncableDirectoryTest, TakeSnapshotGetsAllDirtyHandlesTest) {
// Fake SaveChanges() and make sure we got what we expected.
{
Directory::SaveChangesSnapshot snapshot;
- base::AutoLock scoped_lock(dir()->kernel_->save_changes_mutex);
+ base::AutoLock scoped_lock(dir()->kernel()->save_changes_mutex);
dir()->TakeSnapshotForSaveChanges(&snapshot);
// Make sure there's an entry for each new metahandle. Make sure all
// entries are marked dirty.
@@ -321,7 +321,7 @@ TEST_F(SyncableDirectoryTest, TakeSnapshotGetsAllDirtyHandlesTest) {
// Fake SaveChanges() and make sure we got what we expected.
{
Directory::SaveChangesSnapshot snapshot;
- base::AutoLock scoped_lock(dir()->kernel_->save_changes_mutex);
+ base::AutoLock scoped_lock(dir()->kernel()->save_changes_mutex);
dir()->TakeSnapshotForSaveChanges(&snapshot);
// Make sure there's an entry for each new metahandle. Make sure all
// entries are marked dirty.
@@ -388,7 +388,7 @@ TEST_F(SyncableDirectoryTest, TakeSnapshotGetsOnlyDirtyHandlesTest) {
// Fake SaveChanges() and make sure we got what we expected.
{
Directory::SaveChangesSnapshot snapshot;
- base::AutoLock scoped_lock(dir()->kernel_->save_changes_mutex);
+ base::AutoLock scoped_lock(dir()->kernel()->save_changes_mutex);
dir()->TakeSnapshotForSaveChanges(&snapshot);
// Make sure there are no dirty_metahandles.
EXPECT_EQ(0u, snapshot.dirty_metas.size());
@@ -415,7 +415,7 @@ TEST_F(SyncableDirectoryTest, TakeSnapshotGetsOnlyDirtyHandlesTest) {
// Fake SaveChanges() and make sure we got what we expected.
{
Directory::SaveChangesSnapshot snapshot;
- base::AutoLock scoped_lock(dir()->kernel_->save_changes_mutex);
+ base::AutoLock scoped_lock(dir()->kernel()->save_changes_mutex);
dir()->TakeSnapshotForSaveChanges(&snapshot);
// Make sure there's an entry for each changed metahandle. Make sure all
// entries are marked dirty.
diff --git a/sync/syncable/model_neutral_mutable_entry.cc b/sync/syncable/model_neutral_mutable_entry.cc
index 5064f4507..a7beadc 100644
--- a/sync/syncable/model_neutral_mutable_entry.cc
+++ b/sync/syncable/model_neutral_mutable_entry.cc
@@ -32,7 +32,7 @@ ModelNeutralMutableEntry::ModelNeutralMutableEntry(BaseWriteTransaction* trans,
kernel->put(ID, id);
kernel->put(META_HANDLE, trans->directory()->NextMetahandle());
- kernel->mark_dirty(&trans->directory()->kernel_->dirty_metahandles);
+ kernel->mark_dirty(&trans->directory()->kernel()->dirty_metahandles);
kernel->put(IS_DEL, true);
// We match the database defaults here
kernel->put(BASE_VERSION, CHANGES_VERSION);
@@ -69,7 +69,7 @@ ModelNeutralMutableEntry::ModelNeutralMutableEntry(BaseWriteTransaction* trans,
kernel->put(NON_UNIQUE_NAME, ModelTypeToString(type));
kernel->put(IS_DIR, true);
- kernel->mark_dirty(&trans->directory()->kernel_->dirty_metahandles);
+ kernel->mark_dirty(&trans->directory()->kernel()->dirty_metahandles);
if (!trans->directory()->InsertEntry(trans, kernel.get())) {
return; // Failed inserting.
@@ -105,7 +105,7 @@ void ModelNeutralMutableEntry::PutBaseVersion(int64 value) {
base_write_transaction_->TrackChangesTo(kernel_);
if (kernel_->ref(BASE_VERSION) != value) {
kernel_->put(BASE_VERSION, value);
- kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
+ kernel_->mark_dirty(&dir()->kernel()->dirty_metahandles);
}
}
@@ -115,7 +115,7 @@ void ModelNeutralMutableEntry::PutServerVersion(int64 value) {
if (kernel_->ref(SERVER_VERSION) != value) {
ScopedKernelLock lock(dir());
kernel_->put(SERVER_VERSION, value);
- kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
+ kernel_->mark_dirty(&dir()->kernel()->dirty_metahandles);
}
}
@@ -124,7 +124,7 @@ void ModelNeutralMutableEntry::PutServerMtime(base::Time value) {
base_write_transaction_->TrackChangesTo(kernel_);
if (kernel_->ref(SERVER_MTIME) != value) {
kernel_->put(SERVER_MTIME, value);
- kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
+ kernel_->mark_dirty(&dir()->kernel()->dirty_metahandles);
}
}
@@ -133,7 +133,7 @@ void ModelNeutralMutableEntry::PutServerCtime(base::Time value) {
base_write_transaction_->TrackChangesTo(kernel_);
if (kernel_->ref(SERVER_CTIME) != value) {
kernel_->put(SERVER_CTIME, value);
- kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
+ kernel_->mark_dirty(&dir()->kernel()->dirty_metahandles);
}
}
@@ -143,7 +143,7 @@ bool ModelNeutralMutableEntry::PutId(const Id& value) {
if (kernel_->ref(ID) != value) {
if (!dir()->ReindexId(base_write_transaction(), kernel_, value))
return false;
- kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
+ kernel_->mark_dirty(&dir()->kernel()->dirty_metahandles);
}
return true;
}
@@ -154,7 +154,7 @@ void ModelNeutralMutableEntry::PutServerParentId(const Id& value) {
if (kernel_->ref(SERVER_PARENT_ID) != value) {
kernel_->put(SERVER_PARENT_ID, value);
- kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
+ kernel_->mark_dirty(&dir()->kernel()->dirty_metahandles);
}
}
@@ -162,7 +162,7 @@ bool ModelNeutralMutableEntry::PutIsUnsynced(bool value) {
DCHECK(kernel_);
base_write_transaction_->TrackChangesTo(kernel_);
if (kernel_->ref(IS_UNSYNCED) != value) {
- MetahandleSet* index = &dir()->kernel_->unsynced_metahandles;
+ MetahandleSet* index = &dir()->kernel()->unsynced_metahandles;
ScopedKernelLock lock(dir());
if (value) {
@@ -181,7 +181,7 @@ bool ModelNeutralMutableEntry::PutIsUnsynced(bool value) {
}
}
kernel_->put(IS_UNSYNCED, value);
- kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
+ kernel_->mark_dirty(&dir()->kernel()->dirty_metahandles);
}
return true;
}
@@ -193,7 +193,7 @@ bool ModelNeutralMutableEntry::PutIsUnappliedUpdate(bool value) {
// Use kernel_->GetServerModelType() instead of
// GetServerModelType() as we may trigger some DCHECKs in the
// latter.
- MetahandleSet* index = &dir()->kernel_->unapplied_update_metahandles[
+ MetahandleSet* index = &dir()->kernel()->unapplied_update_metahandles[
kernel_->GetServerModelType()];
ScopedKernelLock lock(dir());
@@ -213,7 +213,7 @@ bool ModelNeutralMutableEntry::PutIsUnappliedUpdate(bool value) {
}
}
kernel_->put(IS_UNAPPLIED_UPDATE, value);
- kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
+ kernel_->mark_dirty(&dir()->kernel()->dirty_metahandles);
}
return true;
}
@@ -253,7 +253,7 @@ void ModelNeutralMutableEntry::PutServerNonUniqueName(
if (kernel_->ref(SERVER_NON_UNIQUE_NAME) != value) {
kernel_->put(SERVER_NON_UNIQUE_NAME, value);
- kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
+ kernel_->mark_dirty(&dir()->kernel()->dirty_metahandles);
}
}
@@ -265,17 +265,17 @@ bool ModelNeutralMutableEntry::PutUniqueServerTag(const string& new_tag) {
base_write_transaction_->TrackChangesTo(kernel_);
ScopedKernelLock lock(dir());
// Make sure your new value is not in there already.
- if (dir()->kernel_->server_tags_map.find(new_tag) !=
- dir()->kernel_->server_tags_map.end()) {
+ if (dir()->kernel()->server_tags_map.find(new_tag) !=
+ dir()->kernel()->server_tags_map.end()) {
DVLOG(1) << "Detected duplicate server tag";
return false;
}
- dir()->kernel_->server_tags_map.erase(
+ dir()->kernel()->server_tags_map.erase(
kernel_->ref(UNIQUE_SERVER_TAG));
kernel_->put(UNIQUE_SERVER_TAG, new_tag);
- kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
+ kernel_->mark_dirty(&dir()->kernel()->dirty_metahandles);
if (!new_tag.empty()) {
- dir()->kernel_->server_tags_map[new_tag] = kernel_;
+ dir()->kernel()->server_tags_map[new_tag] = kernel_;
}
return true;
@@ -289,17 +289,17 @@ bool ModelNeutralMutableEntry::PutUniqueClientTag(const string& new_tag) {
base_write_transaction_->TrackChangesTo(kernel_);
ScopedKernelLock lock(dir());
// Make sure your new value is not in there already.
- if (dir()->kernel_->client_tags_map.find(new_tag) !=
- dir()->kernel_->client_tags_map.end()) {
+ if (dir()->kernel()->client_tags_map.find(new_tag) !=
+ dir()->kernel()->client_tags_map.end()) {
DVLOG(1) << "Detected duplicate client tag";
return false;
}
- dir()->kernel_->client_tags_map.erase(
+ dir()->kernel()->client_tags_map.erase(
kernel_->ref(UNIQUE_CLIENT_TAG));
kernel_->put(UNIQUE_CLIENT_TAG, new_tag);
- kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
+ kernel_->mark_dirty(&dir()->kernel()->dirty_metahandles);
if (!new_tag.empty()) {
- dir()->kernel_->client_tags_map[new_tag] = kernel_;
+ dir()->kernel()->client_tags_map[new_tag] = kernel_;
}
return true;
@@ -325,7 +325,7 @@ void ModelNeutralMutableEntry::PutUniqueBookmarkTag(const std::string& tag) {
}
kernel_->put(UNIQUE_BOOKMARK_TAG, tag);
- kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
+ kernel_->mark_dirty(&dir()->kernel()->dirty_metahandles);
}
void ModelNeutralMutableEntry::PutServerSpecifics(
@@ -343,20 +343,20 @@ void ModelNeutralMutableEntry::PutServerSpecifics(
const ModelType old_server_type = kernel_->GetServerModelType();
const int64 metahandle = kernel_->ref(META_HANDLE);
size_t erase_count =
- dir()->kernel_->unapplied_update_metahandles[old_server_type]
+ dir()->kernel()->unapplied_update_metahandles[old_server_type]
.erase(metahandle);
DCHECK_EQ(erase_count, 1u);
}
kernel_->put(SERVER_SPECIFICS, value);
- kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
+ kernel_->mark_dirty(&dir()->kernel()->dirty_metahandles);
if (kernel_->ref(IS_UNAPPLIED_UPDATE)) {
// Add ourselves back into unapplied_update_metahandles with our
// new server type.
const ModelType new_server_type = kernel_->GetServerModelType();
const int64 metahandle = kernel_->ref(META_HANDLE);
- dir()->kernel_->unapplied_update_metahandles[new_server_type]
+ dir()->kernel()->unapplied_update_metahandles[new_server_type]
.insert(metahandle);
}
}
@@ -372,7 +372,7 @@ void ModelNeutralMutableEntry::PutBaseServerSpecifics(
if (kernel_->ref(BASE_SERVER_SPECIFICS).SerializeAsString()
!= value.SerializeAsString()) {
kernel_->put(BASE_SERVER_SPECIFICS, value);
- kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
+ kernel_->mark_dirty(&dir()->kernel()->dirty_metahandles);
}
}
@@ -385,7 +385,7 @@ void ModelNeutralMutableEntry::PutServerUniquePosition(
DCHECK(value.IsValid());
ScopedKernelLock lock(dir());
kernel_->put(SERVER_UNIQUE_POSITION, value);
- kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
+ kernel_->mark_dirty(&dir()->kernel()->dirty_metahandles);
}
}
@@ -397,7 +397,7 @@ void ModelNeutralMutableEntry::PutServerAttachmentMetadata(
if (kernel_->ref(SERVER_ATTACHMENT_METADATA).SerializeAsString() !=
value.SerializeAsString()) {
kernel_->put(SERVER_ATTACHMENT_METADATA, value);
- kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
+ kernel_->mark_dirty(&dir()->kernel()->dirty_metahandles);
}
}
@@ -413,20 +413,20 @@ void ModelNeutralMutableEntry::PutDirtySync(bool value) {
void ModelNeutralMutableEntry::PutParentIdPropertyOnly(const Id& parent_id) {
base_write_transaction_->TrackChangesTo(kernel_);
dir()->ReindexParentId(base_write_transaction(), kernel_, parent_id);
- kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
+ kernel_->mark_dirty(&dir()->kernel()->dirty_metahandles);
}
void ModelNeutralMutableEntry::UpdateTransactionVersion(int64 value) {
ScopedKernelLock lock(dir());
kernel_->put(TRANSACTION_VERSION, value);
- kernel_->mark_dirty(&(dir()->kernel_->dirty_metahandles));
+ kernel_->mark_dirty(&(dir()->kernel()->dirty_metahandles));
}
ModelNeutralMutableEntry::ModelNeutralMutableEntry(BaseWriteTransaction* trans)
: Entry(trans), base_write_transaction_(trans) {}
MetahandleSet* ModelNeutralMutableEntry::GetDirtyIndexHelper() {
- return &dir()->kernel_->dirty_metahandles;
+ return &dir()->kernel()->dirty_metahandles;
}
} // namespace syncable
diff --git a/sync/syncable/mutable_entry.cc b/sync/syncable/mutable_entry.cc
index f6f3f16..0b5ee55 100644
--- a/sync/syncable/mutable_entry.cc
+++ b/sync/syncable/mutable_entry.cc
@@ -28,7 +28,7 @@ void MutableEntry::Init(WriteTransaction* trans,
kernel->put(ID, trans->directory_->NextId());
kernel->put(META_HANDLE, trans->directory_->NextMetahandle());
- kernel->mark_dirty(&trans->directory_->kernel_->dirty_metahandles);
+ kernel->mark_dirty(&trans->directory_->kernel()->dirty_metahandles);
kernel->put(NON_UNIQUE_NAME, name);
const base::Time& now = base::Time::Now();
kernel->put(CTIME, now);
@@ -126,7 +126,7 @@ void MutableEntry::PutLocalExternalId(int64 value) {
if (kernel_->ref(LOCAL_EXTERNAL_ID) != value) {
ScopedKernelLock lock(dir());
kernel_->put(LOCAL_EXTERNAL_ID, value);
- kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
+ kernel_->mark_dirty(&dir()->kernel()->dirty_metahandles);
}
}
@@ -135,7 +135,7 @@ void MutableEntry::PutMtime(base::Time value) {
write_transaction()->TrackChangesTo(kernel_);
if (kernel_->ref(MTIME) != value) {
kernel_->put(MTIME, value);
- kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
+ kernel_->mark_dirty(&dir()->kernel()->dirty_metahandles);
}
}
@@ -144,7 +144,7 @@ void MutableEntry::PutCtime(base::Time value) {
write_transaction()->TrackChangesTo(kernel_);
if (kernel_->ref(CTIME) != value) {
kernel_->put(CTIME, value);
- kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
+ kernel_->mark_dirty(&dir()->kernel()->dirty_metahandles);
}
}
@@ -199,10 +199,10 @@ void MutableEntry::PutIsDel(bool value) {
// Some indices don't include deleted items and must be updated
// upon a value change.
ScopedParentChildIndexUpdater updater(lock, kernel_,
- &dir()->kernel_->parent_child_index);
+ &dir()->kernel()->parent_child_index);
kernel_->put(IS_DEL, value);
- kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
+ kernel_->mark_dirty(&dir()->kernel()->dirty_metahandles);
}
}
@@ -212,7 +212,7 @@ void MutableEntry::PutNonUniqueName(const std::string& value) {
if (kernel_->ref(NON_UNIQUE_NAME) != value) {
kernel_->put(NON_UNIQUE_NAME, value);
- kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
+ kernel_->mark_dirty(&dir()->kernel()->dirty_metahandles);
}
}
@@ -225,7 +225,7 @@ void MutableEntry::PutSpecifics(const sync_pb::EntitySpecifics& value) {
if (kernel_->ref(SPECIFICS).SerializeAsString() !=
value.SerializeAsString()) {
kernel_->put(SPECIFICS, value);
- kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
+ kernel_->mark_dirty(&dir()->kernel()->dirty_metahandles);
}
}
@@ -237,9 +237,9 @@ void MutableEntry::PutUniquePosition(const UniquePosition& value) {
DCHECK(value.IsValid());
ScopedKernelLock lock(dir());
ScopedParentChildIndexUpdater updater(
- lock, kernel_, &dir()->kernel_->parent_child_index);
+ lock, kernel_, &dir()->kernel()->parent_child_index);
kernel_->put(UNIQUE_POSITION, value);
- kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
+ kernel_->mark_dirty(&dir()->kernel()->dirty_metahandles);
}
}
@@ -265,7 +265,7 @@ void MutableEntry::PutAttachmentMetadata(
kernel_->ref(ATTACHMENT_METADATA),
attachment_metadata);
kernel_->put(ATTACHMENT_METADATA, attachment_metadata);
- kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
+ kernel_->mark_dirty(&dir()->kernel()->dirty_metahandles);
}
}
@@ -283,7 +283,7 @@ void MutableEntry::MarkAttachmentAsOnServer(
continue;
record->set_is_on_server(true);
}
- kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles);
+ kernel_->mark_dirty(&dir()->kernel()->dirty_metahandles);
MarkForSyncing(this);
}
diff --git a/sync/syncable/on_disk_directory_backing_store.h b/sync/syncable/on_disk_directory_backing_store.h
index 43bc74d..9002eb0 100644
--- a/sync/syncable/on_disk_directory_backing_store.h
+++ b/sync/syncable/on_disk_directory_backing_store.h
@@ -25,6 +25,11 @@ class SYNC_EXPORT_PRIVATE OnDiskDirectoryBackingStore
MetahandleSet* metahandles_to_purge,
Directory::KernelLoadInfo* kernel_load_info) override;
+ protected:
+ // Subclasses may override this to avoid a possible DCHECK.
+ virtual void ReportFirstTryOpenFailure();
+
+ private:
// A helper function that will make one attempt to load the directory.
// Unlike Load(), it does not attempt to recover from failure.
DirOpenResult TryLoad(Directory::MetahandlesMap* handles_map,
@@ -32,13 +37,6 @@ class SYNC_EXPORT_PRIVATE OnDiskDirectoryBackingStore
MetahandleSet* metahandles_to_purge,
Directory::KernelLoadInfo* kernel_load_info);
- protected:
- // Subclasses may override this to avoid a possible DCHECK.
- virtual void ReportFirstTryOpenFailure();
-
- private:
- FRIEND_TEST_ALL_PREFIXES(DirectoryBackingStoreTest, MinorCorruption);
-
bool allow_failure_for_test_;
base::FilePath backing_filepath_;
diff --git a/sync/syncable/scoped_kernel_lock.cc b/sync/syncable/scoped_kernel_lock.cc
index 4a27420..a2d7572 100644
--- a/sync/syncable/scoped_kernel_lock.cc
+++ b/sync/syncable/scoped_kernel_lock.cc
@@ -10,7 +10,7 @@ namespace syncer {
namespace syncable {
ScopedKernelLock::ScopedKernelLock(const Directory* dir)
- : scoped_lock_(dir->kernel_->mutex), dir_(const_cast<Directory*>(dir)) {
+ : scoped_lock_(dir->kernel()->mutex), dir_(dir) {
}
ScopedKernelLock::~ScopedKernelLock() {}
diff --git a/sync/syncable/scoped_kernel_lock.h b/sync/syncable/scoped_kernel_lock.h
index affc375..c684810 100644
--- a/sync/syncable/scoped_kernel_lock.h
+++ b/sync/syncable/scoped_kernel_lock.h
@@ -19,7 +19,7 @@ class ScopedKernelLock {
~ScopedKernelLock();
base::AutoLock scoped_lock_;
- Directory* const dir_;
+ const Directory* const dir_;
DISALLOW_COPY_AND_ASSIGN(ScopedKernelLock);
};
diff --git a/sync/syncable/syncable_base_transaction.cc b/sync/syncable/syncable_base_transaction.cc
index 8eb6d6b..c145c1f 100644
--- a/sync/syncable/syncable_base_transaction.cc
+++ b/sync/syncable/syncable_base_transaction.cc
@@ -24,11 +24,11 @@ void BaseTransaction::Lock() {
"src_file", from_here_.file_name(),
"src_func", from_here_.function_name());
- directory_->kernel_->transaction_mutex.Acquire();
+ directory_->kernel()->transaction_mutex.Acquire();
}
void BaseTransaction::Unlock() {
- directory_->kernel_->transaction_mutex.Release();
+ directory_->kernel()->transaction_mutex.Release();
}
void BaseTransaction::OnUnrecoverableError(
diff --git a/sync/syncable/syncable_unittest.cc b/sync/syncable/syncable_unittest.cc
index d1c6ae8..f7d85cc 100644
--- a/sync/syncable/syncable_unittest.cc
+++ b/sync/syncable/syncable_unittest.cc
@@ -561,17 +561,15 @@ TEST_F(SyncableDirectoryManagement, TestFileRelease) {
base::FilePath path =
temp_dir_.path().Append(Directory::kSyncDatabaseFilename);
- Directory dir(new OnDiskDirectoryBackingStore("ScopeTest", path),
- &handler_,
- NULL,
- NULL,
- NULL);
- DirOpenResult result =
- dir.Open("ScopeTest", &delegate_, NullTransactionObserver());
- ASSERT_EQ(result, OPENED);
- dir.Close();
-
- // Closing the directory should have released the backing database file.
+ {
+ Directory dir(new OnDiskDirectoryBackingStore("ScopeTest", path), &handler_,
+ NULL, NULL, NULL);
+ DirOpenResult result =
+ dir.Open("ScopeTest", &delegate_, NullTransactionObserver());
+ ASSERT_EQ(result, OPENED);
+ }
+
+ // Destroying the directory should have released the backing database file.
ASSERT_TRUE(base::DeleteFile(path, true));
}
diff --git a/sync/syncable/syncable_write_transaction.cc b/sync/syncable/syncable_write_transaction.cc
index d97ff67..6a5557f 100644
--- a/sync/syncable/syncable_write_transaction.cc
+++ b/sync/syncable/syncable_write_transaction.cc
@@ -45,7 +45,7 @@ void WriteTransaction::TrackChangesTo(const EntryKernel* entry) {
}
ImmutableEntryKernelMutationMap WriteTransaction::RecordMutations() {
- directory_->kernel_->transaction_mutex.AssertAcquired();
+ directory_->kernel()->transaction_mutex.AssertAcquired();
for (syncable::EntryKernelMutationMap::iterator it = mutations_.begin();
it != mutations_.end();) {
EntryKernel* kernel = directory()->GetEntryByHandle(it->first);
@@ -83,17 +83,17 @@ void WriteTransaction::UnlockAndNotify(
ModelTypeSet WriteTransaction::NotifyTransactionChangingAndEnding(
const ImmutableEntryKernelMutationMap& mutations) {
- directory_->kernel_->transaction_mutex.AssertAcquired();
+ directory_->kernel()->transaction_mutex.AssertAcquired();
DCHECK(!mutations.Get().empty());
WriteTransactionInfo write_transaction_info(
- directory_->kernel_->next_write_transaction_id,
+ directory_->kernel()->next_write_transaction_id,
from_here_, writer_, mutations);
- ++directory_->kernel_->next_write_transaction_id;
+ ++directory_->kernel()->next_write_transaction_id;
ImmutableWriteTransactionInfo immutable_write_transaction_info(
&write_transaction_info);
- DirectoryChangeDelegate* const delegate = directory_->kernel_->delegate;
+ DirectoryChangeDelegate* const delegate = directory_->kernel()->delegate;
std::vector<int64> entry_changed;
if (writer_ == syncable::SYNCAPI) {
delegate->HandleCalculateChangesChangeEventFromSyncApi(
@@ -108,7 +108,7 @@ ModelTypeSet WriteTransaction::NotifyTransactionChangingAndEnding(
delegate->HandleTransactionEndingChangeEvent(
immutable_write_transaction_info, this);
- directory_->kernel_->transaction_observer.Call(FROM_HERE,
+ directory_->kernel()->transaction_observer.Call(FROM_HERE,
&TransactionObserver::OnTransactionWrite,
immutable_write_transaction_info, models_with_changes);
@@ -117,7 +117,7 @@ ModelTypeSet WriteTransaction::NotifyTransactionChangingAndEnding(
void WriteTransaction::NotifyTransactionComplete(
ModelTypeSet models_with_changes) {
- directory_->kernel_->delegate->HandleTransactionCompleteChangeEvent(
+ directory_->kernel()->delegate->HandleTransactionCompleteChangeEvent(
models_with_changes);
}