author     stanisc <stanisc@chromium.org>        2015-07-16 16:18:56 -0700
committer  Commit bot <commit-bot@chromium.org>  2015-07-16 23:19:33 +0000
commit     193873c7e382f11bf09b6739d07ac82e9dd14fd7 (patch)
tree       14e3928380fce49d4d2ebb938b6cd12939ac2561 /sync
parent     4927df40d0342522fc268f884835b43f9004549e (diff)
Sync: MutableEntry shouldn't track changes that have no actual impact on the stored data
This change optimizes MutableEntry and ModelNeutralMutableEntry
methods that update sync data. Most of these methods compare
new and old values and skip the update when the values match.
However, most of these methods still unconditionally register the EntryKernel
to be tracked in the EntryKernelMutation list by calling TrackChangesTo,
which immediately makes a deep copy of the EntryKernel. A second deep copy
is then made at the end of the transaction scope.
This fix avoids both unnecessary deep copies by calling
TrackChangesTo only when necessary, i.e. when the new value
actually differs from the stored one.
In addition, I used this as an opportunity to do a small code cleanup around
marking MutableEntry instances as dirty.
BUG=468144
Review URL: https://codereview.chromium.org/1237493002
Cr-Commit-Position: refs/heads/master@{#339162}
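
To make the optimization concrete, here is a minimal standalone sketch of the pattern applied throughout the diff below. FakeKernel and FakeTransaction are hypothetical stand-ins for illustration only, not the real syncable::EntryKernel or BaseWriteTransaction; only the reordering of the equality check and the TrackChangesTo call mirrors the actual change.

// Sketch only: hypothetical simplified types, not the real sync classes.
#include <cstdint>

struct FakeKernel {
  int64_t base_version = 0;
};

struct FakeTransaction {
  int deep_copies = 0;
  // Stand-in for TrackChangesTo(), which deep-copies the entry so the
  // transaction can diff old vs. new state when it closes.
  void TrackChangesTo(const FakeKernel& /*kernel*/) { ++deep_copies; }
};

// Old shape: the deep copy happens on every call, even for no-op writes.
void PutBaseVersionBefore(FakeTransaction& trans, FakeKernel& kernel,
                          int64_t value) {
  trans.TrackChangesTo(kernel);
  if (kernel.base_version != value)
    kernel.base_version = value;
}

// New shape: the deep copy happens only when the stored value changes.
void PutBaseVersionAfter(FakeTransaction& trans, FakeKernel& kernel,
                         int64_t value) {
  if (kernel.base_version != value) {
    trans.TrackChangesTo(kernel);
    kernel.base_version = value;
  }
}

With the new shape, a redundant write such as PutBaseVersionAfter(trans, kernel, kernel.base_version) leaves deep_copies at zero, which is the per-call saving described above.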
Diffstat (limited to 'sync')
-rw-r--r--  sync/syncable/model_neutral_mutable_entry.cc | 82
-rw-r--r--  sync/syncable/model_neutral_mutable_entry.h  |  2
-rw-r--r--  sync/syncable/mutable_entry.cc               | 46
3 files changed, 64 insertions, 66 deletions
diff --git a/sync/syncable/model_neutral_mutable_entry.cc b/sync/syncable/model_neutral_mutable_entry.cc
index bdf5299..498ba9b 100644
--- a/sync/syncable/model_neutral_mutable_entry.cc
+++ b/sync/syncable/model_neutral_mutable_entry.cc
@@ -105,66 +105,65 @@ ModelNeutralMutableEntry::ModelNeutralMutableEntry(
 void ModelNeutralMutableEntry::PutBaseVersion(int64 value) {
   DCHECK(kernel_);
-  base_write_transaction_->TrackChangesTo(kernel_);
   if (kernel_->ref(BASE_VERSION) != value) {
+    base_write_transaction_->TrackChangesTo(kernel_);
     kernel_->put(BASE_VERSION, value);
-    kernel_->mark_dirty(&dir()->kernel()->dirty_metahandles);
+    MarkDirty();
   }
 }
 
 void ModelNeutralMutableEntry::PutServerVersion(int64 value) {
   DCHECK(kernel_);
-  base_write_transaction_->TrackChangesTo(kernel_);
   if (kernel_->ref(SERVER_VERSION) != value) {
+    base_write_transaction_->TrackChangesTo(kernel_);
     ScopedKernelLock lock(dir());
     kernel_->put(SERVER_VERSION, value);
-    kernel_->mark_dirty(&dir()->kernel()->dirty_metahandles);
+    MarkDirty();
   }
 }
 
 void ModelNeutralMutableEntry::PutServerMtime(base::Time value) {
   DCHECK(kernel_);
-  base_write_transaction_->TrackChangesTo(kernel_);
   if (kernel_->ref(SERVER_MTIME) != value) {
+    base_write_transaction_->TrackChangesTo(kernel_);
     kernel_->put(SERVER_MTIME, value);
-    kernel_->mark_dirty(&dir()->kernel()->dirty_metahandles);
+    MarkDirty();
   }
 }
 
 void ModelNeutralMutableEntry::PutServerCtime(base::Time value) {
   DCHECK(kernel_);
-  base_write_transaction_->TrackChangesTo(kernel_);
   if (kernel_->ref(SERVER_CTIME) != value) {
+    base_write_transaction_->TrackChangesTo(kernel_);
     kernel_->put(SERVER_CTIME, value);
-    kernel_->mark_dirty(&dir()->kernel()->dirty_metahandles);
+    MarkDirty();
   }
 }
 
 bool ModelNeutralMutableEntry::PutId(const Id& value) {
   DCHECK(kernel_);
-  base_write_transaction_->TrackChangesTo(kernel_);
   if (kernel_->ref(ID) != value) {
+    base_write_transaction_->TrackChangesTo(kernel_);
     if (!dir()->ReindexId(base_write_transaction(), kernel_, value))
       return false;
-    kernel_->mark_dirty(&dir()->kernel()->dirty_metahandles);
+    MarkDirty();
   }
   return true;
 }
 
 void ModelNeutralMutableEntry::PutServerParentId(const Id& value) {
   DCHECK(kernel_);
-  base_write_transaction_->TrackChangesTo(kernel_);
-
   if (kernel_->ref(SERVER_PARENT_ID) != value) {
+    base_write_transaction_->TrackChangesTo(kernel_);
     kernel_->put(SERVER_PARENT_ID, value);
-    kernel_->mark_dirty(&dir()->kernel()->dirty_metahandles);
+    MarkDirty();
   }
 }
 
 bool ModelNeutralMutableEntry::PutIsUnsynced(bool value) {
   DCHECK(kernel_);
-  base_write_transaction_->TrackChangesTo(kernel_);
   if (kernel_->ref(IS_UNSYNCED) != value) {
+    base_write_transaction_->TrackChangesTo(kernel_);
     MetahandleSet* index = &dir()->kernel()->unsynced_metahandles;
 
     ScopedKernelLock lock(dir());
@@ -184,15 +183,15 @@ bool ModelNeutralMutableEntry::PutIsUnsynced(bool value) {
       }
     }
     kernel_->put(IS_UNSYNCED, value);
-    kernel_->mark_dirty(&dir()->kernel()->dirty_metahandles);
+    MarkDirty();
   }
   return true;
 }
 
 bool ModelNeutralMutableEntry::PutIsUnappliedUpdate(bool value) {
   DCHECK(kernel_);
-  base_write_transaction_->TrackChangesTo(kernel_);
   if (kernel_->ref(IS_UNAPPLIED_UPDATE) != value) {
+    base_write_transaction_->TrackChangesTo(kernel_);
     // Use kernel_->GetServerModelType() instead of
     // GetServerModelType() as we may trigger some DCHECKs in the
     // latter.
@@ -216,28 +215,27 @@ bool ModelNeutralMutableEntry::PutIsUnappliedUpdate(bool value) {
       }
     }
     kernel_->put(IS_UNAPPLIED_UPDATE, value);
-    kernel_->mark_dirty(&dir()->kernel()->dirty_metahandles);
+    MarkDirty();
   }
   return true;
 }
 
 void ModelNeutralMutableEntry::PutServerIsDir(bool value) {
   DCHECK(kernel_);
-  base_write_transaction_->TrackChangesTo(kernel_);
-  bool old_value = kernel_->ref(SERVER_IS_DIR);
-  if (old_value != value) {
+  if (kernel_->ref(SERVER_IS_DIR) != value) {
+    base_write_transaction_->TrackChangesTo(kernel_);
     kernel_->put(SERVER_IS_DIR, value);
-    kernel_->mark_dirty(GetDirtyIndexHelper());
+    MarkDirty();
   }
 }
 
 void ModelNeutralMutableEntry::PutServerIsDel(bool value) {
   DCHECK(kernel_);
-  base_write_transaction_->TrackChangesTo(kernel_);
   bool old_value = kernel_->ref(SERVER_IS_DEL);
   if (old_value != value) {
+    base_write_transaction_->TrackChangesTo(kernel_);
     kernel_->put(SERVER_IS_DEL, value);
-    kernel_->mark_dirty(GetDirtyIndexHelper());
+    MarkDirty();
   }
 
   if (!value || kernel_->ref(IS_UNAPPLIED_UPDATE)) {
@@ -259,11 +257,10 @@ void ModelNeutralMutableEntry::PutServerIsDel(bool value) {
 void ModelNeutralMutableEntry::PutServerNonUniqueName(
     const std::string& value) {
   DCHECK(kernel_);
-  base_write_transaction_->TrackChangesTo(kernel_);
-
   if (kernel_->ref(SERVER_NON_UNIQUE_NAME) != value) {
+    base_write_transaction_->TrackChangesTo(kernel_);
     kernel_->put(SERVER_NON_UNIQUE_NAME, value);
-    kernel_->mark_dirty(&dir()->kernel()->dirty_metahandles);
+    MarkDirty();
   }
 }
 
@@ -283,7 +280,7 @@ bool ModelNeutralMutableEntry::PutUniqueServerTag(const string& new_tag) {
   dir()->kernel()->server_tags_map.erase(
       kernel_->ref(UNIQUE_SERVER_TAG));
   kernel_->put(UNIQUE_SERVER_TAG, new_tag);
-  kernel_->mark_dirty(&dir()->kernel()->dirty_metahandles);
+  MarkDirty();
   if (!new_tag.empty()) {
     dir()->kernel()->server_tags_map[new_tag] = kernel_;
   }
@@ -307,7 +304,7 @@ bool ModelNeutralMutableEntry::PutUniqueClientTag(const string& new_tag) {
   dir()->kernel()->client_tags_map.erase(
       kernel_->ref(UNIQUE_CLIENT_TAG));
   kernel_->put(UNIQUE_CLIENT_TAG, new_tag);
-  kernel_->mark_dirty(&dir()->kernel()->dirty_metahandles);
+  MarkDirty();
   if (!new_tag.empty()) {
     dir()->kernel()->client_tags_map[new_tag] = kernel_;
   }
@@ -323,6 +320,8 @@ void ModelNeutralMutableEntry::PutUniqueBookmarkTag(const std::string& tag) {
     return;
   }
 
+  // TODO(stanisc): Does this need a call to TrackChangesTo?
+
   if (!kernel_->ref(UNIQUE_BOOKMARK_TAG).empty() &&
       tag != kernel_->ref(UNIQUE_BOOKMARK_TAG)) {
     // There is only one scenario where our tag is expected to change. That
@@ -335,18 +334,18 @@ void ModelNeutralMutableEntry::PutUniqueBookmarkTag(const std::string& tag) {
   }
 
   kernel_->put(UNIQUE_BOOKMARK_TAG, tag);
-  kernel_->mark_dirty(&dir()->kernel()->dirty_metahandles);
+  MarkDirty();
 }
 
 void ModelNeutralMutableEntry::PutServerSpecifics(
     const sync_pb::EntitySpecifics& value) {
   DCHECK(kernel_);
   CHECK(!value.password().has_client_only_encrypted_data());
-  base_write_transaction_->TrackChangesTo(kernel_);
   // TODO(ncarter): This is unfortunately heavyweight. Can we do
   // better?
   const std::string& serialized_value = value.SerializeAsString();
   if (serialized_value != kernel_->ref(SERVER_SPECIFICS).SerializeAsString()) {
+    base_write_transaction_->TrackChangesTo(kernel_);
     if (kernel_->ref(IS_UNAPPLIED_UPDATE)) {
       // Remove ourselves from unapplied_update_metahandles with our
       // old server type.
@@ -365,7 +364,7 @@ void ModelNeutralMutableEntry::PutServerSpecifics(
     } else {
       kernel_->put(SERVER_SPECIFICS, value);
     }
-    kernel_->mark_dirty(&dir()->kernel()->dirty_metahandles);
+    MarkDirty();
 
     if (kernel_->ref(IS_UNAPPLIED_UPDATE)) {
       // Add ourselves back into unapplied_update_metahandles with our
@@ -382,12 +381,12 @@ void ModelNeutralMutableEntry::PutBaseServerSpecifics(
     const sync_pb::EntitySpecifics& value) {
   DCHECK(kernel_);
   CHECK(!value.password().has_client_only_encrypted_data());
-  base_write_transaction_->TrackChangesTo(kernel_);
   // TODO(ncarter): This is unfortunately heavyweight. Can we do
   // better?
   const std::string& serialized_value = value.SerializeAsString();
   if (serialized_value !=
       kernel_->ref(BASE_SERVER_SPECIFICS).SerializeAsString()) {
+    base_write_transaction_->TrackChangesTo(kernel_);
     // Check for potential sharing - BASE_SERVER_SPECIFICS is often
     // copied from SERVER_SPECIFICS.
     if (serialized_value ==
@@ -396,30 +395,30 @@ void ModelNeutralMutableEntry::PutBaseServerSpecifics(
     } else {
       kernel_->put(BASE_SERVER_SPECIFICS, value);
     }
-    kernel_->mark_dirty(&dir()->kernel()->dirty_metahandles);
+    MarkDirty();
   }
 }
 
 void ModelNeutralMutableEntry::PutServerUniquePosition(
     const UniquePosition& value) {
   DCHECK(kernel_);
-  base_write_transaction_->TrackChangesTo(kernel_);
   if(!kernel_->ref(SERVER_UNIQUE_POSITION).Equals(value)) {
+    base_write_transaction_->TrackChangesTo(kernel_);
     // We should never overwrite a valid position with an invalid one.
     DCHECK(value.IsValid());
     ScopedKernelLock lock(dir());
     kernel_->put(SERVER_UNIQUE_POSITION, value);
-    kernel_->mark_dirty(&dir()->kernel()->dirty_metahandles);
+    MarkDirty();
   }
 }
 
 void ModelNeutralMutableEntry::PutServerAttachmentMetadata(
     const sync_pb::AttachmentMetadata& value) {
   DCHECK(kernel_);
-  base_write_transaction_->TrackChangesTo(kernel_);
   const std::string& serialized_value = value.SerializeAsString();
   if (serialized_value !=
       kernel_->ref(SERVER_ATTACHMENT_METADATA).SerializeAsString()) {
+    base_write_transaction_->TrackChangesTo(kernel_);
     // Check for potential sharing - SERVER_ATTACHMENT_METADATA is often
     // copied from ATTACHMENT_METADATA.
     if (serialized_value ==
@@ -428,7 +427,7 @@ void ModelNeutralMutableEntry::PutServerAttachmentMetadata(
     } else {
       kernel_->put(SERVER_ATTACHMENT_METADATA, value);
     }
-    kernel_->mark_dirty(&dir()->kernel()->dirty_metahandles);
+    MarkDirty();
   }
 }
 
@@ -444,20 +443,19 @@ void ModelNeutralMutableEntry::PutDirtySync(bool value) {
 void ModelNeutralMutableEntry::PutParentIdPropertyOnly(const Id& parent_id) {
   base_write_transaction_->TrackChangesTo(kernel_);
   dir()->ReindexParentId(base_write_transaction(), kernel_, parent_id);
-  kernel_->mark_dirty(&dir()->kernel()->dirty_metahandles);
+  MarkDirty();
 }
 
 void ModelNeutralMutableEntry::UpdateTransactionVersion(int64 value) {
-  ScopedKernelLock lock(dir());
   kernel_->put(TRANSACTION_VERSION, value);
-  kernel_->mark_dirty(&(dir()->kernel()->dirty_metahandles));
+  MarkDirty();
 }
 
 ModelNeutralMutableEntry::ModelNeutralMutableEntry(BaseWriteTransaction* trans)
     : Entry(trans), base_write_transaction_(trans) {}
 
-MetahandleSet* ModelNeutralMutableEntry::GetDirtyIndexHelper() {
-  return &dir()->kernel()->dirty_metahandles;
+void ModelNeutralMutableEntry::MarkDirty() {
+  kernel_->mark_dirty(&dir()->kernel()->dirty_metahandles);
 }
 
 }  // namespace syncable
diff --git a/sync/syncable/model_neutral_mutable_entry.h b/sync/syncable/model_neutral_mutable_entry.h
index 5ff71ad..76f5c63 100644
--- a/sync/syncable/model_neutral_mutable_entry.h
+++ b/sync/syncable/model_neutral_mutable_entry.h
@@ -100,7 +100,7 @@ class SYNC_EXPORT_PRIVATE ModelNeutralMutableEntry : public Entry {
  protected:
   explicit ModelNeutralMutableEntry(BaseWriteTransaction* trans);
 
-  syncable::MetahandleSet* GetDirtyIndexHelper();
+  void MarkDirty();
 
  private:
   friend class syncer::WriteNode;
diff --git a/sync/syncable/mutable_entry.cc b/sync/syncable/mutable_entry.cc
index 8e33a67..6c13261 100644
--- a/sync/syncable/mutable_entry.cc
+++ b/sync/syncable/mutable_entry.cc
@@ -122,36 +122,36 @@ MutableEntry::MutableEntry(WriteTransaction* trans, GetTypeRoot, ModelType type)
 void MutableEntry::PutLocalExternalId(int64 value) {
   DCHECK(kernel_);
-  write_transaction()->TrackChangesTo(kernel_);
   if (kernel_->ref(LOCAL_EXTERNAL_ID) != value) {
+    write_transaction()->TrackChangesTo(kernel_);
     ScopedKernelLock lock(dir());
     kernel_->put(LOCAL_EXTERNAL_ID, value);
-    kernel_->mark_dirty(&dir()->kernel()->dirty_metahandles);
+    MarkDirty();
   }
 }
 
 void MutableEntry::PutMtime(base::Time value) {
   DCHECK(kernel_);
-  write_transaction()->TrackChangesTo(kernel_);
   if (kernel_->ref(MTIME) != value) {
+    write_transaction()->TrackChangesTo(kernel_);
     kernel_->put(MTIME, value);
-    kernel_->mark_dirty(&dir()->kernel()->dirty_metahandles);
+    MarkDirty();
   }
 }
 
 void MutableEntry::PutCtime(base::Time value) {
   DCHECK(kernel_);
-  write_transaction()->TrackChangesTo(kernel_);
   if (kernel_->ref(CTIME) != value) {
+    write_transaction()->TrackChangesTo(kernel_);
     kernel_->put(CTIME, value);
-    kernel_->mark_dirty(&dir()->kernel()->dirty_metahandles);
+    MarkDirty();
   }
 }
 
 void MutableEntry::PutParentId(const Id& value) {
   DCHECK(kernel_);
-  write_transaction()->TrackChangesTo(kernel_);
   if (kernel_->ref(PARENT_ID) != value) {
+    write_transaction()->TrackChangesTo(kernel_);
     PutParentIdPropertyOnly(value);
     if (!GetIsDel()) {
       if (!PutPredecessor(Id())) {
@@ -164,20 +164,20 @@ void MutableEntry::PutParentId(const Id& value) {
 
 void MutableEntry::PutIsDir(bool value) {
   DCHECK(kernel_);
-  write_transaction()->TrackChangesTo(kernel_);
-  bool old_value = kernel_->ref(IS_DIR);
-  if (old_value != value) {
+  if (kernel_->ref(IS_DIR) != value) {
+    write_transaction()->TrackChangesTo(kernel_);
     kernel_->put(IS_DIR, value);
-    kernel_->mark_dirty(GetDirtyIndexHelper());
+    MarkDirty();
   }
 }
 
 void MutableEntry::PutIsDel(bool value) {
   DCHECK(kernel_);
-  write_transaction()->TrackChangesTo(kernel_);
   if (value == kernel_->ref(IS_DEL)) {
     return;
   }
+
+  write_transaction()->TrackChangesTo(kernel_);
   if (value) {
     // If the server never knew about this item and it's deleted then we don't
     // need to keep it around. Unsetting IS_UNSYNCED will:
@@ -202,28 +202,27 @@ void MutableEntry::PutIsDel(bool value) {
         &dir()->kernel()->parent_child_index);
     kernel_->put(IS_DEL, value);
-    kernel_->mark_dirty(&dir()->kernel()->dirty_metahandles);
+    MarkDirty();
   }
 }
 
 void MutableEntry::PutNonUniqueName(const std::string& value) {
   DCHECK(kernel_);
-  write_transaction()->TrackChangesTo(kernel_);
-
   if (kernel_->ref(NON_UNIQUE_NAME) != value) {
+    write_transaction()->TrackChangesTo(kernel_);
     kernel_->put(NON_UNIQUE_NAME, value);
-    kernel_->mark_dirty(&dir()->kernel()->dirty_metahandles);
+    MarkDirty();
   }
 }
 
 void MutableEntry::PutSpecifics(const sync_pb::EntitySpecifics& value) {
   DCHECK(kernel_);
   CHECK(!value.password().has_client_only_encrypted_data());
-  write_transaction()->TrackChangesTo(kernel_);
   // TODO(ncarter): This is unfortunately heavyweight. Can we do
   // better?
   const std::string& serialized_value = value.SerializeAsString();
   if (serialized_value != kernel_->ref(SPECIFICS).SerializeAsString()) {
+    write_transaction()->TrackChangesTo(kernel_);
     // Check for potential sharing - SPECIFICS is often
     // copied from SERVER_SPECIFICS.
     if (serialized_value ==
@@ -232,21 +231,21 @@ void MutableEntry::PutSpecifics(const sync_pb::EntitySpecifics& value) {
     } else {
       kernel_->put(SPECIFICS, value);
     }
-    kernel_->mark_dirty(&dir()->kernel()->dirty_metahandles);
+    MarkDirty();
   }
 }
 
 void MutableEntry::PutUniquePosition(const UniquePosition& value) {
   DCHECK(kernel_);
-  write_transaction()->TrackChangesTo(kernel_);
   if(!kernel_->ref(UNIQUE_POSITION).Equals(value)) {
+    write_transaction()->TrackChangesTo(kernel_);
     // We should never overwrite a valid position with an invalid one.
     DCHECK(value.IsValid());
     ScopedKernelLock lock(dir());
     ScopedParentChildIndexUpdater updater(
         lock, kernel_, &dir()->kernel()->parent_child_index);
     kernel_->put(UNIQUE_POSITION, value);
-    kernel_->mark_dirty(&dir()->kernel()->dirty_metahandles);
+    MarkDirty();
   }
 }
 
@@ -258,6 +257,7 @@ bool MutableEntry::PutPredecessor(const Id& predecessor_id) {
     if (!predecessor.good())
       return false;
     dir()->PutPredecessor(kernel_, predecessor.kernel_);
+    DCHECK(kernel_->is_dirty());
   }
   return true;
 }
@@ -265,10 +265,10 @@ bool MutableEntry::PutPredecessor(const Id& predecessor_id) {
 void MutableEntry::PutAttachmentMetadata(
     const sync_pb::AttachmentMetadata& value) {
   DCHECK(kernel_);
-  write_transaction()->TrackChangesTo(kernel_);
   const std::string& serialized_value = value.SerializeAsString();
   if (serialized_value !=
       kernel_->ref(ATTACHMENT_METADATA).SerializeAsString()) {
+    write_transaction()->TrackChangesTo(kernel_);
     dir()->UpdateAttachmentIndex(GetMetahandle(),
                                  kernel_->ref(ATTACHMENT_METADATA), value);
     // Check for potential sharing - ATTACHMENT_METADATA is often
@@ -279,7 +279,7 @@ void MutableEntry::PutAttachmentMetadata(
     } else {
      kernel_->put(ATTACHMENT_METADATA, value);
     }
-    kernel_->mark_dirty(&dir()->kernel()->dirty_metahandles);
+    MarkDirty();
   }
 }
 
@@ -298,7 +298,7 @@ void MutableEntry::MarkAttachmentAsOnServer(
     record->set_is_on_server(true);
   }
   kernel_->put(ATTACHMENT_METADATA, attachment_metadata);
-  kernel_->mark_dirty(&dir()->kernel()->dirty_metahandles);
+  MarkDirty();
   MarkForSyncing(this);
 }