Diffstat (limited to 'sync/syncable/directory.cc')
-rw-r--r-- | sync/syncable/directory.cc | 395
1 file changed, 141 insertions, 254 deletions
diff --git a/sync/syncable/directory.cc b/sync/syncable/directory.cc
index a9e0f65..a844e66 100644
--- a/sync/syncable/directory.cc
+++ b/sync/syncable/directory.cc
@@ -4,17 +4,18 @@
 
 #include "sync/syncable/directory.h"
 
+#include "base/base64.h"
 #include "base/debug/trace_event.h"
-#include "base/perftimer.h"
 #include "base/stl_util.h"
 #include "base/string_number_conversions.h"
-#include "sync/internal_api/public/base/node_ordinal.h"
+#include "sync/internal_api/public/base/unique_position.h"
 #include "sync/internal_api/public/util/unrecoverable_error_handler.h"
 #include "sync/syncable/entry.h"
 #include "sync/syncable/entry_kernel.h"
 #include "sync/syncable/in_memory_directory_backing_store.h"
 #include "sync/syncable/on_disk_directory_backing_store.h"
 #include "sync/syncable/scoped_index_updater.h"
+#include "sync/syncable/scoped_parent_child_index_updater.h"
 #include "sync/syncable/syncable-inl.h"
 #include "sync/syncable/syncable_base_transaction.h"
 #include "sync/syncable/syncable_changes_version.h"
@@ -36,7 +37,6 @@ void InitializeIndexEntry(EntryKernel* entry,
     index->insert(entry);
   }
 }
-
 }
 
 // static
@@ -44,29 +44,6 @@ bool ClientTagIndexer::ShouldInclude(const EntryKernel* a) {
   return !a->ref(UNIQUE_CLIENT_TAG).empty();
 }
 
-bool ParentIdAndHandleIndexer::Comparator::operator() (
-    const syncable::EntryKernel* a,
-    const syncable::EntryKernel* b) const {
-  int cmp = a->ref(PARENT_ID).compare(b->ref(PARENT_ID));
-  if (cmp != 0)
-    return cmp < 0;
-
-  const NodeOrdinal& a_position = a->ref(SERVER_ORDINAL_IN_PARENT);
-  const NodeOrdinal& b_position = b->ref(SERVER_ORDINAL_IN_PARENT);
-  if (!a_position.Equals(b_position))
-    return a_position.LessThan(b_position);
-
-  cmp = a->ref(ID).compare(b->ref(ID));
-  return cmp < 0;
-}
-
-// static
-bool ParentIdAndHandleIndexer::ShouldInclude(const EntryKernel* a) {
-  // This index excludes deleted items and the root item. The root
-  // item is excluded so that it doesn't show up as a child of itself.
-  return !a->ref(IS_DEL) && !a->ref(ID).IsRoot();
-}
-
 // static
 const base::FilePath::CharType Directory::kSyncDatabaseFilename[] =
     FILE_PATH_LITERAL("SyncData.sqlite3");
@@ -116,7 +93,7 @@ Directory::Kernel::Kernel(
       name(name),
       metahandles_index(new Directory::MetahandlesIndex),
       ids_index(new Directory::IdsIndex),
-      parent_id_child_index(new Directory::ParentIdChildIndex),
+      parent_child_index(new ParentChildIndex),
       client_tag_index(new Directory::ClientTagIndex),
       unsynced_metahandles(new MetahandleSet),
      dirty_metahandles(new MetahandleSet),
@@ -135,7 +112,7 @@ Directory::Kernel::~Kernel() {
   delete unsynced_metahandles;
   delete dirty_metahandles;
   delete metahandles_to_purge;
-  delete parent_id_child_index;
+  delete parent_child_index;
   delete client_tag_index;
   delete ids_index;
   STLDeleteElements(metahandles_index);
@@ -181,8 +158,8 @@ void Directory::InitializeIndices() {
   MetahandlesIndex::iterator it = kernel_->metahandles_index->begin();
   for (; it != kernel_->metahandles_index->end(); ++it) {
     EntryKernel* entry = *it;
-    InitializeIndexEntry<ParentIdAndHandleIndexer>(entry,
-        kernel_->parent_id_child_index);
+    if (ParentChildIndex::ShouldInclude(entry))
+      kernel_->parent_child_index->Insert(entry);
     InitializeIndexEntry<IdIndexer>(entry, kernel_->ids_index);
     InitializeIndexEntry<ClientTagIndexer>(entry, kernel_->client_tag_index);
     const int64 metahandle = entry->ref(META_HANDLE);
@@ -367,8 +344,8 @@ bool Directory::InsertEntry(WriteTransaction* trans,
                   trans))
     return false;
 
-  if (!entry->ref(IS_DEL)) {
-    if (!SyncAssert(kernel_->parent_id_child_index->insert(entry).second,
+  if (ParentChildIndex::ShouldInclude(entry)) {
+    if (!SyncAssert(kernel_->parent_child_index->Insert(entry),
                     FROM_HERE,
                     error,
                     trans)) {
@@ -399,8 +376,8 @@ bool Directory::ReindexId(WriteTransaction* trans,
   {
     // Update the indices that depend on the ID field.
     ScopedIndexUpdater<IdIndexer> updater_a(lock, entry, kernel_->ids_index);
-    ScopedIndexUpdater<ParentIdAndHandleIndexer> updater_b(lock, entry,
-        kernel_->parent_id_child_index);
+    ScopedParentChildIndexUpdater updater_b(lock, entry,
+        kernel_->parent_child_index);
     entry->put(ID, new_id);
   }
   return true;
@@ -413,8 +390,8 @@ bool Directory::ReindexParentId(WriteTransaction* trans,
 
   {
     // Update the indices that depend on the PARENT_ID field.
-    ScopedIndexUpdater<ParentIdAndHandleIndexer> index_updater(lock, entry,
-        kernel_->parent_id_child_index);
+    ScopedParentChildIndexUpdater index_updater(lock, entry,
+        kernel_->parent_child_index);
     entry->put(PARENT_ID, new_parent_id);
   }
   return true;
@@ -550,7 +527,7 @@ bool Directory::VacuumAfterSaveChanges(const SaveChangesSnapshot& snapshot) {
       // Might not be in it
       num_erased = kernel_->client_tag_index->erase(entry);
       DCHECK_EQ(entry->ref(UNIQUE_CLIENT_TAG).empty(), !num_erased);
-      if (!SyncAssert(!kernel_->parent_id_child_index->count(entry),
+      if (!SyncAssert(!kernel_->parent_child_index->Contains(entry),
                       FROM_HERE,
                       "Deleted entry still present",
                       (&trans)))
@@ -589,8 +566,6 @@ bool Directory::PurgeEntriesWithTypeIn(ModelTypeSet types,
     // Note the dance around incrementing |it|, since we sometimes erase().
     if ((IsRealDataType(local_type) && types.Has(local_type)) ||
         (IsRealDataType(server_type) && types.Has(server_type))) {
-      if (!UnlinkEntryFromOrder(*it, &trans, &lock, DATA_TYPE_PURGE))
-        return false;
 
       int64 handle = (*it)->ref(META_HANDLE);
       kernel_->metahandles_to_purge->insert(handle);
@@ -606,8 +581,8 @@ bool Directory::PurgeEntriesWithTypeIn(ModelTypeSet types,
         num_erased =
             kernel_->unapplied_update_metahandles[server_type].erase(handle);
         DCHECK_EQ(entry->ref(IS_UNAPPLIED_UPDATE), num_erased > 0);
-        num_erased = kernel_->parent_id_child_index->erase(entry);
-        DCHECK_EQ(entry->ref(IS_DEL), !num_erased);
+        if (kernel_->parent_child_index->Contains(entry))
+          kernel_->parent_child_index->Remove(entry);
         kernel_->metahandles_index->erase(it++);
 
         if ((types_to_journal.Has(local_type) ||
@@ -730,14 +705,6 @@ bool Directory::InitialSyncEndedForType(
   return entry.good() && entry.Get(syncable::BASE_VERSION) != CHANGES_VERSION;
 }
 
-template <class T> void Directory::TestAndSet(
-    T* kernel_data, const T* data_to_set) {
-  if (*kernel_data != *data_to_set) {
-    *kernel_data = *data_to_set;
-    kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
-  }
-}
-
 string Directory::store_birthday() const {
   ScopedKernelLock lock(this);
   return kernel_->persisted_info.store_birthday;
@@ -1030,76 +997,6 @@ void Directory::SetInvariantCheckLevel(InvariantCheckLevel check_level) {
   invariant_check_level_ = check_level;
 }
 
-bool Directory::UnlinkEntryFromOrder(EntryKernel* entry,
-                                     WriteTransaction* trans,
-                                     ScopedKernelLock* lock,
-                                     UnlinkReason unlink_reason) {
-  if (!SyncAssert(!trans || this == trans->directory(),
-                  FROM_HERE,
-                  "Transaction not pointing to the right directory",
-                  trans))
-    return false;
-  Id old_previous = entry->ref(PREV_ID);
-  Id old_next = entry->ref(NEXT_ID);
-
-  entry->put(NEXT_ID, entry->ref(ID));
-  entry->put(PREV_ID, entry->ref(ID));
-  entry->mark_dirty(kernel_->dirty_metahandles);
-
-  if (!old_previous.IsRoot()) {
-    if (old_previous == old_next) {
-      // Note previous == next doesn't imply previous == next == Get(ID). We
-      // could have prev==next=="c-XX" and Get(ID)=="sX..." if an item was added
-      // and deleted before receiving the server ID in the commit response.
-      if (!SyncAssert(
-              (old_next == entry->ref(ID)) || !old_next.ServerKnows(),
-              FROM_HERE,
-              "Encounteered inconsistent entry while deleting",
-              trans)) {
-        return false;
-      }
-      return true;  // Done if we were already self-looped (hence unlinked).
-    }
-    EntryKernel* previous_entry = GetEntryById(old_previous, lock);
-    ModelType type = GetModelTypeFromSpecifics(entry->ref(SPECIFICS));
-    // TODO(tim): Multiple asserts here for bug 101039 investigation.
-    if (type == AUTOFILL) {
-      if (!SyncAssert(previous_entry != NULL,
-                      FROM_HERE,
-                      "Could not find previous autofill entry",
-                      trans)) {
-        return false;
-      }
-    } else {
-      if (!SyncAssert(previous_entry != NULL,
-                      FROM_HERE,
-                      "Could not find previous entry",
-                      trans)) {
-        return false;
-      }
-    }
-    if (unlink_reason == NODE_MANIPULATION)
-      trans->SaveOriginal(previous_entry);
-    previous_entry->put(NEXT_ID, old_next);
-    previous_entry->mark_dirty(kernel_->dirty_metahandles);
-  }
-
-  if (!old_next.IsRoot()) {
-    EntryKernel* next_entry = GetEntryById(old_next, lock);
-    if (!SyncAssert(next_entry != NULL,
-                    FROM_HERE,
-                    "Could not find next entry",
-                    trans)) {
-      return false;
-    }
-    if (unlink_reason == NODE_MANIPULATION)
-      trans->SaveOriginal(next_entry);
-    next_entry->put(PREV_ID, old_previous);
-    next_entry->mark_dirty(kernel_->dirty_metahandles);
-  }
-  return true;
-}
-
 int64 Directory::NextMetahandle() {
   ScopedKernelLock lock(this);
   int64 metahandle = (kernel_->next_metahandle)++;
@@ -1121,171 +1018,161 @@ Id Directory::NextId() {
 
 bool Directory::HasChildren(BaseTransaction* trans, const Id& id) {
   ScopedKernelLock lock(this);
-  return (GetPossibleFirstChild(lock, id) != NULL);
+  return kernel_->parent_child_index->GetChildren(id) != NULL;
 }
 
-bool Directory::GetFirstChildId(BaseTransaction* trans,
-                                const Id& parent_id,
-                                Id* first_child_id) {
+Id Directory::GetFirstChildId(BaseTransaction* trans,
+                              const EntryKernel* parent) {
+  DCHECK(parent);
+  DCHECK(parent->ref(IS_DIR));
+
   ScopedKernelLock lock(this);
-  EntryKernel* entry = GetPossibleFirstChild(lock, parent_id);
-  if (!entry) {
-    *first_child_id = Id();
-    return true;
-  }
+  const OrderedChildSet* children =
+      kernel_->parent_child_index->GetChildren(parent->ref(ID));
 
-  // Walk to the front of the list; the server position ordering
-  // is commonly identical to the linked-list ordering, but pending
-  // unsynced or unapplied items may diverge.
-  while (!entry->ref(PREV_ID).IsRoot()) {
-    entry = GetEntryById(entry->ref(PREV_ID), &lock);
-    if (!entry) {
-      *first_child_id = Id();
-      return false;
-    }
-  }
-  *first_child_id = entry->ref(ID);
-  return true;
+  // We're expected to return root if there are no children.
+  if (!children)
+    return Id();
+
+  return (*children->begin())->ref(ID);
 }
 
-bool Directory::GetLastChildIdForTest(
-    BaseTransaction* trans, const Id& parent_id, Id* last_child_id) {
+syncable::Id Directory::GetPredecessorId(EntryKernel* e) {
   ScopedKernelLock lock(this);
-  EntryKernel* entry = GetPossibleFirstChild(lock, parent_id);
-  if (!entry) {
-    *last_child_id = Id();
-    return true;
-  }
-
-  // Walk to the back of the list; the server position ordering
-  // is commonly identical to the linked-list ordering, but pending
-  // unsynced or unapplied items may diverge.
-  while (!entry->ref(NEXT_ID).IsRoot()) {
-    entry = GetEntryById(entry->ref(NEXT_ID), &lock);
-    if (!entry) {
-      *last_child_id = Id();
-      return false;
-    }
+  DCHECK(ParentChildIndex::ShouldInclude(e));
+  const OrderedChildSet* children =
+      kernel_->parent_child_index->GetChildren(e->ref(PARENT_ID));
+  DCHECK(children && !children->empty());
+  OrderedChildSet::const_iterator i = children->find(e);
+  DCHECK(i != children->end());
+
+  if (i == children->begin()) {
+    return Id();
+  } else {
+    i--;
+    return (*i)->ref(ID);
   }
-
-  *last_child_id = entry->ref(ID);
-  return true;
 }
 
-Id Directory::ComputePrevIdFromServerPosition(
-    const EntryKernel* entry,
-    const syncable::Id& parent_id) {
+syncable::Id Directory::GetSuccessorId(EntryKernel* e) {
   ScopedKernelLock lock(this);
-  // Find the natural insertion point in the parent_id_child_index, and
-  // work back from there, filtering out ineligible candidates.
-  ParentIdChildIndex::iterator sibling = LocateInParentChildIndex(
-      lock,
-      parent_id,
-      NodeOrdinalToInt64(entry->ref(SERVER_ORDINAL_IN_PARENT)),
-      entry->ref(ID));
-  ParentIdChildIndex::iterator first_sibling =
-      GetParentChildIndexLowerBound(lock, parent_id);
-
-  while (sibling != first_sibling) {
-    --sibling;
-    EntryKernel* candidate = *sibling;
-
-    // The item itself should never be in the range under consideration.
-    DCHECK_NE(candidate->ref(META_HANDLE), entry->ref(META_HANDLE));
-
-    // Ignore unapplied updates -- they might not even be server-siblings.
-    if (candidate->ref(IS_UNAPPLIED_UPDATE))
-      continue;
+  DCHECK(ParentChildIndex::ShouldInclude(e));
+  const OrderedChildSet* children =
+      kernel_->parent_child_index->GetChildren(e->ref(PARENT_ID));
+  DCHECK(children && !children->empty());
+  OrderedChildSet::const_iterator i = children->find(e);
+  DCHECK(i != children->end());
+
+  i++;
+  if (i == children->end()) {
+    return Id();
+  } else {
+    return (*i)->ref(ID);
+  }
 }
 
-    // We can't trust the SERVER_ fields of unsynced items, but they are
-    // potentially legitimate local predecessors. In the case where
-    // |update_item| and an unsynced item wind up in the same insertion
-    // position, we need to choose how to order them. The following check puts
-    // the unapplied update first; removing it would put the unsynced item(s)
-    // first.
-    if (candidate->ref(IS_UNSYNCED))
-      continue;
+// TODO(rlarocque): Remove all support for placing ShouldMaintainPosition()
+// items as siblings of items that do not maintain postions. It is required
+// only for tests. See crbug.com/178282.
+void Directory::PutPredecessor(EntryKernel* e, EntryKernel* predecessor) {
+  DCHECK(!e->ref(IS_DEL));
+  if (!e->ShouldMaintainPosition()) {
+    DCHECK(!e->ref(UNIQUE_POSITION).IsValid());
+    return;
+  }
+  std::string suffix = e->ref(UNIQUE_BOOKMARK_TAG);
+  DCHECK(!suffix.empty());
 
-    // Skip over self-looped items, which are not valid predecessors. This
-    // shouldn't happen in practice, but is worth defending against.
-    if (candidate->ref(PREV_ID) == candidate->ref(NEXT_ID) &&
-        !candidate->ref(PREV_ID).IsRoot()) {
-      NOTREACHED();
-      continue;
+  // Remove our item from the ParentChildIndex and remember to re-add it later.
+  ScopedKernelLock lock(this);
+  ScopedParentChildIndexUpdater updater(lock, e, kernel_->parent_child_index);
+
+  // Note: The ScopedParentChildIndexUpdater will update this set for us as we
+  // leave this function.
+  const OrderedChildSet* siblings =
+      kernel_->parent_child_index->GetChildren(e->ref(PARENT_ID));
+
+  if (!siblings) {
+    // This parent currently has no other children.
+    DCHECK(predecessor->ref(ID).IsRoot());
+    UniquePosition pos = UniquePosition::InitialPosition(suffix);
+    e->put(UNIQUE_POSITION, pos);
+    return;
+  }
+
+  if (predecessor->ref(ID).IsRoot()) {
+    // We have at least one sibling, and we're inserting to the left of them.
+    UniquePosition successor_pos = (*siblings->begin())->ref(UNIQUE_POSITION);
+
+    UniquePosition pos;
+    if (!successor_pos.IsValid()) {
+      // If all our successors are of non-positionable types, just create an
+      // initial position. We arbitrarily choose to sort invalid positions to
+      // the right of the valid positions.
+      //
+      // We really shouldn't need to support this. See TODO above.
+      pos = UniquePosition::InitialPosition(suffix);
+    } else {
+      DCHECK(!siblings->empty());
+      pos = UniquePosition::Before(successor_pos, suffix);
     }
-    return candidate->ref(ID);
+
+    e->put(UNIQUE_POSITION, pos);
+    return;
   }
-  // This item will be the first in the sibling order.
-  return Id();
-}
 
-Directory::ParentIdChildIndex::iterator Directory::LocateInParentChildIndex(
-    const ScopedKernelLock& lock,
-    const Id& parent_id,
-    int64 position_in_parent,
-    const Id& item_id_for_tiebreaking) {
-  kernel_->needle.put(PARENT_ID, parent_id);
-  kernel_->needle.put(SERVER_ORDINAL_IN_PARENT,
-                      Int64ToNodeOrdinal(position_in_parent));
-  kernel_->needle.put(ID, item_id_for_tiebreaking);
-  return kernel_->parent_id_child_index->lower_bound(&kernel_->needle);
-}
+  // We can't support placing an item after an invalid position. Fortunately,
+  // the tests don't exercise this particular case. We should not support
+  // siblings with invalid positions at all. See TODO above.
+  DCHECK(predecessor->ref(UNIQUE_POSITION).IsValid());
+
+  OrderedChildSet::const_iterator neighbour = siblings->find(predecessor);
+  DCHECK(neighbour != siblings->end());
+
+  ++neighbour;
+  if (neighbour == siblings->end()) {
+    // Inserting at the end of the list.
+    UniquePosition pos = UniquePosition::After(
+        predecessor->ref(UNIQUE_POSITION),
+        suffix);
+    e->put(UNIQUE_POSITION, pos);
+    return;
+  }
 
-Directory::ParentIdChildIndex::iterator
-Directory::GetParentChildIndexLowerBound(const ScopedKernelLock& lock,
-                                         const Id& parent_id) {
-  // Peg the parent ID, and use the least values for the remaining
-  // index variables.
-  return LocateInParentChildIndex(lock, parent_id,
-                                  std::numeric_limits<int64>::min(),
-                                  Id::GetLeastIdForLexicographicComparison());
-}
+  EntryKernel* successor = *neighbour;
+
+  // Another mixed valid and invalid position case. This one could be supported
+  // in theory, but we're trying to deprecate support for siblings with and
+  // without valid positions. See TODO above.
+  DCHECK(successor->ref(UNIQUE_POSITION).IsValid());
 
-Directory::ParentIdChildIndex::iterator
-Directory::GetParentChildIndexUpperBound(const ScopedKernelLock& lock,
-                                         const Id& parent_id) {
-  // The upper bound of |parent_id|'s range is the lower
-  // bound of |++parent_id|'s range.
-  return GetParentChildIndexLowerBound(lock,
-                                       parent_id.GetLexicographicSuccessor());
+  // Finally, the normal case: inserting between two elements.
+  UniquePosition pos = UniquePosition::Between(
+      predecessor->ref(UNIQUE_POSITION),
+      successor->ref(UNIQUE_POSITION),
+      suffix);
+  e->put(UNIQUE_POSITION, pos);
+  return;
 }
 
+// TODO(rlarocque): Avoid this indirection. Just return the set.
 void Directory::AppendChildHandles(const ScopedKernelLock& lock,
                                    const Id& parent_id,
                                    Directory::ChildHandles* result) {
-  typedef ParentIdChildIndex::iterator iterator;
-  CHECK(result);
-  for (iterator i = GetParentChildIndexLowerBound(lock, parent_id),
-           end = GetParentChildIndexUpperBound(lock, parent_id);
-       i != end; ++i) {
+  const OrderedChildSet* children =
+      kernel_->parent_child_index->GetChildren(parent_id);
+  if (!children)
+    return;
+
+  for (OrderedChildSet::const_iterator i = children->begin();
+       i != children->end(); ++i) {
     DCHECK_EQ(parent_id, (*i)->ref(PARENT_ID));
     result->push_back((*i)->ref(META_HANDLE));
   }
 }
 
-EntryKernel* Directory::GetPossibleFirstChild(
-    const ScopedKernelLock& lock, const Id& parent_id) {
-  // We can use the server positional ordering as a hint because it's generally
-  // in sync with the local (linked-list) positional ordering, and we have an
-  // index on it.
-  ParentIdChildIndex::iterator candidate =
-      GetParentChildIndexLowerBound(lock, parent_id);
-  ParentIdChildIndex::iterator end_range =
-      GetParentChildIndexUpperBound(lock, parent_id);
-  for (; candidate != end_range; ++candidate) {
-    EntryKernel* entry = *candidate;
-    // Filter out self-looped items, which are temporarily not in the child
-    // ordering.
-    if (entry->ref(PREV_ID).IsRoot() ||
-        entry->ref(PREV_ID) != entry->ref(NEXT_ID)) {
-      return entry;
-    }
-  }
-  // There were no children in the linked list.
-  return NULL;
-}
-
 ScopedKernelLock::ScopedKernelLock(const Directory* dir)
     : scoped_lock_(dir->kernel_->mutex), dir_(const_cast<Directory*>(dir)) {
 }
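
Note: the heart of this change is swapping the flat ParentIdAndHandleIndexer, which ordered every entry by (PARENT_ID, SERVER_ORDINAL_IN_PARENT, ID), for a dedicated ParentChildIndex whose GetChildren() returns a per-parent OrderedChildSet. That is what turns HasChildren(), GetFirstChildId(), and AppendChildHandles() above into single lookups with no list walking. The sketch below approximates the interface those call sites assume; the real class is added elsewhere in this commit and is not part of this file's diff, so the container layout and the std::string stand-in for syncable::Id are assumptions for illustration.

// Sketch only: an approximation of the ParentChildIndex this diff migrates
// to. One ordered set of children per parent replaces the old flat index.
#include <cassert>
#include <map>
#include <set>
#include <string>

struct Entry {
  std::string id;
  std::string parent_id;
  std::string position;   // stands in for UNIQUE_POSITION
  bool is_deleted;
};

// Children sorted by position; the entry ID breaks ties, as the removed
// ParentIdAndHandleIndexer comparator did.
struct PositionLess {
  bool operator()(const Entry* a, const Entry* b) const {
    if (a->position != b->position)
      return a->position < b->position;
    return a->id < b->id;
  }
};
typedef std::set<Entry*, PositionLess> OrderedChildSet;

class ParentChildIndex {
 public:
  // Deleted items and the root are excluded, so the root never appears as
  // a child of itself (the same rule the removed indexer enforced).
  static bool ShouldInclude(const Entry* e) {
    return !e->is_deleted && !e->id.empty();
  }
  bool Insert(Entry* e) { return children_[e->parent_id].insert(e).second; }
  void Remove(Entry* e) { children_[e->parent_id].erase(e); }
  bool Contains(const Entry* e) const {
    std::map<std::string, OrderedChildSet>::const_iterator it =
        children_.find(e->parent_id);
    return it != children_.end() &&
           it->second.count(const_cast<Entry*>(e)) > 0;
  }
  // NULL means "no children"; Directory::HasChildren() tests exactly this.
  const OrderedChildSet* GetChildren(const std::string& parent_id) const {
    std::map<std::string, OrderedChildSet>::const_iterator it =
        children_.find(parent_id);
    if (it == children_.end() || it->second.empty())
      return NULL;
    return &it->second;
  }

 private:
  std::map<std::string, OrderedChildSet> children_;
};

int main() {
  Entry a = { "a", "root", "1", false };
  Entry b = { "b", "root", "2", false };
  ParentChildIndex index;
  assert(index.Insert(&a) && index.Insert(&b));
  // "First child" is now one lookup plus begin(), with no list walking.
  const OrderedChildSet* kids = index.GetChildren("root");
  assert(kids != NULL && (*kids->begin())->id == "a");
  return 0;
}

Keeping one ordered set per parent makes Insert/Remove/Contains logarithmic within a sibling group and makes "first child" a begin() dereference, which is exactly the access pattern the rewritten functions need.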
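
The new GetPredecessorId() and GetSuccessorId() replace the PREV_ID/NEXT_ID pointer chasing that the deleted UnlinkEntryFromOrder() existed to maintain: sibling navigation is now a set find() followed by one iterator step, and deleting an entry no longer requires patching up its neighbours. The same pattern in miniature, with int standing in for EntryKernel* (a hedged illustration, not the real code):

// Sketch: predecessor lookup over an ordered sibling set.
#include <cassert>
#include <set>

// Writes the predecessor of |value| to |*out| and returns true, or returns
// false when |value| is the first child (the real code returns the root Id).
bool GetPredecessor(const std::set<int>& siblings, int value, int* out) {
  std::set<int>::const_iterator i = siblings.find(value);
  assert(i != siblings.end());  // callers pass a known child, as DCHECKed above
  if (i == siblings.begin())
    return false;
  --i;
  *out = *i;
  return true;
}

int main() {
  std::set<int> siblings;
  siblings.insert(10);
  siblings.insert(20);
  siblings.insert(30);
  int prev = 0;
  assert(GetPredecessor(siblings, 20, &prev) && prev == 10);
  assert(!GetPredecessor(siblings, 10, &prev));  // first child: no predecessor
  return 0;
}

GetSuccessorId() is the mirror image: increment instead of decrement, and report "no successor" when the step reaches end().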
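
PutPredecessor() in the diff distinguishes four cases: no siblings at all (InitialPosition), inserting at the front (Before the first sibling's position), appending at the end (After the predecessor's position), and the general case (Between the predecessor and its old successor). For that scheme to keep working, the position type must always admit a new value strictly between any two existing ones. The toy below illustrates that invariant with fractional indexing over digit vectors; it only models the contract and is not Chromium's actual UniquePosition, which encodes positions as byte strings and folds in the UNIQUE_BOOKMARK_TAG suffix so that two items can never mint equal positions.

// Toy model of the ordering contract behind UniquePosition::Between().
#include <cassert>
#include <cstddef>
#include <vector>

const int kBase = 256;  // digit range, like one byte of a position string

// Precondition: a < b lexicographically and neither ends in a zero digit.
std::vector<int> Between(const std::vector<int>& a, const std::vector<int>& b) {
  std::vector<int> result;
  for (std::size_t i = 0;; ++i) {
    int x = i < a.size() ? a[i] : 0;      // pad a with the minimum digit
    int y = i < b.size() ? b[i] : kBase;  // pad b with one past the maximum
    if (y - x > 1) {
      result.push_back(x + (y - x) / 2);  // room for a midpoint: done
      return result;
    }
    result.push_back(x);  // adjacent digits: keep x and go one digit deeper
  }
}

int main() {
  std::vector<int> a(1, 100), b(1, 101);  // adjacent positions, no gap
  std::vector<int> c = Between(a, b);     // grows a digit: {100, 128}
  assert(a < c && c < b);
  std::vector<int> d = Between(a, c);     // still works between a and c
  assert(a < d && d < c);
  return 0;
}

The price of always finding a midpoint is that positions grow longer under repeated adjacent insertion, which is why production implementations of this idea typically compress their encodings.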