-rw-r--r-- chrome/browser/sync/glue/bookmark_change_processor.cc | 10
-rw-r--r-- chrome/browser/sync/profile_sync_service_bookmark_unittest.cc | 15
-rw-r--r-- chrome/browser/sync/profile_sync_service_password_unittest.cc | 23
-rw-r--r-- sync/engine/build_commit_command.cc | 88
-rw-r--r-- sync/engine/build_commit_command.h | 16
-rw-r--r-- sync/engine/build_commit_command_unittest.cc | 105
-rw-r--r-- sync/engine/conflict_resolver.cc | 66
-rw-r--r-- sync/engine/process_commit_response_command.cc | 31
-rw-r--r-- sync/engine/process_commit_response_command_unittest.cc | 41
-rw-r--r-- sync/engine/process_updates_command.cc | 24
-rw-r--r-- sync/engine/process_updates_command_unittest.cc | 109
-rw-r--r-- sync/engine/syncer.cc | 23
-rw-r--r-- sync/engine/syncer.h | 1
-rw-r--r-- sync/engine/syncer_proto_util.cc | 10
-rw-r--r-- sync/engine/syncer_proto_util.h | 2
-rw-r--r-- sync/engine/syncer_unittest.cc | 284
-rw-r--r-- sync/engine/syncer_util.cc | 137
-rw-r--r-- sync/engine/syncer_util.h | 45
-rw-r--r-- sync/internal_api/base_node.cc | 44
-rw-r--r-- sync/internal_api/change_reorder_buffer.cc | 10
-rw-r--r-- sync/internal_api/public/base/model_type.h | 4
-rw-r--r-- sync/internal_api/public/base/unique_position.cc | 10
-rw-r--r-- sync/internal_api/public/base/unique_position.h | 3
-rw-r--r-- sync/internal_api/public/base/unique_position_unittest.cc | 4
-rw-r--r-- sync/internal_api/sync_manager_impl.cc | 8
-rw-r--r-- sync/internal_api/sync_manager_impl.h | 9
-rw-r--r-- sync/internal_api/sync_manager_impl_unittest.cc | 59
-rw-r--r-- sync/internal_api/test/test_entry_factory.cc | 19
-rw-r--r-- sync/protocol/proto_value_conversions.cc | 9
-rw-r--r-- sync/sync_core.gypi | 5
-rw-r--r-- sync/sync_tests.gypi | 2
-rw-r--r-- sync/syncable/directory.cc | 395
-rw-r--r-- sync/syncable/directory.h | 109
-rw-r--r-- sync/syncable/directory_backing_store.cc | 184
-rw-r--r-- sync/syncable/directory_backing_store.h | 1
-rw-r--r-- sync/syncable/directory_backing_store_unittest.cc | 255
-rw-r--r-- sync/syncable/entry.cc | 21
-rw-r--r-- sync/syncable/entry.h | 13
-rw-r--r-- sync/syncable/entry_kernel.cc | 35
-rw-r--r-- sync/syncable/entry_kernel.h | 40
-rw-r--r-- sync/syncable/in_memory_directory_backing_store.cc | 11
-rw-r--r-- sync/syncable/in_memory_directory_backing_store.h | 6
-rw-r--r-- sync/syncable/model_type.cc | 4
-rw-r--r-- sync/syncable/mutable_entry.cc | 138
-rw-r--r-- sync/syncable/mutable_entry.h | 5
-rw-r--r-- sync/syncable/nigori_util.cc | 14
-rw-r--r-- sync/syncable/parent_child_index.cc | 115
-rw-r--r-- sync/syncable/parent_child_index.h | 66
-rw-r--r-- sync/syncable/parent_child_index_unittest.cc | 344
-rw-r--r-- sync/syncable/scoped_parent_child_index_updater.cc | 28
-rw-r--r-- sync/syncable/scoped_parent_child_index_updater.h | 37
-rw-r--r-- sync/syncable/syncable_columns.h | 8
-rw-r--r-- sync/syncable/syncable_enum_conversions.cc | 21
-rw-r--r-- sync/syncable/syncable_enum_conversions.h | 4
-rw-r--r-- sync/syncable/syncable_enum_conversions_unittest.cc | 5
-rw-r--r-- sync/syncable/syncable_unittest.cc | 78
-rw-r--r-- sync/syncable/syncable_util.cc | 29
-rw-r--r-- sync/syncable/syncable_util.h | 7
-rw-r--r-- sync/test/engine/mock_connection_manager.cc | 29
-rw-r--r-- sync/test/engine/mock_connection_manager.h | 1
-rw-r--r-- sync/test/test_directory_backing_store.h | 3
-rw-r--r-- sync/tools/testserver/chromiumsync.py | 22
62 files changed, 1865 insertions, 1379 deletions
diff --git a/chrome/browser/sync/glue/bookmark_change_processor.cc b/chrome/browser/sync/glue/bookmark_change_processor.cc
index 4a660bb..0f22b2b 100644
--- a/chrome/browser/sync/glue/bookmark_change_processor.cc
+++ b/chrome/browser/sync/glue/bookmark_change_processor.cc
@@ -179,10 +179,7 @@ void BookmarkChangeProcessor::RemoveAllChildNodes(
const int64 sync_node_id = dfs_sync_id_stack.top();
syncer::WriteNode node(trans);
node.InitByIdLookup(sync_node_id);
- int64 child_id = node.GetFirstChildId();
- if (child_id != syncer::kInvalidId) {
- dfs_sync_id_stack.push(child_id);
- } else {
+ if (!node.GetIsFolder() || node.GetFirstChildId() == syncer::kInvalidId) {
// All children of the node have been processed; delete the node and
// pop it off the stack.
dfs_sync_id_stack.pop();
@@ -194,6 +191,11 @@ void BookmarkChangeProcessor::RemoveAllChildNodes(
// the stack should be empty.
DCHECK(dfs_sync_id_stack.empty());
}
+ } else {
+ int64 child_id = node.GetFirstChildId();
+ if (child_id != syncer::kInvalidId) {
+ dfs_sync_id_stack.push(child_id);
+ }
}
}
}
diff --git a/chrome/browser/sync/profile_sync_service_bookmark_unittest.cc b/chrome/browser/sync/profile_sync_service_bookmark_unittest.cc
index 38dbaa7..bc62c97 100644
--- a/chrome/browser/sync/profile_sync_service_bookmark_unittest.cc
+++ b/chrome/browser/sync/profile_sync_service_bookmark_unittest.cc
@@ -126,7 +126,8 @@ class FakeServerChange {
// Delete the sync node.
syncer::WriteNode node(trans_);
EXPECT_EQ(BaseNode::INIT_OK, node.InitByIdLookup(id));
- EXPECT_FALSE(node.GetFirstChildId());
+ if (node.GetIsFolder())
+ EXPECT_FALSE(node.GetFirstChildId());
node.GetMutableEntryForTest()->Put(syncer::syncable::SERVER_IS_DEL,
true);
node.Tombstone();
@@ -567,8 +568,9 @@ class ProfileSyncServiceBookmarkTest : public testing::Test {
syncer::ReadNode gnode(trans);
ASSERT_EQ(BaseNode::INIT_OK, gnode.InitByIdLookup(id));
- stack.push(gnode.GetFirstChildId());
stack.push(gnode.GetSuccessorId());
+ if (gnode.GetIsFolder())
+ stack.push(gnode.GetFirstChildId());
}
}
@@ -1809,13 +1811,6 @@ TEST_F(ProfileSyncServiceBookmarkTestWithData, UpdateTransactionVersion) {
GetTransactionVersions(model_->root_node(), &new_versions);
EXPECT_EQ(initial_versions[model_->root_node()->id()] + 1,
new_versions[model_->root_node()->id()]);
- // HACK(haitaol): siblings of removed node are actually updated in sync model
- // because of NEXT_ID/PREV_ID. After switching to ordinal,
- // siblings will not get updated and the hack below can be
- // removed.
- model_->SetNodeMetaInfo(bookmark_bar->GetChild(0),
- kBookmarkTransactionVersionKey, "41");
- initial_versions[bookmark_bar->GetChild(0)->id()] = 41;
ExpectTransactionVersionMatch(model_->bookmark_bar_node(), initial_versions);
ExpectTransactionVersionMatch(model_->other_node(), initial_versions);
ExpectTransactionVersionMatch(model_->mobile_node(), initial_versions);
@@ -1829,7 +1824,7 @@ TEST_F(ProfileSyncServiceBookmarkTestWithData, UpdateTransactionVersion) {
GetTransactionVersions(model_->root_node(), &new_versions);
EXPECT_EQ(initial_versions[model_->root_node()->id()] + 2,
new_versions[model_->root_node()->id()]);
- EXPECT_EQ(initial_versions[changed_bookmark->id()] + 1,
+ EXPECT_LT(initial_versions[changed_bookmark->id()],
new_versions[changed_bookmark->id()]);
initial_versions.erase(changed_bookmark->id());
ExpectTransactionVersionMatch(model_->bookmark_bar_node(), initial_versions);
diff --git a/chrome/browser/sync/profile_sync_service_password_unittest.cc b/chrome/browser/sync/profile_sync_service_password_unittest.cc
index cc26c7a..bf839be 100644
--- a/chrome/browser/sync/profile_sync_service_password_unittest.cc
+++ b/chrome/browser/sync/profile_sync_service_password_unittest.cc
@@ -253,6 +253,27 @@ class ProfileSyncServicePasswordTest : public AbstractProfileSyncServiceTest {
}
}
+  // Helper to sort the results of GetPasswordEntriesFromSyncDB. The sorting
+  // doesn't need to be particularly intelligent; it just needs to be
+  // consistent enough that we can base our test expectations on the ordering
+  // it provides. Each field is consulted only when all earlier fields compare
+  // equal, so this defines the strict weak ordering that std::sort requires.
+  static bool PasswordFormComparator(const PasswordForm& pf1,
+                                     const PasswordForm& pf2) {
+    if (pf1.submit_element != pf2.submit_element)
+      return pf1.submit_element < pf2.submit_element;
+    if (pf1.username_element != pf2.username_element)
+      return pf1.username_element < pf2.username_element;
+    if (pf1.username_value != pf2.username_value)
+      return pf1.username_value < pf2.username_value;
+    if (pf1.password_element != pf2.password_element)
+      return pf1.password_element < pf2.password_element;
+    if (pf1.password_value != pf2.password_value)
+      return pf1.password_value < pf2.password_value;
+
+    return false;
+  }
+
void GetPasswordEntriesFromSyncDB(std::vector<PasswordForm>* entries) {
syncer::ReadTransaction trans(FROM_HERE, sync_service_->GetUserShare());
syncer::ReadNode password_root(&trans);
@@ -275,6 +296,8 @@ class ProfileSyncServicePasswordTest : public AbstractProfileSyncServiceTest {
child_id = child_node.GetSuccessorId();
}
+
+ std::sort(entries->begin(), entries->end(), PasswordFormComparator);
}
bool ComparePasswords(const PasswordForm& lhs, const PasswordForm& rhs) {
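A side note on PasswordFormComparator above: std::sort requires a strict weak ordering, so each field should only be consulted when all earlier fields compare equal. An equivalent, more compact formulation, illustrative only and assuming C++11 <tuple> is usable in this test target:

#include <tuple>

static bool PasswordFormComparator(const PasswordForm& pf1,
                                   const PasswordForm& pf2) {
  // Lexicographic comparison over the same fields, in the same order.
  return std::tie(pf1.submit_element, pf1.username_element, pf1.username_value,
                  pf1.password_element, pf1.password_value) <
         std::tie(pf2.submit_element, pf2.username_element, pf2.username_value,
                  pf2.password_element, pf2.password_value);
}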
diff --git a/sync/engine/build_commit_command.cc b/sync/engine/build_commit_command.cc
index 531eaf6..96772ad 100644
--- a/sync/engine/build_commit_command.cc
+++ b/sync/engine/build_commit_command.cc
@@ -11,6 +11,7 @@
#include "base/string_util.h"
#include "sync/engine/syncer_proto_util.h"
+#include "sync/internal_api/public/base/unique_position.h"
#include "sync/protocol/bookmark_specifics.pb.h"
#include "sync/protocol/sync.pb.h"
#include "sync/sessions/ordered_commit_set.h"
@@ -22,11 +23,6 @@
#include "sync/syncable/syncable_proto_util.h"
#include "sync/util/time.h"
-// TODO(vishwath): Remove this include after node positions have
-// shifted to completely using Ordinals.
-// See http://crbug.com/145412 .
-#include "sync/internal_api/public/base/node_ordinal.h"
-
using std::set;
using std::string;
using std::vector;
@@ -36,26 +32,11 @@ namespace syncer {
using sessions::SyncSession;
using syncable::Entry;
using syncable::IS_DEL;
-using syncable::SERVER_ORDINAL_IN_PARENT;
using syncable::IS_UNAPPLIED_UPDATE;
using syncable::IS_UNSYNCED;
using syncable::Id;
using syncable::SPECIFICS;
-
-// static
-int64 BuildCommitCommand::GetFirstPosition() {
- return std::numeric_limits<int64>::min();
-}
-
-// static
-int64 BuildCommitCommand::GetLastPosition() {
- return std::numeric_limits<int64>::max();
-}
-
-// static
-int64 BuildCommitCommand::GetGap() {
- return 1LL << 20;
-}
+using syncable::UNIQUE_POSITION;
BuildCommitCommand::BuildCommitCommand(
syncable::BaseTransaction* trans,
@@ -216,30 +197,16 @@ SyncerError BuildCommitCommand::ExecuteImpl(SyncSession* session) {
sync_entry->set_deleted(true);
} else {
if (meta_entry.Get(SPECIFICS).has_bookmark()) {
- // Common data in both new and old protocol.
+ // Both insert_after_item_id and position_in_parent fields are set only
+ // for legacy reasons. See comments in sync.proto for more information.
const Id& prev_id = meta_entry.GetPredecessorId();
string prev_id_string =
prev_id.IsRoot() ? string() : prev_id.GetServerId();
sync_entry->set_insert_after_item_id(prev_id_string);
-
- // Compute a numeric position based on what we know locally.
- std::pair<int64, int64> position_block(
- GetFirstPosition(), GetLastPosition());
- std::map<Id, std::pair<int64, int64> >::iterator prev_pos =
- position_map.find(prev_id);
- if (prev_pos != position_map.end()) {
- position_block = prev_pos->second;
- position_map.erase(prev_pos);
- } else {
- position_block = std::make_pair(
- FindAnchorPosition(syncable::PREV_ID, meta_entry),
- FindAnchorPosition(syncable::NEXT_ID, meta_entry));
- }
- position_block.first = BuildCommitCommand::InterpolatePosition(
- position_block.first, position_block.second);
-
- position_map[id] = position_block;
- sync_entry->set_position_in_parent(position_block.first);
+ sync_entry->set_position_in_parent(
+ meta_entry.Get(UNIQUE_POSITION).ToInt64());
+ meta_entry.Get(UNIQUE_POSITION).ToProto(
+ sync_entry->mutable_unique_position());
}
SetEntrySpecifics(&meta_entry, sync_entry);
}
@@ -248,43 +215,4 @@ SyncerError BuildCommitCommand::ExecuteImpl(SyncSession* session) {
return SYNCER_OK;
}
-int64 BuildCommitCommand::FindAnchorPosition(syncable::IdField direction,
- const syncable::Entry& entry) {
- Id next_id = entry.Get(direction);
- while (!next_id.IsRoot()) {
- Entry next_entry(entry.trans(),
- syncable::GET_BY_ID,
- next_id);
- if (!next_entry.Get(IS_UNSYNCED) && !next_entry.Get(IS_UNAPPLIED_UPDATE)) {
- return NodeOrdinalToInt64(next_entry.Get(SERVER_ORDINAL_IN_PARENT));
- }
- next_id = next_entry.Get(direction);
- }
- return
- direction == syncable::PREV_ID ?
- GetFirstPosition() : GetLastPosition();
-}
-
-// static
-int64 BuildCommitCommand::InterpolatePosition(const int64 lo,
- const int64 hi) {
- DCHECK_LE(lo, hi);
-
- // The first item to be added under a parent gets a position of zero.
- if (lo == GetFirstPosition() && hi == GetLastPosition())
- return 0;
-
- // For small gaps, we do linear interpolation. For larger gaps,
- // we use an additive offset of |GetGap()|. We are careful to avoid
- // signed integer overflow.
- uint64 delta = static_cast<uint64>(hi) - static_cast<uint64>(lo);
- if (delta <= static_cast<uint64>(GetGap()*2))
- return lo + (static_cast<int64>(delta) + 7) / 8; // Interpolate.
- else if (lo == GetFirstPosition())
- return hi - GetGap(); // Extend range just before successor.
- else
- return lo + GetGap(); // Use or extend range just after predecessor.
-}
-
-
} // namespace syncer
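For readers comparing the old and new positioning schemes: the code above now writes both the legacy position_in_parent field and the new unique_position field from the same UNIQUE_POSITION value. A minimal illustration of that invariant, not part of this change; the sync_pb::UniquePosition message name and the exactness of the int64 round trip are assumptions:

// Illustration only: both wire fields derive from one UNIQUE_POSITION value,
// so a legacy reader and a new reader should agree on ordering.
UniquePosition pos = meta_entry.Get(UNIQUE_POSITION);
sync_pb::UniquePosition pos_proto;
pos.ToProto(&pos_proto);
DCHECK_EQ(pos.ToInt64(), UniquePosition::FromProto(pos_proto).ToInt64());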
diff --git a/sync/engine/build_commit_command.h b/sync/engine/build_commit_command.h
index a1b101d..a55a0b4 100644
--- a/sync/engine/build_commit_command.h
+++ b/sync/engine/build_commit_command.h
@@ -51,11 +51,6 @@ class SYNC_EXPORT_PRIVATE BuildCommitCommand : public SyncerCommand {
private:
FRIEND_TEST_ALL_PREFIXES(BuildCommitCommandTest, InterpolatePosition);
- // Functions returning constants controlling range of values.
- static int64 GetFirstPosition();
- static int64 GetLastPosition();
- static int64 GetGap();
-
void AddExtensionsActivityToMessage(sessions::SyncSession* session,
sync_pb::CommitMessage* message);
@@ -63,17 +58,6 @@ class SYNC_EXPORT_PRIVATE BuildCommitCommand : public SyncerCommand {
void AddClientConfigParamsToMessage(sessions::SyncSession* session,
sync_pb::CommitMessage* message);
- // Helper for computing position. Find the numeric position value
- // of the closest already-synced entry. |direction| must be one of
- // NEXT_ID or PREV_ID; this parameter controls the search direction.
- // For an open range (no predecessor or successor), the return
- // value will be kFirstPosition or kLastPosition.
- int64 FindAnchorPosition(syncable::IdField direction,
- const syncable::Entry& entry);
- // Given two values of the type returned by FindAnchorPosition,
- // compute a third value in between the two ranges.
- static int64 InterpolatePosition(int64 lo, int64 hi);
-
DISALLOW_COPY_AND_ASSIGN(BuildCommitCommand);
// A pointer to a valid transaction not owned by this class.
diff --git a/sync/engine/build_commit_command_unittest.cc b/sync/engine/build_commit_command_unittest.cc
deleted file mode 100644
index 1ad8667..0000000
--- a/sync/engine/build_commit_command_unittest.cc
+++ /dev/null
@@ -1,105 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "sync/engine/build_commit_command.h"
-#include "sync/test/engine/syncer_command_test.h"
-
-namespace syncer {
-
-// A test fixture for tests exercising ClearDataCommandTest.
-class BuildCommitCommandTest : public SyncerCommandTest { };
-
-TEST_F(BuildCommitCommandTest, InterpolatePosition) {
- EXPECT_LT(BuildCommitCommand::GetFirstPosition(),
- BuildCommitCommand::GetLastPosition());
-
- // Dense ranges.
- EXPECT_EQ(10, BuildCommitCommand::InterpolatePosition(10, 10));
- EXPECT_EQ(11, BuildCommitCommand::InterpolatePosition(10, 11));
- EXPECT_EQ(11, BuildCommitCommand::InterpolatePosition(10, 12));
- EXPECT_EQ(11, BuildCommitCommand::InterpolatePosition(10, 13));
- EXPECT_EQ(11, BuildCommitCommand::InterpolatePosition(10, 14));
- EXPECT_EQ(11, BuildCommitCommand::InterpolatePosition(10, 15));
- EXPECT_EQ(11, BuildCommitCommand::InterpolatePosition(10, 16));
- EXPECT_EQ(11, BuildCommitCommand::InterpolatePosition(10, 17));
- EXPECT_EQ(11, BuildCommitCommand::InterpolatePosition(10, 18));
- EXPECT_EQ(12, BuildCommitCommand::InterpolatePosition(10, 19));
- EXPECT_EQ(12, BuildCommitCommand::InterpolatePosition(10, 20));
-
- // Sparse ranges.
- EXPECT_EQ(0x32535ffe3dc97LL + BuildCommitCommand::GetGap(),
- BuildCommitCommand::InterpolatePosition(
- 0x32535ffe3dc97LL, 0x61abcd323122cLL));
- EXPECT_EQ(~0x61abcd323122cLL + BuildCommitCommand::GetGap(),
- BuildCommitCommand::InterpolatePosition(
- ~0x61abcd323122cLL, ~0x32535ffe3dc97LL));
-
- // Lower limits
- EXPECT_EQ(BuildCommitCommand::GetFirstPosition() + 0x20,
- BuildCommitCommand::InterpolatePosition(
- BuildCommitCommand::GetFirstPosition(),
- BuildCommitCommand::GetFirstPosition() + 0x100));
- EXPECT_EQ(BuildCommitCommand::GetFirstPosition() + 2,
- BuildCommitCommand::InterpolatePosition(
- BuildCommitCommand::GetFirstPosition() + 1,
- BuildCommitCommand::GetFirstPosition() + 2));
- EXPECT_EQ(BuildCommitCommand::GetFirstPosition() +
- BuildCommitCommand::GetGap()/8 + 1,
- BuildCommitCommand::InterpolatePosition(
- BuildCommitCommand::GetFirstPosition() + 1,
- BuildCommitCommand::GetFirstPosition() + 1 +
- BuildCommitCommand::GetGap()));
-
- // Extremal cases.
- EXPECT_EQ(0,
- BuildCommitCommand::InterpolatePosition(
- BuildCommitCommand::GetFirstPosition(),
- BuildCommitCommand::GetLastPosition()));
- EXPECT_EQ(BuildCommitCommand::GetFirstPosition() + 1 +
- BuildCommitCommand::GetGap(),
- BuildCommitCommand::InterpolatePosition(
- BuildCommitCommand::GetFirstPosition() + 1,
- BuildCommitCommand::GetLastPosition()));
- EXPECT_EQ(BuildCommitCommand::GetFirstPosition() + 1 +
- BuildCommitCommand::GetGap(),
- BuildCommitCommand::InterpolatePosition(
- BuildCommitCommand::GetFirstPosition() + 1,
- BuildCommitCommand::GetLastPosition() - 1));
- EXPECT_EQ(BuildCommitCommand::GetLastPosition() - 1 -
- BuildCommitCommand::GetGap(),
- BuildCommitCommand::InterpolatePosition(
- BuildCommitCommand::GetFirstPosition(),
- BuildCommitCommand::GetLastPosition() - 1));
-
- // Edge cases around zero.
- EXPECT_EQ(BuildCommitCommand::GetGap(),
- BuildCommitCommand::InterpolatePosition(
- 0, BuildCommitCommand::GetLastPosition()));
- EXPECT_EQ(BuildCommitCommand::GetGap() + 1,
- BuildCommitCommand::InterpolatePosition(
- 1, BuildCommitCommand::GetLastPosition()));
- EXPECT_EQ(BuildCommitCommand::GetGap() - 1,
- BuildCommitCommand::InterpolatePosition(
- -1, BuildCommitCommand::GetLastPosition()));
- EXPECT_EQ(-BuildCommitCommand::GetGap(),
- BuildCommitCommand::InterpolatePosition(
- BuildCommitCommand::GetFirstPosition(), 0));
- EXPECT_EQ(-BuildCommitCommand::GetGap() + 1,
- BuildCommitCommand::InterpolatePosition(
- BuildCommitCommand::GetFirstPosition(), 1));
- EXPECT_EQ(-BuildCommitCommand::GetGap() - 1,
- BuildCommitCommand::InterpolatePosition(
- BuildCommitCommand::GetFirstPosition(), -1));
- EXPECT_EQ(BuildCommitCommand::GetGap() / 8,
- BuildCommitCommand::InterpolatePosition(
- 0, BuildCommitCommand::GetGap()));
- EXPECT_EQ(BuildCommitCommand::GetGap() / 4,
- BuildCommitCommand::InterpolatePosition(
- 0, BuildCommitCommand::GetGap()*2));
- EXPECT_EQ(BuildCommitCommand::GetGap(),
- BuildCommitCommand::InterpolatePosition(
- 0, BuildCommitCommand::GetGap()*2 + 1));
-}
-
-} // namespace syncer
diff --git a/sync/engine/conflict_resolver.cc b/sync/engine/conflict_resolver.cc
index 5b485a1..074eb37 100644
--- a/sync/engine/conflict_resolver.cc
+++ b/sync/engine/conflict_resolver.cc
@@ -95,55 +95,18 @@ void ConflictResolver::ProcessSimpleConflict(WriteTransaction* trans,
entry.Get(syncable::SERVER_PARENT_ID);
bool entry_deleted = entry.Get(syncable::IS_DEL);
- // This positional check is meant to be necessary but not sufficient. As a
- // result, it may be false even when the position hasn't changed, possibly
- // resulting in unnecessary commits, but if it's true the position has
- // definitely not changed. The check works by verifying that the prev id
- // as calculated from the server position (which will ignore any
- // unsynced/unapplied predecessors and be root for non-bookmark datatypes)
- // matches the client prev id. Because we traverse chains of conflicting
- // items in predecessor -> successor order, we don't need to also verify the
- // successor matches (If it's in conflict, we'll verify it next. If it's
- // not, then it should be taken into account already in the
- // ComputePrevIdFromServerPosition calculation). This works even when there
- // are chains of conflicting items.
+ // The position check might fail spuriously if one of the positions was
+ // based on a legacy random suffix, rather than a deterministic one based on
+ // originator_cache_guid and originator_item_id. If an item is being
+ // modified regularly, it shouldn't take long for the suffix and position to
+ // be updated, so such false failures shouldn't be a problem for long.
//
- // Example: Original sequence was abcde. Server changes to aCDbe, while
- // client changes to aDCbe (C and D are in conflict). Locally, D's prev id
- // is a, while C's prev id is D. On the other hand, the server prev id will
- // ignore unsynced/unapplied items, so D's server prev id will also be a,
- // just like C's. Because we traverse in client predecessor->successor
- // order, we evaluate D first. Since prev id and server id match, we
- // consider the position to have remained the same for D, and will unset
- // it's UNSYNCED/UNAPPLIED bits. When we evaluate C though, we'll see that
- // the prev id is D locally while the server's prev id is a. C will
- // therefore count as a positional conflict (and the local data will be
- // overwritten by the server data typically). The final result will be
- // aCDbe (the same as the server's view). Even though both C and D were
- // modified, only one counted as being in actual conflict and was resolved
- // with local/server wins.
- //
- // In general, when there are chains of positional conflicts, only the first
- // item in chain (based on the clients point of view) will have both its
- // server prev id and local prev id match. For all the rest the server prev
- // id will be the predecessor of the first item in the chain, and therefore
- // not match the local prev id.
- //
- // Similarly, chains of conflicts where the server and client info are the
- // same are supported due to the predecessor->successor ordering. In this
- // case, from the first item onward, we unset the UNSYNCED/UNAPPLIED bits as
- // we decide that nothing changed. The subsequent item's server prev id will
- // accurately match the local prev id because the predecessor is no longer
- // UNSYNCED/UNAPPLIED.
- // TODO(zea): simplify all this once we can directly compare server position
- // to client position.
- syncable::Id server_prev_id = entry.ComputePrevIdFromServerPosition(
- entry.Get(syncable::SERVER_PARENT_ID));
- bool needs_reinsertion = !parent_matches ||
- server_prev_id != entry.GetPredecessorId();
- DVLOG_IF(1, needs_reinsertion) << "Insertion needed, server prev id "
- << " is " << server_prev_id << ", local prev id is "
- << entry.GetPredecessorId();
+ // Lucky for us, it's OK to be wrong here. The position_matches check is
+ // allowed to return false negatives, as long as it returns no false
+ // positives.
+ bool position_matches = parent_matches &&
+ entry.Get(syncable::SERVER_UNIQUE_POSITION).Equals(
+ entry.Get(syncable::UNIQUE_POSITION));
const sync_pb::EntitySpecifics& specifics =
entry.Get(syncable::SPECIFICS);
const sync_pb::EntitySpecifics& server_specifics =
@@ -189,7 +152,7 @@ void ConflictResolver::ProcessSimpleConflict(WriteTransaction* trans,
}
if (!entry_deleted && name_matches && parent_matches && specifics_match &&
- !needs_reinsertion) {
+ position_matches) {
DVLOG(1) << "Resolving simple conflict, everything matches, ignoring "
<< "changes for: " << entry;
conflict_util::IgnoreConflict(&entry);
@@ -288,6 +251,11 @@ void ConflictResolver::ResolveConflicts(
Entry entry(trans, syncable::GET_BY_ID, prev_id);
// Any entry in conflict must be valid.
CHECK(entry.good());
+
+ // We can't traverse over a deleted item.
+ if (entry.Get(syncable::IS_DEL))
+ break;
+
Id new_prev_id = entry.GetPredecessorId();
if (new_prev_id == prev_id)
break;
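The comment above relies on bookmark position suffixes now being derived deterministically from originator_cache_guid and originator_client_item_id (via syncable::GenerateSyncableBookmarkHash, used later in this diff). A rough sketch of that idea (hashing the two originator fields and base64-encoding the result), assuming base::SHA1HashString and base::Base64Encode; the real implementation may differ in detail:

#include "base/base64.h"
#include "base/sha1.h"

// Hypothetical sketch: every client that sees the same originator fields
// computes the same suffix, so positions built from it are comparable.
std::string MakeDeterministicSuffix(const std::string& originator_cache_guid,
                                    const std::string& originator_item_id) {
  std::string hash =
      base::SHA1HashString(originator_cache_guid + originator_item_id);
  std::string suffix;
  base::Base64Encode(hash, &suffix);
  return suffix;  // Intended to satisfy UniquePosition::IsValidSuffix().
}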
diff --git a/sync/engine/process_commit_response_command.cc b/sync/engine/process_commit_response_command.cc
index 96a82bd..4a921cf 100644
--- a/sync/engine/process_commit_response_command.cc
+++ b/sync/engine/process_commit_response_command.cc
@@ -13,6 +13,7 @@
#include "base/location.h"
#include "sync/engine/syncer_proto_util.h"
#include "sync/engine/syncer_util.h"
+#include "sync/internal_api/public/base/unique_position.h"
#include "sync/sessions/sync_session.h"
#include "sync/syncable/entry.h"
#include "sync/syncable/mutable_entry.h"
@@ -22,11 +23,6 @@
#include "sync/syncable/syncable_write_transaction.h"
#include "sync/util/time.h"
-// TODO(vishwath): Remove this include after node positions have
-// shifted to completely using Ordinals.
-// See http://crbug.com/145412 .
-#include "sync/internal_api/public/base/node_ordinal.h"
-
using std::set;
using std::string;
using std::vector;
@@ -50,7 +46,6 @@ using syncable::IS_UNSYNCED;
using syncable::PARENT_ID;
using syncable::SERVER_IS_DEL;
using syncable::SERVER_PARENT_ID;
-using syncable::SERVER_ORDINAL_IN_PARENT;
using syncable::SERVER_VERSION;
using syncable::SYNCER;
using syncable::SYNCING;
@@ -342,8 +337,11 @@ void ProcessCommitResponseCommand::UpdateServerFieldsAfterCommit(
ProtoTimeToTime(committed_entry.mtime()));
local_entry->Put(syncable::SERVER_CTIME,
ProtoTimeToTime(committed_entry.ctime()));
- local_entry->Put(syncable::SERVER_ORDINAL_IN_PARENT,
- Int64ToNodeOrdinal(entry_response.position_in_parent()));
+ if (committed_entry.has_unique_position()) {
+ local_entry->Put(syncable::SERVER_UNIQUE_POSITION,
+ UniquePosition::FromProto(
+ committed_entry.unique_position()));
+ }
// TODO(nick): The server doesn't set entry_response.server_parent_id in
// practice; to update SERVER_PARENT_ID appropriately here we'd need to
@@ -386,23 +384,6 @@ void ProcessCommitResponseCommand::OverrideClientFieldsAfterCommit(
<< " to new name: " << server_name;
local_entry->Put(syncable::NON_UNIQUE_NAME, server_name);
}
-
- // The server has the final say on positioning, so apply the absolute
- // position that it returns.
- if (entry_response.has_position_in_parent()) {
- // The SERVER_ field should already have been written.
- DCHECK_EQ(entry_response.position_in_parent(),
- NodeOrdinalToInt64(local_entry->Get(SERVER_ORDINAL_IN_PARENT)));
-
- // We just committed successfully, so we assume that the position
- // value we got applies to the PARENT_ID we submitted.
- syncable::Id new_prev = local_entry->ComputePrevIdFromServerPosition(
- local_entry->Get(PARENT_ID));
- if (!local_entry->PutPredecessor(new_prev)) {
- // TODO(lipalani) : Propagate the error to caller. crbug.com/100444.
- NOTREACHED();
- }
- }
}
void ProcessCommitResponseCommand::ProcessSuccessfulCommitResponse(
diff --git a/sync/engine/process_commit_response_command_unittest.cc b/sync/engine/process_commit_response_command_unittest.cc
index 5c96424..0d25c01 100644
--- a/sync/engine/process_commit_response_command_unittest.cc
+++ b/sync/engine/process_commit_response_command_unittest.cc
@@ -32,11 +32,13 @@ namespace syncer {
using sessions::SyncSession;
using syncable::BASE_VERSION;
using syncable::Entry;
+using syncable::ID;
using syncable::IS_DIR;
using syncable::IS_UNSYNCED;
using syncable::Id;
using syncable::MutableEntry;
using syncable::NON_UNIQUE_NAME;
+using syncable::UNIQUE_POSITION;
using syncable::UNITTEST;
using syncable::WriteTransaction;
@@ -132,7 +134,6 @@ class ProcessCommitResponseCommandTest : public SyncerCommandTest {
else
entry_response->set_id_string(id_factory_.NewServerId().GetServerId());
entry_response->set_version(next_new_revision_++);
- entry_response->set_position_in_parent(next_server_position_++);
// If the ID of our parent item committed earlier in the batch was
// rewritten, rewrite it in the entry response. This matches
@@ -218,7 +219,6 @@ TEST_F(ProcessCommitResponseCommandTest, MultipleCommitIdProjections) {
Entry b2(&trans, syncable::GET_BY_HANDLE, bookmark2_handle);
CheckEntry(&b1, "bookmark 1", BOOKMARKS, new_fid);
CheckEntry(&b2, "bookmark 2", BOOKMARKS, new_fid);
- ASSERT_TRUE(b2.GetSuccessorId().IsRoot());
// Look at the prefs and autofill items.
Entry p1(&trans, syncable::GET_BY_HANDLE, pref1_handle);
@@ -230,7 +230,6 @@ TEST_F(ProcessCommitResponseCommandTest, MultipleCommitIdProjections) {
Entry a2(&trans, syncable::GET_BY_HANDLE, autofill2_handle);
CheckEntry(&a1, "Autofill 1", AUTOFILL, id_factory_.root());
CheckEntry(&a2, "Autofill 2", AUTOFILL, id_factory_.root());
- ASSERT_TRUE(a2.GetSuccessorId().IsRoot());
}
// In this test, we test processing a commit response for a commit batch that
@@ -256,25 +255,31 @@ TEST_F(ProcessCommitResponseCommandTest, NewFolderCommitKeepsChildOrder) {
// Verify that the item is reachable.
{
- Id child_id;
syncable::ReadTransaction trans(FROM_HERE, directory());
syncable::Entry root(&trans, syncable::GET_BY_ID, id_factory_.root());
ASSERT_TRUE(root.good());
- ASSERT_TRUE(directory()->GetFirstChildId(
- &trans, id_factory_.root(), &child_id));
+ Id child_id = root.GetFirstChildId();
ASSERT_EQ(folder_id, child_id);
}
// The first 25 children of the parent folder will be part of the commit
- // batch.
+ // batch. They will be placed left to right in order of creation.
int batch_size = 25;
int i = 0;
+ Id prev_id = TestIdFactory::root();
for (; i < batch_size; ++i) {
// Alternate between new and old child items, just for kicks.
Id id = (i % 4 < 2) ? id_factory_.NewLocalId() : id_factory_.NewServerId();
- CreateUnprocessedCommitResult(
+ int64 handle = CreateUnprocessedCommitResult(
id, folder_id, base::StringPrintf("Item %d", i), false,
BOOKMARKS, &commit_set, &request, &response);
+ {
+ syncable::WriteTransaction trans(FROM_HERE, UNITTEST, directory());
+ syncable::MutableEntry e(&trans, syncable::GET_BY_HANDLE, handle);
+ ASSERT_TRUE(e.good());
+ e.PutPredecessor(prev_id);
+ }
+ prev_id = id;
}
// The second 25 children will be unsynced items but NOT part of the commit
// batch. When the ID of the parent folder changes during the commit,
@@ -283,9 +288,17 @@ TEST_F(ProcessCommitResponseCommandTest, NewFolderCommitKeepsChildOrder) {
for (; i < 2*batch_size; ++i) {
// Alternate between new and old child items, just for kicks.
Id id = (i % 4 < 2) ? id_factory_.NewLocalId() : id_factory_.NewServerId();
+ int64 handle = -1;
test_entry_factory_->CreateUnsyncedItem(
id, folder_id, base::StringPrintf("Item %d", i),
- false, BOOKMARKS, NULL);
+ false, BOOKMARKS, &handle);
+ {
+ syncable::WriteTransaction trans(FROM_HERE, UNITTEST, directory());
+ syncable::MutableEntry e(&trans, syncable::GET_BY_HANDLE, handle);
+ ASSERT_TRUE(e.good());
+ e.PutPredecessor(prev_id);
+ }
+ prev_id = id;
}
// Process the commit response for the parent folder and the first
@@ -299,9 +312,9 @@ TEST_F(ProcessCommitResponseCommandTest, NewFolderCommitKeepsChildOrder) {
syncable::ReadTransaction trans(FROM_HERE, directory());
// Lookup the parent folder by finding a child of the root. We can't use
// folder_id here, because it changed during the commit.
- Id new_fid;
- ASSERT_TRUE(directory()->GetFirstChildId(
- &trans, id_factory_.root(), &new_fid));
+ syncable::Entry root(&trans, syncable::GET_BY_ID, id_factory_.root());
+ ASSERT_TRUE(root.good());
+ Id new_fid = root.GetFirstChildId();
ASSERT_FALSE(new_fid.IsRoot());
EXPECT_TRUE(new_fid.ServerKnows());
EXPECT_FALSE(folder_id.ServerKnows());
@@ -313,8 +326,8 @@ TEST_F(ProcessCommitResponseCommandTest, NewFolderCommitKeepsChildOrder) {
ASSERT_LT(0, parent.Get(BASE_VERSION))
<< "Parent should have a valid (positive) server base revision";
- Id cid;
- ASSERT_TRUE(directory()->GetFirstChildId(&trans, new_fid, &cid));
+ Id cid = parent.GetFirstChildId();
+
int child_count = 0;
// Now loop over all the children of the parent folder, verifying
// that they are in their original order by checking to see that their
diff --git a/sync/engine/process_updates_command.cc b/sync/engine/process_updates_command.cc
index 0789958..12498f6 100644
--- a/sync/engine/process_updates_command.cc
+++ b/sync/engine/process_updates_command.cc
@@ -19,11 +19,6 @@
#include "sync/syncable/syncable_write_transaction.h"
#include "sync/util/cryptographer.h"
-// TODO(vishwath): Remove this include after node positions have
-// shifted to completely using Ordinals.
-// See http://crbug.com/145412 .
-#include "sync/internal_api/public/base/node_ordinal.h"
-
using std::vector;
namespace syncer {
@@ -313,12 +308,25 @@ ServerUpdateProcessingResult ProcessUpdatesCommand::ProcessUpdate(
// (on which any current or future local changes are based) before we
// overwrite SERVER_SPECIFICS.
// MTIME, CTIME, and NON_UNIQUE_NAME are not enforced.
+
+ bool position_matches = false;
+ if (target_entry.ShouldMaintainPosition() && !update.deleted()) {
+ std::string update_tag = GetUniqueBookmarkTagFromUpdate(update);
+ if (UniquePosition::IsValidSuffix(update_tag)) {
+ position_matches = GetUpdatePosition(update, update_tag).Equals(
+ target_entry.Get(syncable::SERVER_UNIQUE_POSITION));
+ } else {
+ NOTREACHED();
+ }
+ } else {
+ // If this item doesn't care about positions, then set this flag to true.
+ position_matches = true;
+ }
+
if (!update.deleted() && !target_entry.Get(syncable::SERVER_IS_DEL) &&
(SyncableIdFromProto(update.parent_id_string()) ==
target_entry.Get(syncable::SERVER_PARENT_ID)) &&
- (update.position_in_parent() ==
- NodeOrdinalToInt64(
- target_entry.Get(syncable::SERVER_ORDINAL_IN_PARENT))) &&
+ position_matches &&
update.has_specifics() && update.specifics().has_encrypted() &&
!cryptographer->CanDecrypt(update.specifics().encrypted())) {
sync_pb::EntitySpecifics prev_specifics =
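The position_matches computation above uses two helpers defined later in this diff (GetUniqueBookmarkTagFromUpdate and GetUpdatePosition in sync/engine/syncer_util.cc). A small illustration of the legacy fallback path, using only fields and helpers that appear in this change:

sync_pb::SyncEntity update;
update.mutable_specifics()->mutable_bookmark();
update.set_originator_cache_guid("IrcjZ2jyzHDV9Io4+zKcXQ==");
update.set_originator_client_item_id("-5");
update.set_position_in_parent(42);  // Old-style position, no unique_position.

std::string suffix = GetUniqueBookmarkTagFromUpdate(update);
// The hash-derived suffix is valid, so GetUpdatePosition falls back to
// UniquePosition::FromInt64(42, suffix) and returns a valid position.
UniquePosition pos = GetUpdatePosition(update, suffix);
// pos.IsValid() is true.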
diff --git a/sync/engine/process_updates_command_unittest.cc b/sync/engine/process_updates_command_unittest.cc
index f4cc583..885fbe1 100644
--- a/sync/engine/process_updates_command_unittest.cc
+++ b/sync/engine/process_updates_command_unittest.cc
@@ -4,16 +4,22 @@
#include "base/basictypes.h"
#include "sync/engine/process_updates_command.h"
+#include "sync/engine/syncer_proto_util.h"
#include "sync/internal_api/public/base/model_type.h"
+#include "sync/internal_api/public/test/test_entry_factory.h"
#include "sync/sessions/sync_session.h"
#include "sync/syncable/mutable_entry.h"
#include "sync/syncable/syncable_id.h"
+#include "sync/syncable/syncable_proto_util.h"
+#include "sync/syncable/syncable_read_transaction.h"
+#include "sync/syncable/syncable_write_transaction.h"
#include "sync/test/engine/fake_model_worker.h"
#include "sync/test/engine/syncer_command_test.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace syncer {
+using sync_pb::SyncEntity;
using syncable::Id;
using syncable::MutableEntry;
using syncable::UNITTEST;
@@ -35,6 +41,7 @@ class ProcessUpdatesCommandTest : public SyncerCommandTest {
(*mutable_routing_info())[BOOKMARKS] = GROUP_UI;
(*mutable_routing_info())[AUTOFILL] = GROUP_DB;
SyncerCommandTest::SetUp();
+ test_entry_factory_.reset(new TestEntryFactory(directory()));
}
void CreateLocalItem(const std::string& item_id,
@@ -54,18 +61,21 @@ class ProcessUpdatesCommandTest : public SyncerCommandTest {
entry.Put(syncable::SERVER_SPECIFICS, default_specifics);
}
- void AddUpdate(sync_pb::GetUpdatesResponse* updates,
+ SyncEntity* AddUpdate(sync_pb::GetUpdatesResponse* updates,
const std::string& id, const std::string& parent,
const ModelType& type) {
sync_pb::SyncEntity* e = updates->add_entries();
- e->set_id_string("b1");
+ e->set_id_string(id);
e->set_parent_id_string(parent);
- e->set_non_unique_name("b1");
- e->set_name("b1");
+ e->set_non_unique_name(id);
+ e->set_name(id);
+ e->set_version(1000);
AddDefaultFieldValue(type, e->mutable_specifics());
+ return e;
}
ProcessUpdatesCommand command_;
+ scoped_ptr<TestEntryFactory> test_entry_factory_;
private:
DISALLOW_COPY_AND_ASSIGN(ProcessUpdatesCommandTest);
@@ -74,8 +84,6 @@ class ProcessUpdatesCommandTest : public SyncerCommandTest {
TEST_F(ProcessUpdatesCommandTest, GroupsToChange) {
std::string root = syncable::GetNullId().GetServerId();
- CreateLocalItem("b1", root, BOOKMARKS);
- CreateLocalItem("b2", root, BOOKMARKS);
CreateLocalItem("p1", root, PREFERENCES);
CreateLocalItem("a1", root, AUTOFILL);
@@ -84,8 +92,6 @@ TEST_F(ProcessUpdatesCommandTest, GroupsToChange) {
sync_pb::GetUpdatesResponse* updates =
session()->mutable_status_controller()->
mutable_updates_response()->mutable_get_updates();
- AddUpdate(updates, "b1", root, BOOKMARKS);
- AddUpdate(updates, "b2", root, BOOKMARKS);
AddUpdate(updates, "p1", root, PREFERENCES);
AddUpdate(updates, "a1", root, AUTOFILL);
@@ -94,6 +100,93 @@ TEST_F(ProcessUpdatesCommandTest, GroupsToChange) {
command_.ExecuteImpl(session());
}
+static const char kCacheGuid[] = "IrcjZ2jyzHDV9Io4+zKcXQ==";
+
+// Test that the bookmark tag is set on newly downloaded items.
+TEST_F(ProcessUpdatesCommandTest, NewBookmarkTag) {
+ std::string root = syncable::GetNullId().GetServerId();
+ sync_pb::GetUpdatesResponse* updates =
+ session()->mutable_status_controller()->
+ mutable_updates_response()->mutable_get_updates();
+ Id server_id = Id::CreateFromServerId("b1");
+ SyncEntity* e =
+ AddUpdate(updates, SyncableIdToProto(server_id), root, BOOKMARKS);
+
+ e->set_originator_cache_guid(
+ std::string(kCacheGuid, arraysize(kCacheGuid)-1));
+ Id client_id = Id::CreateFromClientString("-2");
+ e->set_originator_client_item_id(client_id.GetServerId());
+ e->set_position_in_parent(0);
+
+ command_.ExecuteImpl(session());
+
+ syncable::ReadTransaction trans(FROM_HERE, directory());
+ syncable::Entry entry(&trans, syncable::GET_BY_ID, server_id);
+ ASSERT_TRUE(entry.good());
+ EXPECT_TRUE(
+ UniquePosition::IsValidSuffix(entry.Get(syncable::UNIQUE_BOOKMARK_TAG)));
+ EXPECT_TRUE(entry.Get(syncable::SERVER_UNIQUE_POSITION).IsValid());
+
+ // If this assertion fails, that might indicate that the algorithm used to
+ // generate bookmark tags has been modified. This could have implications for
+ // bookmark ordering. Please make sure you know what you're doing if you
+ // intend to make such a change.
+ EXPECT_EQ("6wHRAb3kbnXV5GHrejp4/c1y5tw=",
+ entry.Get(syncable::UNIQUE_BOOKMARK_TAG));
+}
+
+TEST_F(ProcessUpdatesCommandTest, ReceiveServerCreatedBookmarkFolders) {
+ Id server_id = Id::CreateFromServerId("xyz");
+ std::string root = syncable::GetNullId().GetServerId();
+ sync_pb::GetUpdatesResponse* updates =
+ session()->mutable_status_controller()->
+ mutable_updates_response()->mutable_get_updates();
+
+ // Create an update that mimics the bookmark root.
+ SyncEntity* e =
+ AddUpdate(updates, SyncableIdToProto(server_id), root, BOOKMARKS);
+ e->set_server_defined_unique_tag("google_chrome_bookmarks");
+ e->set_folder(true);
+
+ EXPECT_FALSE(SyncerProtoUtil::ShouldMaintainPosition(*e));
+
+ command_.ExecuteImpl(session());
+
+ syncable::ReadTransaction trans(FROM_HERE, directory());
+ syncable::Entry entry(&trans, syncable::GET_BY_ID, server_id);
+ ASSERT_TRUE(entry.good());
+
+ EXPECT_FALSE(entry.ShouldMaintainPosition());
+ EXPECT_FALSE(entry.Get(syncable::UNIQUE_POSITION).IsValid());
+ EXPECT_FALSE(entry.Get(syncable::SERVER_UNIQUE_POSITION).IsValid());
+ EXPECT_TRUE(entry.Get(syncable::UNIQUE_BOOKMARK_TAG).empty());
+}
+
+TEST_F(ProcessUpdatesCommandTest, ReceiveNonBookmarkItem) {
+ Id server_id = Id::CreateFromServerId("xyz");
+ std::string root = syncable::GetNullId().GetServerId();
+ sync_pb::GetUpdatesResponse* updates =
+ session()->mutable_status_controller()->
+ mutable_updates_response()->mutable_get_updates();
+
+ SyncEntity* e =
+ AddUpdate(updates, SyncableIdToProto(server_id), root, AUTOFILL);
+ e->set_server_defined_unique_tag("9PGRuKdX5sHyGMB17CvYTXuC43I=");
+
+ EXPECT_FALSE(SyncerProtoUtil::ShouldMaintainPosition(*e));
+
+ command_.ExecuteImpl(session());
+
+ syncable::ReadTransaction trans(FROM_HERE, directory());
+ syncable::Entry entry(&trans, syncable::GET_BY_ID, server_id);
+ ASSERT_TRUE(entry.good());
+
+ EXPECT_FALSE(entry.ShouldMaintainPosition());
+ EXPECT_FALSE(entry.Get(syncable::UNIQUE_POSITION).IsValid());
+ EXPECT_FALSE(entry.Get(syncable::SERVER_UNIQUE_POSITION).IsValid());
+ EXPECT_TRUE(entry.Get(syncable::UNIQUE_BOOKMARK_TAG).empty());
+}
+
} // namespace
} // namespace syncer
diff --git a/sync/engine/syncer.cc b/sync/engine/syncer.cc
index 90d193a..ad0d534 100644
--- a/sync/engine/syncer.cc
+++ b/sync/engine/syncer.cc
@@ -22,14 +22,10 @@
#include "sync/engine/store_timestamps_command.h"
#include "sync/engine/syncer_types.h"
#include "sync/engine/throttled_data_type_tracker.h"
+#include "sync/internal_api/public/base/unique_position.h"
#include "sync/syncable/mutable_entry.h"
#include "sync/syncable/syncable-inl.h"
-// TODO(vishwath): Remove this include after node positions have
-// shifted to completely using Ordinals.
-// See http://crbug.com/145412 .
-#include "sync/internal_api/public/base/node_ordinal.h"
-
using base::Time;
using base::TimeDelta;
using sync_pb::ClientCommand;
@@ -45,8 +41,8 @@ using syncable::SERVER_IS_DIR;
using syncable::SERVER_MTIME;
using syncable::SERVER_NON_UNIQUE_NAME;
using syncable::SERVER_PARENT_ID;
-using syncable::SERVER_ORDINAL_IN_PARENT;
using syncable::SERVER_SPECIFICS;
+using syncable::SERVER_UNIQUE_POSITION;
using syncable::SERVER_VERSION;
#define ENUM_CASE(x) case x: return #x
@@ -198,20 +194,7 @@ void CopyServerFields(syncable::Entry* src, syncable::MutableEntry* dest) {
dest->Put(SERVER_IS_DEL, src->Get(SERVER_IS_DEL));
dest->Put(IS_UNAPPLIED_UPDATE, src->Get(IS_UNAPPLIED_UPDATE));
dest->Put(SERVER_SPECIFICS, src->Get(SERVER_SPECIFICS));
- dest->Put(SERVER_ORDINAL_IN_PARENT, src->Get(SERVER_ORDINAL_IN_PARENT));
-}
-
-void ClearServerData(syncable::MutableEntry* entry) {
- entry->Put(SERVER_NON_UNIQUE_NAME, "");
- entry->Put(SERVER_PARENT_ID, syncable::GetNullId());
- entry->Put(SERVER_MTIME, Time());
- entry->Put(SERVER_CTIME, Time());
- entry->Put(SERVER_VERSION, 0);
- entry->Put(SERVER_IS_DIR, false);
- entry->Put(SERVER_IS_DEL, false);
- entry->Put(IS_UNAPPLIED_UPDATE, false);
- entry->Put(SERVER_SPECIFICS, sync_pb::EntitySpecifics::default_instance());
- entry->Put(SERVER_ORDINAL_IN_PARENT, Int64ToNodeOrdinal(0));
+ dest->Put(SERVER_UNIQUE_POSITION, src->Get(SERVER_UNIQUE_POSITION));
}
} // namespace syncer
diff --git a/sync/engine/syncer.h b/sync/engine/syncer.h
index ac7892e..8656e7c 100644
--- a/sync/engine/syncer.h
+++ b/sync/engine/syncer.h
@@ -95,7 +95,6 @@ class SYNC_EXPORT_PRIVATE Syncer {
// Utility function declarations.
void CopyServerFields(syncable::Entry* src, syncable::MutableEntry* dest);
-void ClearServerData(syncable::MutableEntry* entry);
const char* SyncerStepToString(const SyncerStep);
} // namespace syncer
diff --git a/sync/engine/syncer_proto_util.cc b/sync/engine/syncer_proto_util.cc
index 9138417..3b72c17 100644
--- a/sync/engine/syncer_proto_util.cc
+++ b/sync/engine/syncer_proto_util.cc
@@ -519,6 +519,16 @@ bool SyncerProtoUtil::Compare(const syncable::Entry& local_entry,
}
// static
+bool SyncerProtoUtil::ShouldMaintainPosition(
+ const sync_pb::SyncEntity& sync_entity) {
+ // Maintain positions for bookmarks that are not server-defined top-level
+ // folders.
+ return GetModelType(sync_entity) == BOOKMARKS
+ && !(sync_entity.folder() &&
+ !sync_entity.server_defined_unique_tag().empty());
+}
+
+// static
void SyncerProtoUtil::CopyProtoBytesIntoBlob(const std::string& proto_bytes,
syncable::Blob* blob) {
syncable::Blob proto_blob(proto_bytes.begin(), proto_bytes.end());
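A brief usage illustration of the new predicate, mirroring the unit tests added to process_updates_command_unittest.cc in this change: ordinary bookmark entities maintain positions, while server-created permanent bookmark folders (and non-bookmark types) do not.

sync_pb::SyncEntity bookmark;
bookmark.mutable_specifics()->mutable_bookmark();
// SyncerProtoUtil::ShouldMaintainPosition(bookmark) evaluates to true.

sync_pb::SyncEntity permanent_folder;
permanent_folder.mutable_specifics()->mutable_bookmark();
permanent_folder.set_folder(true);
permanent_folder.set_server_defined_unique_tag("google_chrome_bookmarks");
// SyncerProtoUtil::ShouldMaintainPosition(permanent_folder) evaluates to false.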
diff --git a/sync/engine/syncer_proto_util.h b/sync/engine/syncer_proto_util.h
index 1f271d8..fedd211 100644
--- a/sync/engine/syncer_proto_util.h
+++ b/sync/engine/syncer_proto_util.h
@@ -69,6 +69,8 @@ class SYNC_EXPORT_PRIVATE SyncerProtoUtil {
static bool Compare(const syncable::Entry& local_entry,
const sync_pb::SyncEntity& server_entry);
+ static bool ShouldMaintainPosition(const sync_pb::SyncEntity& sync_entity);
+
// Utility methods for converting between syncable::Blobs and protobuf byte
// fields.
static void CopyProtoBytesIntoBlob(const std::string& proto_bytes,
diff --git a/sync/engine/syncer_unittest.cc b/sync/engine/syncer_unittest.cc
index 44b452f..5ee9e47 100644
--- a/sync/engine/syncer_unittest.cc
+++ b/sync/engine/syncer_unittest.cc
@@ -33,7 +33,6 @@
#include "sync/engine/traffic_recorder.h"
#include "sync/internal_api/public/base/model_type.h"
#include "sync/internal_api/public/engine/model_safe_worker.h"
-#include "sync/internal_api/public/base/node_ordinal.h"
#include "sync/protocol/bookmark_specifics.pb.h"
#include "sync/protocol/nigori_specifics.pb.h"
#include "sync/protocol/preference_specifics.pb.h"
@@ -59,10 +58,12 @@
using base::TimeDelta;
+using std::count;
using std::map;
using std::multimap;
using std::set;
using std::string;
+using std::vector;
namespace syncer {
@@ -96,7 +97,6 @@ using syncable::PARENT_ID;
using syncable::BASE_SERVER_SPECIFICS;
using syncable::SERVER_IS_DEL;
using syncable::SERVER_PARENT_ID;
-using syncable::SERVER_ORDINAL_IN_PARENT;
using syncable::SERVER_SPECIFICS;
using syncable::SERVER_VERSION;
using syncable::UNIQUE_CLIENT_TAG;
@@ -860,6 +860,12 @@ TEST_F(SyncerTest, EncryptionAwareConflicts) {
EXPECT_TRUE(GetCryptographer(&wtrans)->has_pending_keys());
}
+ // We need to remember the exact position of our local items, so we can
+ // make updates that do not modify those positions.
+ UniquePosition pos1;
+ UniquePosition pos2;
+ UniquePosition pos3;
+
mock_server_->AddUpdateSpecifics(1, 0, "A", 10, 10, true, 0, bookmark,
foreign_cache_guid(), "-1");
mock_server_->AddUpdateSpecifics(2, 1, "B", 10, 10, false, 2, bookmark,
@@ -875,6 +881,15 @@ TEST_F(SyncerTest, EncryptionAwareConflicts) {
VERIFY_ENTRY(2, false, false, false, 1, 10, 10, ids_, &rtrans);
VERIFY_ENTRY(3, false, false, false, 1, 10, 10, ids_, &rtrans);
VERIFY_ENTRY(4, false, false, false, 0, 10, 10, ids_, &rtrans);
+
+ Entry entry1(&rtrans, syncable::GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(entry1.Get(syncable::UNIQUE_POSITION).Equals(
+ entry1.Get(syncable::SERVER_UNIQUE_POSITION)));
+ pos1 = entry1.Get(syncable::UNIQUE_POSITION);
+ Entry entry2(&rtrans, syncable::GET_BY_ID, ids_.FromNumber(2));
+ pos2 = entry2.Get(syncable::UNIQUE_POSITION);
+ Entry entry3(&rtrans, syncable::GET_BY_ID, ids_.FromNumber(3));
+ pos3 = entry3.Get(syncable::UNIQUE_POSITION);
}
// Server side encryption will not be applied due to undecryptable data.
@@ -1418,13 +1433,29 @@ TEST_F(SyncerTest, TestCommitListOrderingWithNewItems) {
SyncShareNudge();
ASSERT_EQ(6u, mock_server_->committed_ids().size());
- // If this test starts failing, be aware other sort orders could be valid.
- EXPECT_TRUE(parent1_id == mock_server_->committed_ids()[0]);
- EXPECT_TRUE(parent2_id == mock_server_->committed_ids()[1]);
- EXPECT_TRUE(ids_.FromNumber(102) == mock_server_->committed_ids()[2]);
- EXPECT_TRUE(ids_.FromNumber(-103) == mock_server_->committed_ids()[3]);
- EXPECT_TRUE(ids_.FromNumber(-104) == mock_server_->committed_ids()[4]);
- EXPECT_TRUE(ids_.FromNumber(105) == mock_server_->committed_ids()[5]);
+
+ // This strange iteration and std::count() usage is to allow the order to
+ // vary. All we really care about is that parent1_id and parent2_id are the
+ // first two IDs, and that the children make up the next four. Other than
+ // that, ordering doesn't matter.
+
+ vector<syncable::Id>::const_iterator i =
+ mock_server_->committed_ids().begin();
+ vector<syncable::Id>::const_iterator parents_begin = i;
+ i++;
+ i++;
+ vector<syncable::Id>::const_iterator parents_end = i;
+ vector<syncable::Id>::const_iterator children_begin = i;
+ vector<syncable::Id>::const_iterator children_end =
+ mock_server_->committed_ids().end();
+
+ EXPECT_EQ(1, count(parents_begin, parents_end, parent1_id));
+ EXPECT_EQ(1, count(parents_begin, parents_end, parent2_id));
+
+ EXPECT_EQ(1, count(children_begin, children_end, ids_.FromNumber(-103)));
+ EXPECT_EQ(1, count(children_begin, children_end, ids_.FromNumber(102)));
+ EXPECT_EQ(1, count(children_begin, children_end, ids_.FromNumber(105)));
+ EXPECT_EQ(1, count(children_begin, children_end, ids_.FromNumber(-104)));
}
TEST_F(SyncerTest, TestCommitListOrderingCounterexample) {
@@ -1456,10 +1487,15 @@ TEST_F(SyncerTest, TestCommitListOrderingCounterexample) {
SyncShareNudge();
ASSERT_EQ(3u, mock_server_->committed_ids().size());
- // If this test starts failing, be aware other sort orders could be valid.
EXPECT_TRUE(parent_id_ == mock_server_->committed_ids()[0]);
- EXPECT_TRUE(child_id_ == mock_server_->committed_ids()[1]);
- EXPECT_TRUE(child2_id == mock_server_->committed_ids()[2]);
+ // There are two possible valid orderings.
+ if (child2_id == mock_server_->committed_ids()[1]) {
+ EXPECT_TRUE(child2_id == mock_server_->committed_ids()[1]);
+ EXPECT_TRUE(child_id_ == mock_server_->committed_ids()[2]);
+ } else {
+ EXPECT_TRUE(child_id_ == mock_server_->committed_ids()[1]);
+ EXPECT_TRUE(child2_id == mock_server_->committed_ids()[2]);
+ }
}
TEST_F(SyncerTest, TestCommitListOrderingAndNewParent) {
@@ -2340,7 +2376,6 @@ TEST_F(SyncerTest, CommitsUpdateDoesntAlterEntry) {
SyncShareNudge();
syncable::Id id;
int64 version;
- NodeOrdinal server_ordinal_in_parent;
{
syncable::ReadTransaction trans(FROM_HERE, directory());
Entry entry(&trans, syncable::GET_BY_HANDLE, entry_metahandle);
@@ -2348,7 +2383,6 @@ TEST_F(SyncerTest, CommitsUpdateDoesntAlterEntry) {
id = entry.Get(ID);
EXPECT_TRUE(id.ServerKnows());
version = entry.Get(BASE_VERSION);
- server_ordinal_in_parent = entry.Get(SERVER_ORDINAL_IN_PARENT);
}
sync_pb::SyncEntity* update = mock_server_->AddUpdateFromLastCommit();
update->set_originator_cache_guid(local_cache_guid());
@@ -2357,9 +2391,6 @@ TEST_F(SyncerTest, CommitsUpdateDoesntAlterEntry) {
EXPECT_EQ(id.GetServerId(), update->id_string());
EXPECT_EQ(root_id_.GetServerId(), update->parent_id_string());
EXPECT_EQ(version, update->version());
- EXPECT_EQ(
- NodeOrdinalToInt64(server_ordinal_in_parent),
- update->position_in_parent());
SyncShareNudge();
{
syncable::ReadTransaction trans(FROM_HERE, directory());
@@ -4702,225 +4733,6 @@ TEST_F(SyncerUndeletionTest, OtherClientUndeletesImmediately) {
EXPECT_EQ("Thadeusz", Get(metahandle_, NON_UNIQUE_NAME));
}
-// A group of tests exercising the syncer's handling of sibling ordering, as
-// represented in the sync protocol.
-class SyncerPositionUpdateTest : public SyncerTest {
- public:
- SyncerPositionUpdateTest() : next_update_id_(1), next_revision_(1) {}
-
- protected:
- void ExpectLocalItemsInServerOrder() {
- if (position_map_.empty())
- return;
-
- syncable::ReadTransaction trans(FROM_HERE, directory());
-
- Id prev_id;
- DCHECK(prev_id.IsRoot());
- PosMap::iterator next = position_map_.begin();
- for (PosMap::iterator i = next++; i != position_map_.end(); ++i) {
- Id id = i->second;
- Entry entry_with_id(&trans, GET_BY_ID, id);
- EXPECT_TRUE(entry_with_id.good());
- EXPECT_EQ(prev_id, entry_with_id.GetPredecessorId());
- EXPECT_EQ(
- i->first,
- NodeOrdinalToInt64(entry_with_id.Get(SERVER_ORDINAL_IN_PARENT)));
- if (next == position_map_.end()) {
- EXPECT_EQ(Id(), entry_with_id.GetSuccessorId());
- } else {
- EXPECT_EQ(next->second, entry_with_id.GetSuccessorId());
- next++;
- }
- prev_id = id;
- }
- }
-
- void AddRootItemWithPosition(int64 position) {
- string id = string("ServerId") + base::Int64ToString(next_update_id_++);
- string name = "my name is my id -- " + id;
- int revision = next_revision_++;
- mock_server_->AddUpdateDirectory(id, kRootId, name, revision, revision,
- foreign_cache_guid(),
- ids_.NewLocalId().GetServerId());
- mock_server_->SetLastUpdatePosition(position);
- position_map_.insert(
- PosMap::value_type(position, Id::CreateFromServerId(id)));
- }
- private:
- typedef multimap<int64, Id> PosMap;
- PosMap position_map_;
- int next_update_id_;
- int next_revision_;
- DISALLOW_COPY_AND_ASSIGN(SyncerPositionUpdateTest);
-};
-
-TEST_F(SyncerPositionUpdateTest, InOrderPositive) {
- // Add a bunch of items in increasing order, starting with just positive
- // position values.
- AddRootItemWithPosition(100);
- AddRootItemWithPosition(199);
- AddRootItemWithPosition(200);
- AddRootItemWithPosition(201);
- AddRootItemWithPosition(400);
-
- SyncShareNudge();
- ExpectLocalItemsInServerOrder();
-}
-
-TEST_F(SyncerPositionUpdateTest, InOrderNegative) {
- // Test negative position values, but in increasing order.
- AddRootItemWithPosition(-400);
- AddRootItemWithPosition(-201);
- AddRootItemWithPosition(-200);
- AddRootItemWithPosition(-150);
- AddRootItemWithPosition(100);
-
- SyncShareNudge();
- ExpectLocalItemsInServerOrder();
-}
-
-TEST_F(SyncerPositionUpdateTest, ReverseOrder) {
- // Test when items are sent in the reverse order.
- AddRootItemWithPosition(400);
- AddRootItemWithPosition(201);
- AddRootItemWithPosition(200);
- AddRootItemWithPosition(100);
- AddRootItemWithPosition(-150);
- AddRootItemWithPosition(-201);
- AddRootItemWithPosition(-200);
- AddRootItemWithPosition(-400);
-
- SyncShareNudge();
- ExpectLocalItemsInServerOrder();
-}
-
-TEST_F(SyncerPositionUpdateTest, RandomOrderInBatches) {
- // Mix it all up, interleaving position values, and try multiple batches of
- // updates.
- AddRootItemWithPosition(400);
- AddRootItemWithPosition(201);
- AddRootItemWithPosition(-400);
- AddRootItemWithPosition(100);
-
- SyncShareNudge();
- ExpectLocalItemsInServerOrder();
-
- AddRootItemWithPosition(-150);
- AddRootItemWithPosition(-200);
- AddRootItemWithPosition(200);
- AddRootItemWithPosition(-201);
-
- SyncShareNudge();
- ExpectLocalItemsInServerOrder();
-
- AddRootItemWithPosition(-144);
-
- SyncShareNudge();
- ExpectLocalItemsInServerOrder();
-}
-
-class SyncerPositionTiebreakingTest : public SyncerTest {
- public:
- SyncerPositionTiebreakingTest()
- : low_id_(Id::CreateFromServerId("A")),
- mid_id_(Id::CreateFromServerId("M")),
- high_id_(Id::CreateFromServerId("Z")),
- next_revision_(1) {
- DCHECK(low_id_ < mid_id_);
- DCHECK(mid_id_ < high_id_);
- DCHECK(low_id_ < high_id_);
- }
-
- // Adds the item by its Id, using a constant value for the position
- // so that the syncer has to resolve the order some other way.
- void Add(const Id& id) {
- int revision = next_revision_++;
- mock_server_->AddUpdateDirectory(id.GetServerId(), kRootId,
- id.GetServerId(), revision, revision,
- foreign_cache_guid(), ids_.NewLocalId().GetServerId());
- // The update position doesn't vary.
- mock_server_->SetLastUpdatePosition(90210);
- }
-
- void ExpectLocalOrderIsByServerId() {
- syncable::ReadTransaction trans(FROM_HERE, directory());
- Id null_id;
- Entry low(&trans, GET_BY_ID, low_id_);
- Entry mid(&trans, GET_BY_ID, mid_id_);
- Entry high(&trans, GET_BY_ID, high_id_);
- EXPECT_TRUE(low.good());
- EXPECT_TRUE(mid.good());
- EXPECT_TRUE(high.good());
- EXPECT_TRUE(low.GetPredecessorId() == null_id);
- EXPECT_TRUE(mid.GetPredecessorId() == low_id_);
- EXPECT_TRUE(high.GetPredecessorId() == mid_id_);
- EXPECT_TRUE(high.GetSuccessorId() == null_id);
- EXPECT_TRUE(mid.GetSuccessorId() == high_id_);
- EXPECT_TRUE(low.GetSuccessorId() == mid_id_);
- }
-
- protected:
- // When there's a tiebreak on the numeric position, it's supposed to be
- // broken by string comparison of the ids. These ids are in increasing
- // order.
- const Id low_id_;
- const Id mid_id_;
- const Id high_id_;
-
- private:
- int next_revision_;
- DISALLOW_COPY_AND_ASSIGN(SyncerPositionTiebreakingTest);
-};
-
-TEST_F(SyncerPositionTiebreakingTest, LowMidHigh) {
- Add(low_id_);
- Add(mid_id_);
- Add(high_id_);
- SyncShareNudge();
- ExpectLocalOrderIsByServerId();
-}
-
-TEST_F(SyncerPositionTiebreakingTest, LowHighMid) {
- Add(low_id_);
- Add(high_id_);
- Add(mid_id_);
- SyncShareNudge();
- ExpectLocalOrderIsByServerId();
-}
-
-TEST_F(SyncerPositionTiebreakingTest, HighMidLow) {
- Add(high_id_);
- Add(mid_id_);
- Add(low_id_);
- SyncShareNudge();
- ExpectLocalOrderIsByServerId();
-}
-
-TEST_F(SyncerPositionTiebreakingTest, HighLowMid) {
- Add(high_id_);
- Add(low_id_);
- Add(mid_id_);
- SyncShareNudge();
- ExpectLocalOrderIsByServerId();
-}
-
-TEST_F(SyncerPositionTiebreakingTest, MidHighLow) {
- Add(mid_id_);
- Add(high_id_);
- Add(low_id_);
- SyncShareNudge();
- ExpectLocalOrderIsByServerId();
-}
-
-TEST_F(SyncerPositionTiebreakingTest, MidLowHigh) {
- Add(mid_id_);
- Add(low_id_);
- Add(high_id_);
- SyncShareNudge();
- ExpectLocalOrderIsByServerId();
-}
-
enum {
TEST_PARAM_BOOKMARK_ENABLE_BIT,
TEST_PARAM_AUTOFILL_ENABLE_BIT,
diff --git a/sync/engine/syncer_util.cc b/sync/engine/syncer_util.cc
index 3548ba1..c571b10 100644
--- a/sync/engine/syncer_util.cc
+++ b/sync/engine/syncer_util.cc
@@ -9,12 +9,15 @@
#include <string>
#include <vector>
+#include "base/base64.h"
#include "base/location.h"
#include "base/metrics/histogram.h"
+#include "base/string_number_conversions.h"
#include "sync/engine/conflict_resolver.h"
#include "sync/engine/syncer_proto_util.h"
#include "sync/engine/syncer_types.h"
#include "sync/internal_api/public/base/model_type.h"
+#include "sync/internal_api/public/base/unique_position.h"
#include "sync/protocol/bookmark_specifics.pb.h"
#include "sync/protocol/password_specifics.pb.h"
#include "sync/protocol/sync.pb.h"
@@ -29,13 +32,9 @@
#include "sync/util/cryptographer.h"
#include "sync/util/time.h"
-// TODO(vishwath): Remove this include after node positions have
-// shifted to completely uing Ordinals.
-// See http://crbug.com/145412 .
-#include "sync/internal_api/public/base/node_ordinal.h"
-
namespace syncer {
+using syncable::BASE_SERVER_SPECIFICS;
using syncable::BASE_VERSION;
using syncable::CHANGES_VERSION;
using syncable::CREATE_NEW_UPDATE_ITEM;
@@ -54,7 +53,6 @@ using syncable::META_HANDLE;
using syncable::MTIME;
using syncable::MutableEntry;
using syncable::NON_UNIQUE_NAME;
-using syncable::BASE_SERVER_SPECIFICS;
using syncable::PARENT_ID;
using syncable::SERVER_CTIME;
using syncable::SERVER_IS_DEL;
@@ -62,13 +60,15 @@ using syncable::SERVER_IS_DIR;
using syncable::SERVER_MTIME;
using syncable::SERVER_NON_UNIQUE_NAME;
using syncable::SERVER_PARENT_ID;
-using syncable::SERVER_ORDINAL_IN_PARENT;
using syncable::SERVER_SPECIFICS;
+using syncable::SERVER_UNIQUE_POSITION;
using syncable::SERVER_VERSION;
-using syncable::UNIQUE_CLIENT_TAG;
-using syncable::UNIQUE_SERVER_TAG;
using syncable::SPECIFICS;
using syncable::SYNCER;
+using syncable::UNIQUE_BOOKMARK_TAG;
+using syncable::UNIQUE_CLIENT_TAG;
+using syncable::UNIQUE_POSITION;
+using syncable::UNIQUE_SERVER_TAG;
using syncable::WriteTransaction;
syncable::Id FindLocalIdToUpdate(
@@ -273,7 +273,32 @@ UpdateAttemptResponse AttemptToUpdateEntry(
return SUCCESS;
}
+std::string GetUniqueBookmarkTagFromUpdate(const sync_pb::SyncEntity& update) {
+ if (!update.has_originator_cache_guid() ||
+ !update.has_originator_client_item_id()) {
+ return std::string();
+ }
+
+ return syncable::GenerateSyncableBookmarkHash(
+ update.originator_cache_guid(), update.originator_client_item_id());
+}
+
+UniquePosition GetUpdatePosition(const sync_pb::SyncEntity& update,
+ const std::string& suffix) {
+ DCHECK(UniquePosition::IsValidSuffix(suffix));
+ if (!(SyncerProtoUtil::ShouldMaintainPosition(update))) {
+ return UniquePosition::CreateInvalid();
+ } else if (update.has_unique_position()) {
+ return UniquePosition::FromProto(update.unique_position());
+ } else if (update.has_position_in_parent()) {
+ return UniquePosition::FromInt64(update.position_in_parent(), suffix);
+ } else {
+ return UniquePosition::CreateInvalid();
+ }
+}
+
namespace {
+
// Helper to synthesize a new-style sync_pb::EntitySpecifics for use locally,
// when the server speaks only the old sync_pb::SyncEntity_BookmarkData-based
// protocol.
@@ -294,9 +319,34 @@ void UpdateBookmarkSpecifics(const std::string& singleton_tag,
local_entry->Put(SERVER_SPECIFICS, pb);
}
+void UpdateBookmarkPositioning(const sync_pb::SyncEntity& update,
+ MutableEntry* local_entry) {
+ // Update our unique bookmark tag. In many cases this will be identical to
+ // the tag we already have. However, clients that have recently upgraded to
+ // versions that support unique positions will have incorrect tags. See the
+ // v86 migration logic in directory_backing_store.cc for more information.
+ //
+ // Both the old and new values are unique to this element. Applying this
+ // update will not risk the creation of conflicting unique tags.
+ std::string bookmark_tag = GetUniqueBookmarkTagFromUpdate(update);
+ if (UniquePosition::IsValidSuffix(bookmark_tag)) {
+ local_entry->PutUniqueBookmarkTag(bookmark_tag);
+ }
+
+ // Update our position.
+ UniquePosition update_pos =
+ GetUpdatePosition(update, local_entry->Get(UNIQUE_BOOKMARK_TAG));
+ if (update_pos.IsValid()) {
+ local_entry->Put(syncable::SERVER_UNIQUE_POSITION, update_pos);
+ } else {
+ // TODO(sync): This and other cases of unexpected input should be handled
+ // better.
+ NOTREACHED();
+ }
+}
+
} // namespace
-// Pass in name and checksum because of UTF8 conversion.
void UpdateServerFieldsFromUpdate(
MutableEntry* target,
const sync_pb::SyncEntity& update,
@@ -355,9 +405,9 @@ void UpdateServerFieldsFromUpdate(
bookmark.bookmark_favicon(),
target);
}
- if (update.has_position_in_parent())
- target->Put(SERVER_ORDINAL_IN_PARENT,
- Int64ToNodeOrdinal(update.position_in_parent()));
+ if (SyncerProtoUtil::ShouldMaintainPosition(update)) {
+ UpdateBookmarkPositioning(update, target);
+ }
target->Put(SERVER_IS_DEL, update.deleted());
// We only mark the entry as unapplied if its version is greater than the
@@ -378,21 +428,6 @@ void CreateNewEntry(syncable::WriteTransaction *trans,
}
}
-void SplitServerInformationIntoNewEntry(
- syncable::WriteTransaction* trans,
- syncable::MutableEntry* entry) {
- syncable::Id id = entry->Get(ID);
- ChangeEntryIDAndUpdateChildren(trans, entry, trans->directory()->NextId());
- entry->Put(BASE_VERSION, 0);
-
- MutableEntry new_entry(trans, CREATE_NEW_UPDATE_ITEM, id);
- CopyServerFields(entry, &new_entry);
- ClearServerData(entry);
-
- DVLOG(1) << "Splitting server information, local entry: " << *entry
- << " server entry: " << new_entry;
-}
-
// This function is called on an entry when we can update the user-facing data
// from the server data.
void UpdateLocalDataFromServerData(
@@ -415,11 +450,8 @@ void UpdateLocalDataFromServerData(
} else {
entry->Put(NON_UNIQUE_NAME, entry->Get(SERVER_NON_UNIQUE_NAME));
entry->Put(PARENT_ID, entry->Get(SERVER_PARENT_ID));
+ entry->Put(UNIQUE_POSITION, entry->Get(SERVER_UNIQUE_POSITION));
CHECK(entry->Put(IS_DEL, false));
- Id new_predecessor =
- entry->ComputePrevIdFromServerPosition(entry->Get(SERVER_PARENT_ID));
- CHECK(entry->PutPredecessor(new_predecessor))
- << " Illegal predecessor after converting from server position.";
}
entry->Put(CTIME, entry->Get(SERVER_CTIME));
@@ -448,47 +480,6 @@ VerifyCommitResult ValidateCommitEntry(syncable::Entry* entry) {
return VERIFY_OK;
}
-bool AddItemThenPredecessors(
- syncable::BaseTransaction* trans,
- syncable::Entry* item,
- syncable::IndexedBitField inclusion_filter,
- syncable::MetahandleSet* inserted_items,
- std::vector<syncable::Id>* commit_ids) {
-
- if (!inserted_items->insert(item->Get(META_HANDLE)).second)
- return false;
- commit_ids->push_back(item->Get(ID));
- if (item->Get(IS_DEL))
- return true; // Deleted items have no predecessors.
-
- Id prev_id = item->GetPredecessorId();
- while (!prev_id.IsRoot()) {
- Entry prev(trans, GET_BY_ID, prev_id);
- CHECK(prev.good()) << "Bad id when walking predecessors.";
- if (!prev.Get(inclusion_filter))
- break;
- if (!inserted_items->insert(prev.Get(META_HANDLE)).second)
- break;
- commit_ids->push_back(prev_id);
- prev_id = prev.GetPredecessorId();
- }
- return true;
-}
-
-void AddPredecessorsThenItem(
- syncable::BaseTransaction* trans,
- syncable::Entry* item,
- syncable::IndexedBitField inclusion_filter,
- syncable::MetahandleSet* inserted_items,
- std::vector<syncable::Id>* commit_ids) {
- size_t initial_size = commit_ids->size();
- if (!AddItemThenPredecessors(trans, item, inclusion_filter, inserted_items,
- commit_ids))
- return;
- // Reverse what we added to get the correct order.
- std::reverse(commit_ids->begin() + initial_size, commit_ids->end());
-}
-
void MarkDeletedChildrenSynced(
syncable::Directory* dir,
std::set<syncable::Id>* deleted_folders) {
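
The new GetUpdatePosition() helper prefers the protocol's unique_position
field and falls back to the legacy int64 position_in_parent, using the item's
unique bookmark tag as the tiebreaking suffix. A minimal sketch of that
fallback path (not part of the patch), assuming ShouldMaintainPosition()
treats bookmark entities as positionable and using made-up originator values:

  sync_pb::SyncEntity update;
  update.mutable_specifics()->mutable_bookmark();   // mark the entity as a bookmark
  update.set_originator_cache_guid("cache_guid");   // example originator values
  update.set_originator_client_item_id("item_1");
  update.set_position_in_parent(90210);             // legacy int64 position only

  std::string suffix = GetUniqueBookmarkTagFromUpdate(update);
  DCHECK(UniquePosition::IsValidSuffix(suffix));
  // No unique_position on the update, so GetUpdatePosition() falls back to
  // UniquePosition::FromInt64(position_in_parent, suffix).
  UniquePosition pos = GetUpdatePosition(update, suffix);
  DCHECK(pos.IsValid());
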
diff --git a/sync/engine/syncer_util.h b/sync/engine/syncer_util.h
index 108622c..ea947ab 100644
--- a/sync/engine/syncer_util.h
+++ b/sync/engine/syncer_util.h
@@ -48,6 +48,21 @@ UpdateAttemptResponse AttemptToUpdateEntry(
syncable::MutableEntry* const entry,
Cryptographer* cryptographer);
+// Returns the most accurate position information available in this update. It
+// prefers to use the unique_position() field, but will fall back to using the
+// int64-based position_in_parent if necessary.
+//
+// The suffix parameter is the unique bookmark tag for the item being updated.
+//
+// Will return an invalid position if no valid position can be constructed, or
+// if this type does not support positioning.
+UniquePosition GetUpdatePosition(const sync_pb::SyncEntity& update,
+ const std::string& suffix);
+
+// Fetch the cache_guid and item_id-based unique bookmark tag from an update.
+// Will return an empty string if something unexpected happens.
+std::string GetUniqueBookmarkTagFromUpdate(const sync_pb::SyncEntity& update);
+
// Pass in name to avoid redundant UTF8 conversion.
void UpdateServerFieldsFromUpdate(
syncable::MutableEntry* local_entry,
@@ -58,10 +73,6 @@ void UpdateServerFieldsFromUpdate(
void CreateNewEntry(syncable::WriteTransaction *trans,
const syncable::Id& id);
-void SplitServerInformationIntoNewEntry(
- syncable::WriteTransaction* trans,
- syncable::MutableEntry* entry);
-
// This function is called on an entry when we can update the user-facing data
// from the server data.
void UpdateLocalDataFromServerData(syncable::WriteTransaction* trans,
@@ -88,32 +99,6 @@ VerifyResult VerifyUndelete(syncable::WriteTransaction* trans,
const sync_pb::SyncEntity& update,
syncable::MutableEntry* target);
-// Append |item|, followed by a chain of its predecessors selected by
-// |inclusion_filter|, to the |commit_ids| vector and tag them as included by
-// storing in the set |inserted_items|. |inclusion_filter| (typically one of
-// IS_UNAPPLIED_UPDATE or IS_UNSYNCED) selects which type of predecessors to
-// include. Returns true if |item| was added, and false if it was already in
-// the list.
-//
-// Use AddPredecessorsThenItem instead of this method if you want the
-// item to be the last, rather than first, item appended.
-bool AddItemThenPredecessors(
- syncable::BaseTransaction* trans,
- syncable::Entry* item,
- syncable::IndexedBitField inclusion_filter,
- syncable::MetahandleSet* inserted_items,
- std::vector<syncable::Id>* commit_ids);
-
-// Exactly like AddItemThenPredecessors, except items are appended in the
-// reverse (and generally more useful) order: a chain of predecessors from
-// far to near, and finally the item.
-void AddPredecessorsThenItem(
- syncable::BaseTransaction* trans,
- syncable::Entry* item,
- syncable::IndexedBitField inclusion_filter,
- syncable::MetahandleSet* inserted_items,
- std::vector<syncable::Id>* commit_ids);
-
void MarkDeletedChildrenSynced(
syncable::Directory* dir,
std::set<syncable::Id>* deleted_folders);
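
GetUniqueBookmarkTagFromUpdate() derives the tag from the update's
originator_cache_guid and originator_client_item_id via
syncable::GenerateSyncableBookmarkHash(). A rough stand-in for the idea,
assuming (this is an assumption, not the exact upstream formula) that the tag
is a base64-encoded SHA-1 over the concatenated originator fields:

  #include "base/base64.h"
  #include "base/sha1.h"

  // Hypothetical helper; the real derivation lives in
  // sync/syncable/syncable_util.cc.
  std::string MakeBookmarkTag(const std::string& cache_guid,
                              const std::string& client_item_id) {
    std::string tag;
    base::Base64Encode(base::SHA1HashString(cache_guid + client_item_id), &tag);
    return tag;  // 28 base64 characters == UniquePosition::kSuffixLength
  }
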
diff --git a/sync/internal_api/base_node.cc b/sync/internal_api/base_node.cc
index 4ae318b..b45787f 100644
--- a/sync/internal_api/base_node.cc
+++ b/sync/internal_api/base_node.cc
@@ -209,20 +209,13 @@ int64 BaseNode::GetSuccessorId() const {
}
int64 BaseNode::GetFirstChildId() const {
- syncable::Directory* dir = GetTransaction()->GetDirectory();
- syncable::BaseTransaction* trans = GetTransaction()->GetWrappedTrans();
- syncable::Id id_string;
- // TODO(akalin): Propagate up the error further (see
- // http://crbug.com/100907).
- CHECK(dir->GetFirstChildId(trans,
- GetEntry()->Get(syncable::ID), &id_string));
+ syncable::Id id_string = GetEntry()->GetFirstChildId();
if (id_string.IsRoot())
return kInvalidId;
return IdToMetahandle(GetTransaction()->GetWrappedTrans(), id_string);
}
int BaseNode::GetTotalNodeCount() const {
- syncable::Directory* dir = GetTransaction()->GetDirectory();
syncable::BaseTransaction* trans = GetTransaction()->GetWrappedTrans();
int count = 1; // Start with one to include the node itself.
@@ -238,13 +231,14 @@ int BaseNode::GetTotalNodeCount() const {
syncable::Entry entry(trans, syncable::GET_BY_HANDLE, handle);
if (!entry.good())
continue;
- syncable::Id id = entry.Get(syncable::ID);
- syncable::Id child_id;
- if (dir->GetFirstChildId(trans, id, &child_id) && !child_id.IsRoot())
- stack.push(IdToMetahandle(trans, child_id));
syncable::Id successor_id = entry.GetSuccessorId();
if (!successor_id.IsRoot())
stack.push(IdToMetahandle(trans, successor_id));
+ if (!entry.Get(syncable::IS_DIR))
+ continue;
+ syncable::Id child_id = entry.GetFirstChildId();
+ if (!child_id.IsRoot())
+ stack.push(IdToMetahandle(trans, child_id));
}
return count;
}
@@ -261,21 +255,23 @@ DictionaryValue* BaseNode::GetSummaryAsValue() const {
DictionaryValue* BaseNode::GetDetailsAsValue() const {
DictionaryValue* node_info = GetSummaryAsValue();
node_info->SetString(
- "modificationTime",
- GetTimeDebugString(GetModificationTime()));
+ "modificationTime", GetTimeDebugString(GetModificationTime()));
node_info->SetString("parentId", base::Int64ToString(GetParentId()));
// Specifics are already in the Entry value, so no need to duplicate
// it here.
- node_info->SetString("externalId",
- base::Int64ToString(GetExternalId()));
- node_info->SetString("predecessorId",
- base::Int64ToString(GetPredecessorId()));
- node_info->SetString("successorId",
- base::Int64ToString(GetSuccessorId()));
- node_info->SetString("firstChildId",
- base::Int64ToString(GetFirstChildId()));
- node_info->Set("entry",
- GetEntry()->ToValue(GetTransaction()->GetCryptographer()));
+ node_info->SetString("externalId", base::Int64ToString(GetExternalId()));
+ if (GetEntry()->ShouldMaintainPosition() &&
+ !GetEntry()->Get(syncable::IS_DEL)) {
+ node_info->SetString("successorId", base::Int64ToString(GetSuccessorId()));
+ node_info->SetString(
+ "predecessorId", base::Int64ToString(GetPredecessorId()));
+ }
+ if (GetEntry()->Get(syncable::IS_DIR)) {
+ node_info->SetString(
+ "firstChildId", base::Int64ToString(GetFirstChildId()));
+ }
+ node_info->Set(
+ "entry", GetEntry()->ToValue(GetTransaction()->GetCryptographer()));
return node_info;
}
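
GetTotalNodeCount() now walks the subtree iteratively: each visited node
contributes its successor (right sibling), and directories also contribute
their first child. A self-contained sketch of the same stack discipline over a
toy node type, standing in for the real syncable entry:

  #include <stack>

  struct Node {                    // toy stand-in for a syncable entry
    bool is_dir;
    Node* first_child;             // analogous to GetFirstChildId()
    Node* next_sibling;            // analogous to GetSuccessorId()
  };

  int CountSubtree(Node* root) {
    int count = 1;                 // start with one to include the node itself
    std::stack<Node*> to_visit;
    if (root->first_child)
      to_visit.push(root->first_child);
    while (!to_visit.empty()) {
      Node* node = to_visit.top();
      to_visit.pop();
      ++count;
      if (node->next_sibling)
        to_visit.push(node->next_sibling);
      if (node->is_dir && node->first_child)
        to_visit.push(node->first_child);
    }
    return count;
  }
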
diff --git a/sync/internal_api/change_reorder_buffer.cc b/sync/internal_api/change_reorder_buffer.cc
index 5358fa2..0ddb6b3 100644
--- a/sync/internal_api/change_reorder_buffer.cc
+++ b/sync/internal_api/change_reorder_buffer.cc
@@ -154,8 +154,9 @@ bool ChangeReorderBuffer::GetAllChangesInTreeOrder(
CHECK_EQ(BaseNode::INIT_OK, node.InitByIdLookup(i->first));
// We only care about parents of entry's with position-sensitive models.
- if (ShouldMaintainPosition(node.GetEntry()->GetModelType())) {
+ if (node.GetEntry()->ShouldMaintainPosition()) {
parents_of_position_changes.insert(node.GetParentId());
+ traversal.ExpandToInclude(trans, node.GetParentId());
}
}
}
@@ -200,12 +201,7 @@ bool ChangeReorderBuffer::GetAllChangesInTreeOrder(
// There were ordering changes on the children of this parent, so
// enumerate all the children in the sibling order.
syncable::Entry parent(trans, syncable::GET_BY_HANDLE, next);
- syncable::Id id;
- if (!trans->directory()->GetFirstChildId(
- trans, parent.Get(syncable::ID), &id)) {
- *changes = ImmutableChangeRecordList();
- return false;
- }
+ syncable::Id id = parent.GetFirstChildId();
while (!id.IsRoot()) {
syncable::Entry child(trans, syncable::GET_BY_ID, id);
CHECK(child.good());
diff --git a/sync/internal_api/public/base/model_type.h b/sync/internal_api/public/base/model_type.h
index 92df116..7a95d8c 100644
--- a/sync/internal_api/public/base/model_type.h
+++ b/sync/internal_api/public/base/model_type.h
@@ -158,10 +158,6 @@ SYNC_EXPORT_PRIVATE ModelType GetModelType(
SYNC_EXPORT ModelType GetModelTypeFromSpecifics(
const sync_pb::EntitySpecifics& specifics);
-// If this returns false, we shouldn't bother maintaining a position
-// value (sibling ordering) for this item.
-bool ShouldMaintainPosition(ModelType model_type);
-
// Protocol types are those types that have actual protocol buffer
// representations. This distinguishes them from Proxy types, which have no
// protocol representation and are never sent to the server.
diff --git a/sync/internal_api/public/base/unique_position.cc b/sync/internal_api/public/base/unique_position.cc
index 839fc82..a47d42b 100644
--- a/sync/internal_api/public/base/unique_position.cc
+++ b/sync/internal_api/public/base/unique_position.cc
@@ -117,6 +117,13 @@ void UniquePosition::ToProto(sync_pb::UniquePosition* proto) const {
proto->set_value(bytes_);
}
+void UniquePosition::SerializeToString(std::string* blob) const {
+ DCHECK(blob);
+ sync_pb::UniquePosition proto;
+ ToProto(&proto);
+ proto.SerializeToString(blob);
+}
+
int64 UniquePosition::ToInt64() const {
uint64 y = 0;
const std::string& s = bytes_;
@@ -140,6 +147,9 @@ bool UniquePosition::IsValid() const {
}
std::string UniquePosition::ToDebugString() const {
+ if (bytes_.empty())
+ return std::string("INVALID[]");
+
std::string debug_string = base::HexEncode(bytes_.data(), bytes_.length());
if (!IsValid()) {
debug_string = "INVALID[" + debug_string + "]";
diff --git a/sync/internal_api/public/base/unique_position.h b/sync/internal_api/public/base/unique_position.h
index 3fbd5af..f1ff950 100644
--- a/sync/internal_api/public/base/unique_position.h
+++ b/sync/internal_api/public/base/unique_position.h
@@ -78,6 +78,9 @@ class SYNC_EXPORT_PRIVATE UniquePosition {
// Serializes the position's internal state to a protobuf.
void ToProto(sync_pb::UniquePosition* proto) const;
+ // Serializes the protobuf representation of this object as a string.
+ void SerializeToString(std::string* blob) const;
+
// Returns a human-readable representation of this item's internal state.
std::string ToDebugString() const;
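
SerializeToString() exists for the benefit of the directory backing store: it
round-trips the position through its sync_pb::UniquePosition form so the value
can be bound to a SQLite blob column. A small usage sketch, assuming |suffix|
is a valid unique bookmark tag:

  UniquePosition pos = UniquePosition::InitialPosition(suffix);

  std::string blob;
  pos.SerializeToString(&blob);        // proto-encoded bytes, ready for BindBlob()

  sync_pb::UniquePosition proto;
  CHECK(proto.ParseFromString(blob));  // mirrors what UnpackEntry() does on load
  UniquePosition restored = UniquePosition::FromProto(proto);
  DCHECK(restored.Equals(pos));
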
diff --git a/sync/internal_api/public/base/unique_position_unittest.cc b/sync/internal_api/public/base/unique_position_unittest.cc
index 0f979c8..659ca60 100644
--- a/sync/internal_api/public/base/unique_position_unittest.cc
+++ b/sync/internal_api/public/base/unique_position_unittest.cc
@@ -69,7 +69,9 @@ const UniquePosition kSmallPositionPlusOne = FromBytes(
const std::string kMinSuffix =
std::string(UniquePosition::kSuffixLength - 1, '\x00') + '\x01';
const std::string kMaxSuffix(UniquePosition::kSuffixLength, '\xFF');
-const std::string kNormalSuffix(UniquePosition::kSuffixLength, '\xAB');
+const std::string kNormalSuffix(
+ "\x68\x44\x6C\x6B\x32\x58\x78\x34\x69\x70\x46\x34\x79\x49"
+ "\x44\x4F\x66\x4C\x58\x41\x31\x34\x68\x59\x56\x43\x6F\x3D");
::testing::AssertionResult LessThan(const char* m_expr,
const char* n_expr,
diff --git a/sync/internal_api/sync_manager_impl.cc b/sync/internal_api/sync_manager_impl.cc
index 017fdf5..363fa81 100644
--- a/sync/internal_api/sync_manager_impl.cc
+++ b/sync/internal_api/sync_manager_impl.cc
@@ -56,6 +56,7 @@ namespace syncer {
using sessions::SyncSessionContext;
using syncable::ImmutableWriteTransactionInfo;
using syncable::SPECIFICS;
+using syncable::UNIQUE_POSITION;
namespace {
@@ -225,12 +226,9 @@ bool SyncManagerImpl::VisiblePositionsDiffer(
const syncable::EntryKernelMutation& mutation) const {
const syncable::EntryKernel& a = mutation.original;
const syncable::EntryKernel& b = mutation.mutated;
- // If the datatype isn't one where the browser model cares about position,
- // don't bother notifying that data model of position-only changes.
- if (!ShouldMaintainPosition(GetModelTypeFromSpecifics(b.ref(SPECIFICS)))) {
+ if (!b.ShouldMaintainPosition())
return false;
- }
- if (a.ref(syncable::NEXT_ID) != b.ref(syncable::NEXT_ID))
+ if (!a.ref(UNIQUE_POSITION).Equals(b.ref(UNIQUE_POSITION)))
return true;
if (a.ref(syncable::PARENT_ID) != b.ref(syncable::PARENT_ID))
return true;
diff --git a/sync/internal_api/sync_manager_impl.h b/sync/internal_api/sync_manager_impl.h
index c0373cf..acc0175 100644
--- a/sync/internal_api/sync_manager_impl.h
+++ b/sync/internal_api/sync_manager_impl.h
@@ -220,11 +220,10 @@ class SYNC_EXPORT_PRIVATE SyncManagerImpl :
typedef std::map<std::string, JsMessageHandler> JsMessageHandlerMap;
// Determine if the parents or predecessors differ between the old and new
- // versions of an entry stored in |a| and |b|. Note that a node's index may
- // change without its NEXT_ID changing if the node at NEXT_ID also moved (but
- // the relative order is unchanged). To handle such cases, we rely on the
- // caller to treat a position update on any sibling as updating the positions
- // of all siblings.
+ // versions of an entry. Note that a node's index may change without its
+ // UNIQUE_POSITION changing if its sibling nodes were changed. To handle such
+ // cases, we rely on the caller to treat a position update on any sibling as
+ // updating the positions of all siblings.
bool VisiblePositionsDiffer(
const syncable::EntryKernelMutation& mutation) const;
diff --git a/sync/internal_api/sync_manager_impl_unittest.cc b/sync/internal_api/sync_manager_impl_unittest.cc
index 5e4d0ae..9a6ddee 100644
--- a/sync/internal_api/sync_manager_impl_unittest.cc
+++ b/sync/internal_api/sync_manager_impl_unittest.cc
@@ -532,6 +532,8 @@ namespace {
void CheckNodeValue(const BaseNode& node, const base::DictionaryValue& value,
bool is_detailed) {
+ size_t expected_field_count = 4;
+
ExpectInt64Value(node.GetId(), value, "id");
{
bool is_folder = false;
@@ -539,28 +541,22 @@ void CheckNodeValue(const BaseNode& node, const base::DictionaryValue& value,
EXPECT_EQ(node.GetIsFolder(), is_folder);
}
ExpectDictStringValue(node.GetTitle(), value, "title");
- {
- ModelType expected_model_type = node.GetModelType();
- std::string type_str;
- EXPECT_TRUE(value.GetString("type", &type_str));
- if (expected_model_type >= FIRST_REAL_MODEL_TYPE) {
- ModelType model_type = ModelTypeFromString(type_str);
- EXPECT_EQ(expected_model_type, model_type);
- } else if (expected_model_type == TOP_LEVEL_FOLDER) {
- EXPECT_EQ("Top-level folder", type_str);
- } else if (expected_model_type == UNSPECIFIED) {
- EXPECT_EQ("Unspecified", type_str);
- } else {
- ADD_FAILURE();
- }
+
+ ModelType expected_model_type = node.GetModelType();
+ std::string type_str;
+ EXPECT_TRUE(value.GetString("type", &type_str));
+ if (expected_model_type >= FIRST_REAL_MODEL_TYPE) {
+ ModelType model_type = ModelTypeFromString(type_str);
+ EXPECT_EQ(expected_model_type, model_type);
+ } else if (expected_model_type == TOP_LEVEL_FOLDER) {
+ EXPECT_EQ("Top-level folder", type_str);
+ } else if (expected_model_type == UNSPECIFIED) {
+ EXPECT_EQ("Unspecified", type_str);
+ } else {
+ ADD_FAILURE();
}
+
if (is_detailed) {
- ExpectInt64Value(node.GetParentId(), value, "parentId");
- ExpectTimeValue(node.GetModificationTime(), value, "modificationTime");
- ExpectInt64Value(node.GetExternalId(), value, "externalId");
- ExpectInt64Value(node.GetPredecessorId(), value, "predecessorId");
- ExpectInt64Value(node.GetSuccessorId(), value, "successorId");
- ExpectInt64Value(node.GetFirstChildId(), value, "firstChildId");
{
scoped_ptr<base::DictionaryValue> expected_entry(
node.GetEntry()->ToValue(NULL));
@@ -568,10 +564,27 @@ void CheckNodeValue(const BaseNode& node, const base::DictionaryValue& value,
EXPECT_TRUE(value.Get("entry", &entry));
EXPECT_TRUE(base::Value::Equals(entry, expected_entry.get()));
}
- EXPECT_EQ(11u, value.size());
- } else {
- EXPECT_EQ(4u, value.size());
+
+ ExpectInt64Value(node.GetParentId(), value, "parentId");
+ ExpectTimeValue(node.GetModificationTime(), value, "modificationTime");
+ ExpectInt64Value(node.GetExternalId(), value, "externalId");
+ expected_field_count += 4;
+
+ if (value.HasKey("predecessorId")) {
+ ExpectInt64Value(node.GetPredecessorId(), value, "predecessorId");
+ expected_field_count++;
+ }
+ if (value.HasKey("successorId")) {
+ ExpectInt64Value(node.GetSuccessorId(), value, "successorId");
+ expected_field_count++;
+ }
+ if (value.HasKey("firstChildId")) {
+ ExpectInt64Value(node.GetFirstChildId(), value, "firstChildId");
+ expected_field_count++;
+ }
}
+
+ EXPECT_EQ(expected_field_count, value.size());
}
} // namespace
diff --git a/sync/internal_api/test/test_entry_factory.cc b/sync/internal_api/test/test_entry_factory.cc
index e53c458..f9d7d49 100644
--- a/sync/internal_api/test/test_entry_factory.cc
+++ b/sync/internal_api/test/test_entry_factory.cc
@@ -99,13 +99,6 @@ void TestEntryFactory::CreateUnsyncedItem(
WriteTransaction trans(FROM_HERE, UNITTEST, directory_);
- Id predecessor_id;
- if (model_type == BOOKMARKS) {
- bool lookup_result = directory_->GetLastChildIdForTest(
- &trans, parent_id, &predecessor_id);
- DCHECK(lookup_result);
- }
-
MutableEntry entry(&trans, syncable::CREATE, model_type, parent_id, name);
DCHECK(entry.good());
entry.Put(syncable::ID, item_id);
@@ -119,12 +112,6 @@ void TestEntryFactory::CreateUnsyncedItem(
AddDefaultFieldValue(model_type, &default_specifics);
entry.Put(syncable::SPECIFICS, default_specifics);
- // Bookmarks get inserted at the end of the list.
- if (model_type == BOOKMARKS) {
- bool put_result = entry.PutPredecessor(predecessor_id);
- DCHECK(put_result);
- }
-
if (item_id.ServerKnows()) {
entry.Put(syncable::SERVER_SPECIFICS, default_specifics);
entry.Put(syncable::SERVER_IS_DIR, false);
@@ -177,12 +164,6 @@ int64 TestEntryFactory::CreateSyncedItem(
entry.Put(syncable::IS_DEL, false);
entry.Put(syncable::PARENT_ID, parent_id);
- // TODO(sync): Place bookmarks at the end of the list?
- if (!entry.PutPredecessor(TestIdFactory::root())) {
- NOTREACHED();
- return syncable::kInvalidMetaHandle;
- }
-
entry.Put(syncable::SERVER_VERSION, GetNextRevision());
entry.Put(syncable::IS_UNAPPLIED_UPDATE, false);
entry.Put(syncable::SERVER_NON_UNIQUE_NAME, name);
diff --git a/sync/protocol/proto_value_conversions.cc b/sync/protocol/proto_value_conversions.cc
index d102b7c..fbf90eb 100644
--- a/sync/protocol/proto_value_conversions.cc
+++ b/sync/protocol/proto_value_conversions.cc
@@ -11,6 +11,7 @@
#include "base/logging.h"
#include "base/string_number_conversions.h"
#include "base/values.h"
+#include "sync/internal_api/public/base/unique_position.h"
#include "sync/protocol/app_notification_specifics.pb.h"
#include "sync/protocol/app_setting_specifics.pb.h"
#include "sync/protocol/app_specifics.pb.h"
@@ -35,6 +36,7 @@
#include "sync/protocol/synced_notification_specifics.pb.h"
#include "sync/protocol/theme_specifics.pb.h"
#include "sync/protocol/typed_url_specifics.pb.h"
+#include "sync/protocol/unique_position.pb.h"
namespace syncer {
@@ -542,6 +544,12 @@ base::DictionaryValue* EntitySpecificsToValue(
namespace {
+StringValue* UniquePositionToStringValue(
+ const sync_pb::UniquePosition& proto) {
+ UniquePosition pos = UniquePosition::FromProto(proto);
+ return new StringValue(pos.ToDebugString());
+}
+
base::DictionaryValue* SyncEntityToValue(const sync_pb::SyncEntity& proto,
bool include_specifics) {
base::DictionaryValue* value = new base::DictionaryValue();
@@ -556,6 +564,7 @@ base::DictionaryValue* SyncEntityToValue(const sync_pb::SyncEntity& proto,
SET_INT64(sync_timestamp);
SET_STR(server_defined_unique_tag);
SET_INT64(position_in_parent);
+ SET(unique_position, UniquePositionToStringValue);
SET_STR(insert_after_item_id);
SET_BOOL(deleted);
SET_STR(originator_cache_guid);
diff --git a/sync/sync_core.gypi b/sync/sync_core.gypi
index 716f2af..d9eb244 100644
--- a/sync/sync_core.gypi
+++ b/sync/sync_core.gypi
@@ -137,7 +137,12 @@
'syncable/nigori_util.h',
'syncable/on_disk_directory_backing_store.cc',
'syncable/on_disk_directory_backing_store.h',
+ 'syncable/parent_child_index.cc',
+ 'syncable/parent_child_index.h',
+ 'syncable/scoped_index_updater.h',
'syncable/scoped_kernel_lock.h',
+ 'syncable/scoped_parent_child_index_updater.cc',
+ 'syncable/scoped_parent_child_index_updater.h',
'syncable/syncable-inl.h',
'syncable/syncable_base_transaction.cc',
'syncable/syncable_base_transaction.h',
diff --git a/sync/sync_tests.gypi b/sync/sync_tests.gypi
index 059c930..30e06db 100644
--- a/sync/sync_tests.gypi
+++ b/sync/sync_tests.gypi
@@ -243,7 +243,6 @@
'engine/apply_control_data_updates_unittest.cc',
'engine/apply_updates_and_resolve_conflicts_command_unittest.cc',
'engine/backoff_delay_provider_unittest.cc',
- 'engine/build_commit_command_unittest.cc',
'engine/download_updates_command_unittest.cc',
'engine/model_changing_syncer_command_unittest.cc',
'engine/process_commit_response_command_unittest.cc',
@@ -267,6 +266,7 @@
'syncable/directory_backing_store_unittest.cc',
'syncable/model_type_unittest.cc',
'syncable/nigori_util_unittest.cc',
+ 'syncable/parent_child_index_unittest.cc',
'syncable/syncable_enum_conversions_unittest.cc',
'syncable/syncable_id_unittest.cc',
'syncable/syncable_unittest.cc',
diff --git a/sync/syncable/directory.cc b/sync/syncable/directory.cc
index a9e0f65..a844e66 100644
--- a/sync/syncable/directory.cc
+++ b/sync/syncable/directory.cc
@@ -4,17 +4,18 @@
#include "sync/syncable/directory.h"
+#include "base/base64.h"
#include "base/debug/trace_event.h"
-#include "base/perftimer.h"
#include "base/stl_util.h"
#include "base/string_number_conversions.h"
-#include "sync/internal_api/public/base/node_ordinal.h"
+#include "sync/internal_api/public/base/unique_position.h"
#include "sync/internal_api/public/util/unrecoverable_error_handler.h"
#include "sync/syncable/entry.h"
#include "sync/syncable/entry_kernel.h"
#include "sync/syncable/in_memory_directory_backing_store.h"
#include "sync/syncable/on_disk_directory_backing_store.h"
#include "sync/syncable/scoped_index_updater.h"
+#include "sync/syncable/scoped_parent_child_index_updater.h"
#include "sync/syncable/syncable-inl.h"
#include "sync/syncable/syncable_base_transaction.h"
#include "sync/syncable/syncable_changes_version.h"
@@ -36,7 +37,6 @@ void InitializeIndexEntry(EntryKernel* entry,
index->insert(entry);
}
}
-
}
// static
@@ -44,29 +44,6 @@ bool ClientTagIndexer::ShouldInclude(const EntryKernel* a) {
return !a->ref(UNIQUE_CLIENT_TAG).empty();
}
-bool ParentIdAndHandleIndexer::Comparator::operator() (
- const syncable::EntryKernel* a,
- const syncable::EntryKernel* b) const {
- int cmp = a->ref(PARENT_ID).compare(b->ref(PARENT_ID));
- if (cmp != 0)
- return cmp < 0;
-
- const NodeOrdinal& a_position = a->ref(SERVER_ORDINAL_IN_PARENT);
- const NodeOrdinal& b_position = b->ref(SERVER_ORDINAL_IN_PARENT);
- if (!a_position.Equals(b_position))
- return a_position.LessThan(b_position);
-
- cmp = a->ref(ID).compare(b->ref(ID));
- return cmp < 0;
-}
-
-// static
-bool ParentIdAndHandleIndexer::ShouldInclude(const EntryKernel* a) {
- // This index excludes deleted items and the root item. The root
- // item is excluded so that it doesn't show up as a child of itself.
- return !a->ref(IS_DEL) && !a->ref(ID).IsRoot();
-}
-
// static
const base::FilePath::CharType Directory::kSyncDatabaseFilename[] =
FILE_PATH_LITERAL("SyncData.sqlite3");
@@ -116,7 +93,7 @@ Directory::Kernel::Kernel(
name(name),
metahandles_index(new Directory::MetahandlesIndex),
ids_index(new Directory::IdsIndex),
- parent_id_child_index(new Directory::ParentIdChildIndex),
+ parent_child_index(new ParentChildIndex),
client_tag_index(new Directory::ClientTagIndex),
unsynced_metahandles(new MetahandleSet),
dirty_metahandles(new MetahandleSet),
@@ -135,7 +112,7 @@ Directory::Kernel::~Kernel() {
delete unsynced_metahandles;
delete dirty_metahandles;
delete metahandles_to_purge;
- delete parent_id_child_index;
+ delete parent_child_index;
delete client_tag_index;
delete ids_index;
STLDeleteElements(metahandles_index);
@@ -181,8 +158,8 @@ void Directory::InitializeIndices() {
MetahandlesIndex::iterator it = kernel_->metahandles_index->begin();
for (; it != kernel_->metahandles_index->end(); ++it) {
EntryKernel* entry = *it;
- InitializeIndexEntry<ParentIdAndHandleIndexer>(entry,
- kernel_->parent_id_child_index);
+ if (ParentChildIndex::ShouldInclude(entry))
+ kernel_->parent_child_index->Insert(entry);
InitializeIndexEntry<IdIndexer>(entry, kernel_->ids_index);
InitializeIndexEntry<ClientTagIndexer>(entry, kernel_->client_tag_index);
const int64 metahandle = entry->ref(META_HANDLE);
@@ -367,8 +344,8 @@ bool Directory::InsertEntry(WriteTransaction* trans,
trans))
return false;
- if (!entry->ref(IS_DEL)) {
- if (!SyncAssert(kernel_->parent_id_child_index->insert(entry).second,
+ if (ParentChildIndex::ShouldInclude(entry)) {
+ if (!SyncAssert(kernel_->parent_child_index->Insert(entry),
FROM_HERE,
error,
trans)) {
@@ -399,8 +376,8 @@ bool Directory::ReindexId(WriteTransaction* trans,
{
// Update the indices that depend on the ID field.
ScopedIndexUpdater<IdIndexer> updater_a(lock, entry, kernel_->ids_index);
- ScopedIndexUpdater<ParentIdAndHandleIndexer> updater_b(lock, entry,
- kernel_->parent_id_child_index);
+ ScopedParentChildIndexUpdater updater_b(lock, entry,
+ kernel_->parent_child_index);
entry->put(ID, new_id);
}
return true;
@@ -413,8 +390,8 @@ bool Directory::ReindexParentId(WriteTransaction* trans,
{
// Update the indices that depend on the PARENT_ID field.
- ScopedIndexUpdater<ParentIdAndHandleIndexer> index_updater(lock, entry,
- kernel_->parent_id_child_index);
+ ScopedParentChildIndexUpdater index_updater(lock, entry,
+ kernel_->parent_child_index);
entry->put(PARENT_ID, new_parent_id);
}
return true;
@@ -550,7 +527,7 @@ bool Directory::VacuumAfterSaveChanges(const SaveChangesSnapshot& snapshot) {
// Might not be in it
num_erased = kernel_->client_tag_index->erase(entry);
DCHECK_EQ(entry->ref(UNIQUE_CLIENT_TAG).empty(), !num_erased);
- if (!SyncAssert(!kernel_->parent_id_child_index->count(entry),
+ if (!SyncAssert(!kernel_->parent_child_index->Contains(entry),
FROM_HERE,
"Deleted entry still present",
(&trans)))
@@ -589,8 +566,6 @@ bool Directory::PurgeEntriesWithTypeIn(ModelTypeSet types,
// Note the dance around incrementing |it|, since we sometimes erase().
if ((IsRealDataType(local_type) && types.Has(local_type)) ||
(IsRealDataType(server_type) && types.Has(server_type))) {
- if (!UnlinkEntryFromOrder(*it, &trans, &lock, DATA_TYPE_PURGE))
- return false;
int64 handle = (*it)->ref(META_HANDLE);
kernel_->metahandles_to_purge->insert(handle);
@@ -606,8 +581,8 @@ bool Directory::PurgeEntriesWithTypeIn(ModelTypeSet types,
num_erased =
kernel_->unapplied_update_metahandles[server_type].erase(handle);
DCHECK_EQ(entry->ref(IS_UNAPPLIED_UPDATE), num_erased > 0);
- num_erased = kernel_->parent_id_child_index->erase(entry);
- DCHECK_EQ(entry->ref(IS_DEL), !num_erased);
+ if (kernel_->parent_child_index->Contains(entry))
+ kernel_->parent_child_index->Remove(entry);
kernel_->metahandles_index->erase(it++);
if ((types_to_journal.Has(local_type) ||
@@ -730,14 +705,6 @@ bool Directory::InitialSyncEndedForType(
return entry.good() && entry.Get(syncable::BASE_VERSION) != CHANGES_VERSION;
}
-template <class T> void Directory::TestAndSet(
- T* kernel_data, const T* data_to_set) {
- if (*kernel_data != *data_to_set) {
- *kernel_data = *data_to_set;
- kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
- }
-}
-
string Directory::store_birthday() const {
ScopedKernelLock lock(this);
return kernel_->persisted_info.store_birthday;
@@ -1030,76 +997,6 @@ void Directory::SetInvariantCheckLevel(InvariantCheckLevel check_level) {
invariant_check_level_ = check_level;
}
-bool Directory::UnlinkEntryFromOrder(EntryKernel* entry,
- WriteTransaction* trans,
- ScopedKernelLock* lock,
- UnlinkReason unlink_reason) {
- if (!SyncAssert(!trans || this == trans->directory(),
- FROM_HERE,
- "Transaction not pointing to the right directory",
- trans))
- return false;
- Id old_previous = entry->ref(PREV_ID);
- Id old_next = entry->ref(NEXT_ID);
-
- entry->put(NEXT_ID, entry->ref(ID));
- entry->put(PREV_ID, entry->ref(ID));
- entry->mark_dirty(kernel_->dirty_metahandles);
-
- if (!old_previous.IsRoot()) {
- if (old_previous == old_next) {
- // Note previous == next doesn't imply previous == next == Get(ID). We
- // could have prev==next=="c-XX" and Get(ID)=="sX..." if an item was added
- // and deleted before receiving the server ID in the commit response.
- if (!SyncAssert(
- (old_next == entry->ref(ID)) || !old_next.ServerKnows(),
- FROM_HERE,
- "Encounteered inconsistent entry while deleting",
- trans)) {
- return false;
- }
- return true; // Done if we were already self-looped (hence unlinked).
- }
- EntryKernel* previous_entry = GetEntryById(old_previous, lock);
- ModelType type = GetModelTypeFromSpecifics(entry->ref(SPECIFICS));
- // TODO(tim): Multiple asserts here for bug 101039 investigation.
- if (type == AUTOFILL) {
- if (!SyncAssert(previous_entry != NULL,
- FROM_HERE,
- "Could not find previous autofill entry",
- trans)) {
- return false;
- }
- } else {
- if (!SyncAssert(previous_entry != NULL,
- FROM_HERE,
- "Could not find previous entry",
- trans)) {
- return false;
- }
- }
- if (unlink_reason == NODE_MANIPULATION)
- trans->SaveOriginal(previous_entry);
- previous_entry->put(NEXT_ID, old_next);
- previous_entry->mark_dirty(kernel_->dirty_metahandles);
- }
-
- if (!old_next.IsRoot()) {
- EntryKernel* next_entry = GetEntryById(old_next, lock);
- if (!SyncAssert(next_entry != NULL,
- FROM_HERE,
- "Could not find next entry",
- trans)) {
- return false;
- }
- if (unlink_reason == NODE_MANIPULATION)
- trans->SaveOriginal(next_entry);
- next_entry->put(PREV_ID, old_previous);
- next_entry->mark_dirty(kernel_->dirty_metahandles);
- }
- return true;
-}
-
int64 Directory::NextMetahandle() {
ScopedKernelLock lock(this);
int64 metahandle = (kernel_->next_metahandle)++;
@@ -1121,171 +1018,161 @@ Id Directory::NextId() {
bool Directory::HasChildren(BaseTransaction* trans, const Id& id) {
ScopedKernelLock lock(this);
- return (GetPossibleFirstChild(lock, id) != NULL);
+ return kernel_->parent_child_index->GetChildren(id) != NULL;
}
-bool Directory::GetFirstChildId(BaseTransaction* trans,
- const Id& parent_id,
- Id* first_child_id) {
+Id Directory::GetFirstChildId(BaseTransaction* trans,
+ const EntryKernel* parent) {
+ DCHECK(parent);
+ DCHECK(parent->ref(IS_DIR));
+
ScopedKernelLock lock(this);
- EntryKernel* entry = GetPossibleFirstChild(lock, parent_id);
- if (!entry) {
- *first_child_id = Id();
- return true;
- }
+ const OrderedChildSet* children =
+ kernel_->parent_child_index->GetChildren(parent->ref(ID));
- // Walk to the front of the list; the server position ordering
- // is commonly identical to the linked-list ordering, but pending
- // unsynced or unapplied items may diverge.
- while (!entry->ref(PREV_ID).IsRoot()) {
- entry = GetEntryById(entry->ref(PREV_ID), &lock);
- if (!entry) {
- *first_child_id = Id();
- return false;
- }
- }
- *first_child_id = entry->ref(ID);
- return true;
+ // We're expected to return root if there are no children.
+ if (!children)
+ return Id();
+
+ return (*children->begin())->ref(ID);
}
-bool Directory::GetLastChildIdForTest(
- BaseTransaction* trans, const Id& parent_id, Id* last_child_id) {
+syncable::Id Directory::GetPredecessorId(EntryKernel* e) {
ScopedKernelLock lock(this);
- EntryKernel* entry = GetPossibleFirstChild(lock, parent_id);
- if (!entry) {
- *last_child_id = Id();
- return true;
- }
- // Walk to the back of the list; the server position ordering
- // is commonly identical to the linked-list ordering, but pending
- // unsynced or unapplied items may diverge.
- while (!entry->ref(NEXT_ID).IsRoot()) {
- entry = GetEntryById(entry->ref(NEXT_ID), &lock);
- if (!entry) {
- *last_child_id = Id();
- return false;
- }
+ DCHECK(ParentChildIndex::ShouldInclude(e));
+ const OrderedChildSet* children =
+ kernel_->parent_child_index->GetChildren(e->ref(PARENT_ID));
+ DCHECK(children && !children->empty());
+ OrderedChildSet::const_iterator i = children->find(e);
+ DCHECK(i != children->end());
+
+ if (i == children->begin()) {
+ return Id();
+ } else {
+ i--;
+ return (*i)->ref(ID);
}
-
- *last_child_id = entry->ref(ID);
- return true;
}
-Id Directory::ComputePrevIdFromServerPosition(
- const EntryKernel* entry,
- const syncable::Id& parent_id) {
+syncable::Id Directory::GetSuccessorId(EntryKernel* e) {
ScopedKernelLock lock(this);
- // Find the natural insertion point in the parent_id_child_index, and
- // work back from there, filtering out ineligible candidates.
- ParentIdChildIndex::iterator sibling = LocateInParentChildIndex(
- lock,
- parent_id,
- NodeOrdinalToInt64(entry->ref(SERVER_ORDINAL_IN_PARENT)),
- entry->ref(ID));
- ParentIdChildIndex::iterator first_sibling =
- GetParentChildIndexLowerBound(lock, parent_id);
-
- while (sibling != first_sibling) {
- --sibling;
- EntryKernel* candidate = *sibling;
-
- // The item itself should never be in the range under consideration.
- DCHECK_NE(candidate->ref(META_HANDLE), entry->ref(META_HANDLE));
-
- // Ignore unapplied updates -- they might not even be server-siblings.
- if (candidate->ref(IS_UNAPPLIED_UPDATE))
- continue;
+ DCHECK(ParentChildIndex::ShouldInclude(e));
+ const OrderedChildSet* children =
+ kernel_->parent_child_index->GetChildren(e->ref(PARENT_ID));
+ DCHECK(children && !children->empty());
+ OrderedChildSet::const_iterator i = children->find(e);
+ DCHECK(i != children->end());
+
+ i++;
+ if (i == children->end()) {
+ return Id();
+ } else {
+ return (*i)->ref(ID);
+ }
+}
- // We can't trust the SERVER_ fields of unsynced items, but they are
- // potentially legitimate local predecessors. In the case where
- // |update_item| and an unsynced item wind up in the same insertion
- // position, we need to choose how to order them. The following check puts
- // the unapplied update first; removing it would put the unsynced item(s)
- // first.
- if (candidate->ref(IS_UNSYNCED))
- continue;
+// TODO(rlarocque): Remove all support for placing ShouldMaintainPosition()
+// items as siblings of items that do not maintain positions. It is required
+// only for tests. See crbug.com/178282.
+void Directory::PutPredecessor(EntryKernel* e, EntryKernel* predecessor) {
+ DCHECK(!e->ref(IS_DEL));
+ if (!e->ShouldMaintainPosition()) {
+ DCHECK(!e->ref(UNIQUE_POSITION).IsValid());
+ return;
+ }
+ std::string suffix = e->ref(UNIQUE_BOOKMARK_TAG);
+ DCHECK(!suffix.empty());
- // Skip over self-looped items, which are not valid predecessors. This
- // shouldn't happen in practice, but is worth defending against.
- if (candidate->ref(PREV_ID) == candidate->ref(NEXT_ID) &&
- !candidate->ref(PREV_ID).IsRoot()) {
- NOTREACHED();
- continue;
+ // Remove our item from the ParentChildIndex and remember to re-add it later.
+ ScopedKernelLock lock(this);
+ ScopedParentChildIndexUpdater updater(lock, e, kernel_->parent_child_index);
+
+ // Note: The ScopedParentChildIndexUpdater will update this set for us as we
+ // leave this function.
+ const OrderedChildSet* siblings =
+ kernel_->parent_child_index->GetChildren(e->ref(PARENT_ID));
+
+ if (!siblings) {
+ // This parent currently has no other children.
+ DCHECK(predecessor->ref(ID).IsRoot());
+ UniquePosition pos = UniquePosition::InitialPosition(suffix);
+ e->put(UNIQUE_POSITION, pos);
+ return;
+ }
+
+ if (predecessor->ref(ID).IsRoot()) {
+ // We have at least one sibling, and we're inserting to the left of them.
+ UniquePosition successor_pos = (*siblings->begin())->ref(UNIQUE_POSITION);
+
+ UniquePosition pos;
+ if (!successor_pos.IsValid()) {
+ // If all our successors are of non-positionable types, just create an
+ // initial position. We arbitrarily choose to sort invalid positions to
+ // the right of the valid positions.
+ //
+ // We really shouldn't need to support this. See TODO above.
+ pos = UniquePosition::InitialPosition(suffix);
+ } else {
+ DCHECK(!siblings->empty());
+ pos = UniquePosition::Before(successor_pos, suffix);
}
- return candidate->ref(ID);
+
+ e->put(UNIQUE_POSITION, pos);
+ return;
}
- // This item will be the first in the sibling order.
- return Id();
-}
-Directory::ParentIdChildIndex::iterator Directory::LocateInParentChildIndex(
- const ScopedKernelLock& lock,
- const Id& parent_id,
- int64 position_in_parent,
- const Id& item_id_for_tiebreaking) {
- kernel_->needle.put(PARENT_ID, parent_id);
- kernel_->needle.put(SERVER_ORDINAL_IN_PARENT,
- Int64ToNodeOrdinal(position_in_parent));
- kernel_->needle.put(ID, item_id_for_tiebreaking);
- return kernel_->parent_id_child_index->lower_bound(&kernel_->needle);
-}
+ // We can't support placing an item after an invalid position. Fortunately,
+ // the tests don't exercise this particular case. We should not support
+ // siblings with invalid positions at all. See TODO above.
+ DCHECK(predecessor->ref(UNIQUE_POSITION).IsValid());
+
+ OrderedChildSet::const_iterator neighbour = siblings->find(predecessor);
+ DCHECK(neighbour != siblings->end());
+
+ ++neighbour;
+ if (neighbour == siblings->end()) {
+ // Inserting at the end of the list.
+ UniquePosition pos = UniquePosition::After(
+ predecessor->ref(UNIQUE_POSITION),
+ suffix);
+ e->put(UNIQUE_POSITION, pos);
+ return;
+ }
-Directory::ParentIdChildIndex::iterator
-Directory::GetParentChildIndexLowerBound(const ScopedKernelLock& lock,
- const Id& parent_id) {
- // Peg the parent ID, and use the least values for the remaining
- // index variables.
- return LocateInParentChildIndex(lock, parent_id,
- std::numeric_limits<int64>::min(),
- Id::GetLeastIdForLexicographicComparison());
-}
+ EntryKernel* successor = *neighbour;
+
+ // Another mixed valid and invalid position case. This one could be supported
+ // in theory, but we're trying to deprecate support for siblings with and
+ // without valid positions. See TODO above.
+ DCHECK(successor->ref(UNIQUE_POSITION).IsValid());
-Directory::ParentIdChildIndex::iterator
-Directory::GetParentChildIndexUpperBound(const ScopedKernelLock& lock,
- const Id& parent_id) {
- // The upper bound of |parent_id|'s range is the lower
- // bound of |++parent_id|'s range.
- return GetParentChildIndexLowerBound(lock,
- parent_id.GetLexicographicSuccessor());
+ // Finally, the normal case: inserting between two elements.
+ UniquePosition pos = UniquePosition::Between(
+ predecessor->ref(UNIQUE_POSITION),
+ successor->ref(UNIQUE_POSITION),
+ suffix);
+ e->put(UNIQUE_POSITION, pos);
+ return;
}
+// TODO(rlarocque): Avoid this indirection. Just return the set.
void Directory::AppendChildHandles(const ScopedKernelLock& lock,
const Id& parent_id,
Directory::ChildHandles* result) {
- typedef ParentIdChildIndex::iterator iterator;
- CHECK(result);
- for (iterator i = GetParentChildIndexLowerBound(lock, parent_id),
- end = GetParentChildIndexUpperBound(lock, parent_id);
- i != end; ++i) {
+ const OrderedChildSet* children =
+ kernel_->parent_child_index->GetChildren(parent_id);
+ if (!children)
+ return;
+
+ for (OrderedChildSet::const_iterator i = children->begin();
+ i != children->end(); ++i) {
DCHECK_EQ(parent_id, (*i)->ref(PARENT_ID));
result->push_back((*i)->ref(META_HANDLE));
}
}
-EntryKernel* Directory::GetPossibleFirstChild(
- const ScopedKernelLock& lock, const Id& parent_id) {
- // We can use the server positional ordering as a hint because it's generally
- // in sync with the local (linked-list) positional ordering, and we have an
- // index on it.
- ParentIdChildIndex::iterator candidate =
- GetParentChildIndexLowerBound(lock, parent_id);
- ParentIdChildIndex::iterator end_range =
- GetParentChildIndexUpperBound(lock, parent_id);
- for (; candidate != end_range; ++candidate) {
- EntryKernel* entry = *candidate;
- // Filter out self-looped items, which are temporarily not in the child
- // ordering.
- if (entry->ref(PREV_ID).IsRoot() ||
- entry->ref(PREV_ID) != entry->ref(NEXT_ID)) {
- return entry;
- }
- }
- // There were no children in the linked list.
- return NULL;
-}
-
ScopedKernelLock::ScopedKernelLock(const Directory* dir)
: scoped_lock_(dir->kernel_->mutex), dir_(const_cast<Directory*>(dir)) {
}
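
PutPredecessor() reduces sibling placement to four UniquePosition
constructors: InitialPosition() for an empty folder, Before() when inserting
at the front, After() when appending, and Between() for the general case. A
condensed sketch of that decision (ignoring the mixed valid/invalid position
cases called out in the TODOs), assuming |siblings| is the folder's
OrderedChildSet and |successor| is the entry following |predecessor|, or NULL:

  UniquePosition pos;
  if (!siblings || siblings->empty()) {
    pos = UniquePosition::InitialPosition(suffix);              // only child
  } else if (predecessor == NULL) {
    UniquePosition first = (*siblings->begin())->ref(UNIQUE_POSITION);
    pos = UniquePosition::Before(first, suffix);                // new left-most item
  } else if (successor == NULL) {
    pos = UniquePosition::After(predecessor->ref(UNIQUE_POSITION),
                                suffix);                        // new right-most item
  } else {
    pos = UniquePosition::Between(predecessor->ref(UNIQUE_POSITION),
                                  successor->ref(UNIQUE_POSITION),
                                  suffix);                      // between two siblings
  }
  e->put(UNIQUE_POSITION, pos);
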
diff --git a/sync/syncable/directory.h b/sync/syncable/directory.h
index 51ceaf8..61c5522 100644
--- a/sync/syncable/directory.h
+++ b/sync/syncable/directory.h
@@ -17,6 +17,7 @@
#include "sync/syncable/dir_open_result.h"
#include "sync/syncable/entry_kernel.h"
#include "sync/syncable/metahandle_set.h"
+#include "sync/syncable/parent_child_index.h"
#include "sync/syncable/scoped_kernel_lock.h"
#include "sync/syncable/syncable_delete_journal.h"
@@ -86,21 +87,6 @@ struct ClientTagIndexer {
static bool ShouldInclude(const EntryKernel* a);
};
-// This index contains EntryKernels ordered by parent ID and metahandle.
-// It allows efficient lookup of the children of a given parent.
-struct ParentIdAndHandleIndexer {
- // This index is of the parent ID and metahandle. We use a custom
- // comparator.
- class Comparator {
- public:
- bool operator() (const syncable::EntryKernel* a,
- const syncable::EntryKernel* b) const;
- };
-
- // This index does not include deleted items.
- static bool ShouldInclude(const EntryKernel* a);
-};
-
// Given an Indexer providing the semantics of an index, defines the
// set type used to actually contain the index.
template <typename Indexer>
@@ -108,13 +94,6 @@ struct Index {
typedef std::set<EntryKernel*, typename Indexer::Comparator> Set;
};
-// Reason for unlinking.
-enum UnlinkReason {
- NODE_MANIPULATION, // To be used by any operation manipulating the linked
- // list.
- DATA_TYPE_PURGE // To be used when purging a dataype.
-};
-
enum InvariantCheckLevel {
OFF = 0, // No checking.
VERIFY_CHANGES = 1, // Checks only mutated entries. Does not check hierarchy.
@@ -126,9 +105,7 @@ class SYNC_EXPORT Directory {
friend class Entry;
friend class MutableEntry;
friend class ReadTransaction;
- friend class ReadTransactionWithoutDB;
friend class ScopedKernelLock;
- friend class ScopedKernelUnlock;
friend class WriteTransaction;
friend class SyncableDirectoryTest;
friend class syncer::TestUserShare;
@@ -233,9 +210,8 @@ class SYNC_EXPORT Directory {
void Close();
int64 NextMetahandle();
- // Always returns a negative id. Positive client ids are generated
- // by the server only.
- Id NextId();
+ // Returns a negative integer unique to this client.
+ syncable::Id NextId();
bool good() const { return NULL != kernel_; }
@@ -320,13 +296,6 @@ class SYNC_EXPORT Directory {
const Id& new_parent_id);
void ClearDirtyMetahandles();
- // These don't do semantic checking.
- // The semantic checking is implemented higher up.
- bool UnlinkEntryFromOrder(EntryKernel* entry,
- WriteTransaction* trans,
- ScopedKernelLock* lock,
- UnlinkReason unlink_reason);
-
DirOpenResult OpenImpl(
const std::string& name,
DirectoryChangeDelegate* delegate,
@@ -337,8 +306,6 @@ class SYNC_EXPORT Directory {
// before calling.
EntryKernel* GetEntryById(const Id& id, ScopedKernelLock* const lock);
- template <class T> void TestAndSet(T* kernel_data, const T* data_to_set);
-
public:
typedef std::vector<int64> ChildHandles;
@@ -361,23 +328,27 @@ class SYNC_EXPORT Directory {
// and fill in |*first_child_id| with its id. Fills in a root Id if
// parent has no children. Returns true if the first child was
// successfully found, or false if an error was encountered.
- bool GetFirstChildId(BaseTransaction* trans, const Id& parent_id,
- Id* first_child_id) WARN_UNUSED_RESULT;
+ Id GetFirstChildId(BaseTransaction* trans, const EntryKernel* parent);
- // Find the last child in the positional ordering under a parent,
- // and fill in |*first_child_id| with its id. Fills in a root Id if
- // parent has no children. Returns true if the first child was
- // successfully found, or false if an error was encountered.
- bool GetLastChildIdForTest(BaseTransaction* trans, const Id& parent_id,
- Id* last_child_id) WARN_UNUSED_RESULT;
+ // These functions allow one to fetch the next or previous item under
+ // the same folder. Returns the "root" ID if there is no predecessor
+ // or successor.
+ //
+ // TODO(rlarocque): These functions are used mainly for tree traversal. We
+ // should replace these with an iterator API. See crbug.com/178275.
+ syncable::Id GetPredecessorId(EntryKernel*);
+ syncable::Id GetSuccessorId(EntryKernel*);
- // Compute a local predecessor position for |update_item|. The position
- // is determined by the SERVER_POSITION_IN_PARENT value of |update_item|,
- // as well as the SERVER_POSITION_IN_PARENT values of any up-to-date
- // children of |parent_id|.
- Id ComputePrevIdFromServerPosition(
- const EntryKernel* update_item,
- const syncable::Id& parent_id);
+ // Places |e| as a successor to |predecessor|. If |predecessor| is NULL,
+ // |e| will be placed as the left-most item in its folder.
+ //
+ // Both |e| and |predecessor| must be valid entries under the same parent.
+ //
+ // TODO(rlarocque): This function includes limited support for placing items
+  // with valid positions (i.e. Bookmarks) as siblings of items that have no set
+  // ordering (i.e. Autofill items). This support is required only for tests,
+ // and should be removed. See crbug.com/178282.
+ void PutPredecessor(EntryKernel* e, EntryKernel* predecessor);
// SaveChanges works by taking a consistent snapshot of the current Directory
// state and indices (by deep copy) under a ReadTransaction, passing this
@@ -486,12 +457,9 @@ class SYNC_EXPORT Directory {
Directory& operator = (const Directory&);
public:
+ // These contain all items, including IS_DEL items.
typedef Index<MetahandleIndexer>::Set MetahandlesIndex;
typedef Index<IdIndexer>::Set IdsIndex;
- // All entries in memory must be in both the MetahandlesIndex and
- // the IdsIndex, but only non-deleted entries will be the
- // ParentIdChildIndex.
- typedef Index<ParentIdAndHandleIndexer>::Set ParentIdChildIndex;
// Contains both deleted and existing entries with tags.
// We can't store only existing tags because the client would create
@@ -538,7 +506,11 @@ class SYNC_EXPORT Directory {
MetahandlesIndex* metahandles_index;
// Entries indexed by id
IdsIndex* ids_index;
- ParentIdChildIndex* parent_id_child_index;
+
+ // Contains non-deleted items, indexed according to parent and position
+ // within parent. Protected by the ScopedKernelLock.
+ ParentChildIndex* parent_child_index;
+
ClientTagIndex* client_tag_index;
// So we don't have to create an EntryKernel every time we want to
// look something up in an index. Needle in haystack metaphor.
@@ -583,36 +555,11 @@ class SYNC_EXPORT Directory {
const WeakHandle<TransactionObserver> transaction_observer;
};
- // Helper method used to do searches on |parent_id_child_index|.
- ParentIdChildIndex::iterator LocateInParentChildIndex(
- const ScopedKernelLock& lock,
- const Id& parent_id,
- int64 position_in_parent,
- const Id& item_id_for_tiebreaking);
-
- // Return an iterator to the beginning of the range of the children of
- // |parent_id| in the kernel's parent_id_child_index.
- ParentIdChildIndex::iterator GetParentChildIndexLowerBound(
- const ScopedKernelLock& lock,
- const Id& parent_id);
-
- // Return an iterator to just past the end of the range of the
- // children of |parent_id| in the kernel's parent_id_child_index.
- ParentIdChildIndex::iterator GetParentChildIndexUpperBound(
- const ScopedKernelLock& lock,
- const Id& parent_id);
-
// Append the handles of the children of |parent_id| to |result|.
void AppendChildHandles(
const ScopedKernelLock& lock,
const Id& parent_id, Directory::ChildHandles* result);
- // Return a pointer to what is probably (but not certainly) the
- // first child of |parent_id|, or NULL if |parent_id| definitely has
- // no children.
- EntryKernel* GetPossibleFirstChild(
- const ScopedKernelLock& lock, const Id& parent_id);
-
Kernel* kernel_;
scoped_ptr<DirectoryBackingStore> store_;
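
With the ParentChildIndex in place, GetPredecessorId() and GetSuccessorId()
amount to a set lookup followed by a single iterator step within the folder's
ordered child set. A generic illustration of that pattern on a plain std::set,
standing in for the real OrderedChildSet of EntryKernel pointers:

  #include <set>
  #include "base/logging.h"

  typedef std::set<int> Siblings;   // toy stand-in for an ordered child set

  // Returns the neighbouring elements of |item|, using 0 the way the real
  // code uses the root Id to mean "no predecessor/successor".
  void FindNeighbours(const Siblings& siblings, int item,
                      int* predecessor, int* successor) {
    Siblings::const_iterator i = siblings.find(item);
    CHECK(i != siblings.end());
    if (i == siblings.begin()) {
      *predecessor = 0;
    } else {
      Siblings::const_iterator prev = i;
      --prev;
      *predecessor = *prev;
    }
    Siblings::const_iterator next = i;
    ++next;
    *successor = (next == siblings.end()) ? 0 : *next;
  }
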
diff --git a/sync/syncable/directory_backing_store.cc b/sync/syncable/directory_backing_store.cc
index 0683957..f0b87be 100644
--- a/sync/syncable/directory_backing_store.cc
+++ b/sync/syncable/directory_backing_store.cc
@@ -10,13 +10,8 @@
#include "base/base64.h"
#include "base/debug/trace_event.h"
-#include "base/file_util.h"
-#include "base/hash_tables.h"
#include "base/logging.h"
-#include "base/metrics/histogram.h"
#include "base/rand_util.h"
-#include "base/stl_util.h"
-#include "base/string_number_conversions.h"
#include "base/stringprintf.h"
#include "base/time.h"
#include "sql/connection.h"
@@ -27,6 +22,7 @@
#include "sync/protocol/sync.pb.h"
#include "sync/syncable/syncable-inl.h"
#include "sync/syncable/syncable_columns.h"
+#include "sync/syncable/syncable_util.h"
#include "sync/util/time.h"
using std::string;
@@ -39,7 +35,7 @@ namespace syncable {
static const string::size_type kUpdateStatementBufferSize = 2048;
// Increment this version whenever updating DB tables.
-const int32 kCurrentDBVersion = 85;
+const int32 kCurrentDBVersion = 86;
// Iterate over the fields of |entry| and bind each to |statement| for
// updating. Returns the number of args bound.
@@ -64,13 +60,14 @@ void BindFields(const EntryKernel& entry,
for ( ; i < STRING_FIELDS_END; ++i) {
statement->BindString(index++, entry.ref(static_cast<StringField>(i)));
}
- std::string temp;
for ( ; i < PROTO_FIELDS_END; ++i) {
+ std::string temp;
entry.ref(static_cast<ProtoField>(i)).SerializeToString(&temp);
statement->BindBlob(index++, temp.data(), temp.length());
}
- for( ; i < ORDINAL_FIELDS_END; ++i) {
- temp = entry.ref(static_cast<OrdinalField>(i)).ToInternalValue();
+ for ( ; i < UNIQUE_POSITION_FIELDS_END; ++i) {
+ std::string temp;
+ entry.ref(static_cast<UniquePositionField>(i)).SerializeToString(&temp);
statement->BindBlob(index++, temp.data(), temp.length());
}
}
@@ -104,19 +101,18 @@ scoped_ptr<EntryKernel> UnpackEntry(sql::Statement* statement) {
kernel->mutable_ref(static_cast<ProtoField>(i)).ParseFromArray(
statement->ColumnBlob(i), statement->ColumnByteLength(i));
}
- for( ; i < ORDINAL_FIELDS_END; ++i) {
+ for ( ; i < UNIQUE_POSITION_FIELDS_END; ++i) {
std::string temp;
statement->ColumnBlobAsString(i, &temp);
- NodeOrdinal unpacked_ord(temp);
- // Its safe to assume that an invalid ordinal is a sign that
- // some external corruption has occurred. Return NULL to force
- // a re-download of the sync data.
- if(!unpacked_ord.IsValid()) {
- DVLOG(1) << "Unpacked invalid ordinal. Signaling that the DB is corrupt";
+ sync_pb::UniquePosition proto;
+ if (!proto.ParseFromString(temp)) {
+ DVLOG(1) << "Unpacked invalid position. Assuming the DB is corrupt";
return scoped_ptr<EntryKernel>(NULL);
}
- kernel->mutable_ref(static_cast<OrdinalField>(i)) = unpacked_ord;
+
+ kernel->mutable_ref(static_cast<UniquePositionField>(i)) =
+ UniquePosition::FromProto(proto);
}
return kernel.Pass();
}
@@ -400,6 +396,13 @@ bool DirectoryBackingStore::InitializeTables() {
version_on_disk = 85;
}
+ // Version 86 migration converts bookmarks to the unique positioning system.
+ // It also introduces a new unique_bookmark_tag field for each bookmark.
+ if (version_on_disk == 85) {
+ if (MigrateVersion85To86())
+ version_on_disk = 86;
+ }
+
// If one of the migrations requested it, drop columns that aren't current.
// It's only safe to do this after migrating all the way to the current
// version.
@@ -961,7 +964,8 @@ bool DirectoryBackingStore::MigrateVersion76To77() {
#if defined(OS_WIN)
// On Windows, we used to store timestamps in FILETIME format (100s of
// ns since Jan 1, 1601). Magic numbers taken from
-// http://stackoverflow.com/questions/5398557/java-library-for-dealing-with-win32-filetime
+// http://stackoverflow.com/questions/5398557/
+// java-library-for-dealing-with-win32-filetime
// .
#define TO_UNIX_TIME_MS(x) #x " = " #x " / 10000 - 11644473600000"
#else
@@ -1085,7 +1089,7 @@ bool DirectoryBackingStore::MigrateVersion83To84() {
}
bool DirectoryBackingStore::MigrateVersion84To85() {
- // Version 84 removes the initial_sync_ended flag.
+ // Version 85 removes the initial_sync_ended flag.
if (!db_->Execute("ALTER TABLE models RENAME TO temp_models"))
return false;
if (!CreateModelsTable())
@@ -1101,6 +1105,131 @@ bool DirectoryBackingStore::MigrateVersion84To85() {
return true;
}
+bool DirectoryBackingStore::MigrateVersion85To86() {
+ // Version 86 removes the local NEXT_ID and PREV_ID fields and the server
+ // ordinal field (SERVER_{POSITION,ORDINAL}_IN_PARENT), replacing them with
+ // UNIQUE_POSITION and SERVER_UNIQUE_POSITION.
+ if (!db_->Execute("ALTER TABLE metas ADD COLUMN "
+ "server_unique_position BLOB")) {
+ return false;
+ }
+ if (!db_->Execute("ALTER TABLE metas ADD COLUMN "
+ "unique_position BLOB")) {
+ return false;
+ }
+ if (!db_->Execute("ALTER TABLE metas ADD COLUMN "
+ "unique_bookmark_tag VARCHAR")) {
+ return false;
+ }
+
+ // Fetch the cache_guid from the DB, because we don't otherwise have access to
+ // it from here.
+ sql::Statement get_cache_guid(db_->GetUniqueStatement(
+ "SELECT cache_guid FROM share_info"));
+ if (!get_cache_guid.Step()) {
+ return false;
+ }
+ std::string cache_guid = get_cache_guid.ColumnString(0);
+ DCHECK(!get_cache_guid.Step());
+ DCHECK(get_cache_guid.Succeeded());
+
+ sql::Statement get(db_->GetUniqueStatement(
+ "SELECT "
+ " metahandle, "
+ " id, "
+ " specifics, "
+ " is_dir, "
+ " unique_server_tag, "
+ " server_ordinal_in_parent "
+ "FROM metas"));
+
+ // Note that we set both the local and server position based on the server
+ // position. We will lose any unsynced local position changes. Unfortunately,
+ // there's nothing we can do to avoid that. The NEXT_ID / PREV_ID values
+ // can't be translated into a UNIQUE_POSITION in a reliable way.
+ sql::Statement put(db_->GetCachedStatement(
+ SQL_FROM_HERE,
+ "UPDATE metas SET"
+ " server_unique_position = ?,"
+ " unique_position = ?,"
+ " unique_bookmark_tag = ?"
+ "WHERE metahandle = ?"));
+
+ while (get.Step()) {
+ int64 metahandle = get.ColumnInt64(0);
+
+ std::string id_string;
+ get.ColumnBlobAsString(1, &id_string);
+
+ sync_pb::EntitySpecifics specifics;
+ specifics.ParseFromArray(
+ get.ColumnBlob(2), get.ColumnByteLength(2));
+
+ bool is_dir = get.ColumnBool(3);
+
+ std::string server_unique_tag = get.ColumnString(4);
+
+ std::string ordinal_string;
+ get.ColumnBlobAsString(5, &ordinal_string);
+ NodeOrdinal ordinal(ordinal_string);
+
+ std::string unique_bookmark_tag;
+
+ // We only maintain positions for bookmarks that are not server-defined
+ // top-level folders.
+ UniquePosition position;
+ if (GetModelTypeFromSpecifics(specifics) == BOOKMARKS
+ && !(is_dir && !server_unique_tag.empty())) {
+ if (id_string.at(0) == 'c') {
+ // We found an uncommitted item. This is rare, but fortunate. This
+ // means we can set the bookmark tag according to the originator client
+ // item ID and originator cache guid, because (unlike the other case) we
+ // know that this client is the originator.
+ unique_bookmark_tag = syncable::GenerateSyncableBookmarkHash(
+ cache_guid,
+ id_string.substr(1));
+ } else {
+ // If we've already committed the item, then we don't know who the
+ // originator was. We do not have access to the originator client item
+ // ID and originator cache guid at this point.
+ //
+ // We will base our hash entirely on the server ID instead. This is
+ // incorrect, but at least all clients that undergo this migration step
+ // will be incorrect in the same way.
+ //
+ // To get everyone back into a synced state, we will update the bookmark
+ // tag according to the originator_cache_guid and originator_item_id
+ // when we see updates for this item. That should ensure that commonly
+ // modified items will end up with the proper tag values eventually.
+ unique_bookmark_tag = syncable::GenerateSyncableBookmarkHash(
+ std::string(), // cache_guid left intentionally blank.
+ id_string.substr(1));
+ }
+
+ int64 int_position = NodeOrdinalToInt64(ordinal);
+ position = UniquePosition::FromInt64(int_position, unique_bookmark_tag);
+ } else {
+ // Leave bookmark_tag and position at their default (invalid) values.
+ }
+
+ std::string position_blob;
+ position.SerializeToString(&position_blob);
+ put.BindBlob(0, position_blob.data(), position_blob.length());
+ put.BindBlob(1, position_blob.data(), position_blob.length());
+ put.BindBlob(2, unique_bookmark_tag.data(), unique_bookmark_tag.length());
+ put.BindInt64(3, metahandle);
+
+ if (!put.Run())
+ return false;
+ put.Reset(true);
+ }
+
+ SetVersion(86);
+ needs_column_refresh_ = true;
+ return true;
+}
+
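// The migration loop above boils down to a small per-row transform for the
// rows that keep a position (bookmarks other than the server-created
// permanent folders).  A hedged sketch of that transform, reusing only
// helpers the patch itself calls; the function name and signature here are
// illustrative and not part of the patch.
UniquePosition MigratedPositionSketch(const std::string& cache_guid,
                                      const std::string& id_string,
                                      const NodeOrdinal& ordinal,
                                      std::string* bookmark_tag) {
  // Only the originator can compute the canonical tag.  For uncommitted
  // ('c...') items this client is the originator; for committed items the
  // originator is unknown, so the server ID is hashed with a blank guid.
  *bookmark_tag = (id_string.at(0) == 'c')
      ? GenerateSyncableBookmarkHash(cache_guid, id_string.substr(1))
      : GenerateSyncableBookmarkHash(std::string(), id_string.substr(1));
  // Collapse the ordinal to an int64 and rebuild a position from it; relative
  // order among already-committed siblings is preserved, but unsynced local
  // moves are lost, as the comment in the migration notes.
  return UniquePosition::FromInt64(NodeOrdinalToInt64(ordinal), *bookmark_tag);
}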
bool DirectoryBackingStore::CreateTables() {
DVLOG(1) << "First run, creating tables";
// Create two little tables share_version and share_info
@@ -1165,13 +1294,10 @@ bool DirectoryBackingStore::CreateTables() {
const int64 now = TimeToProtoTime(base::Time::Now());
sql::Statement s(db_->GetUniqueStatement(
"INSERT INTO metas "
- "( id, metahandle, is_dir, ctime, mtime, server_ordinal_in_parent) "
- "VALUES ( \"r\", 1, 1, ?, ?, ?)"));
+ "( id, metahandle, is_dir, ctime, mtime ) "
+ "VALUES ( \"r\", 1, 1, ?, ? )"));
s.BindInt64(0, now);
s.BindInt64(1, now);
- const std::string ord =
- NodeOrdinal::CreateInitialOrdinal().ToInternalValue();
- s.BindBlob(2, ord.data(), ord.length());
if (!s.Run())
return false;
@@ -1273,8 +1399,8 @@ bool DirectoryBackingStore::CreateShareInfoTableVersion71(
}
// This function checks to see if the given list of Metahandles has any nodes
-// whose PREV_ID, PARENT_ID or NEXT_ID values refer to ID values that do not
-// actually exist. Returns true on success.
+// whose PARENT_ID values refer to ID values that do not actually exist.
+// Returns true on success.
bool DirectoryBackingStore::VerifyReferenceIntegrity(
const syncable::MetahandlesIndex &index) {
TRACE_EVENT0("sync", "SyncDatabaseIntegrityCheck");
@@ -1295,10 +1421,10 @@ bool DirectoryBackingStore::VerifyReferenceIntegrity(
for (MetahandlesIndex::const_iterator it = index.begin();
it != index.end(); ++it) {
EntryKernel* entry = *it;
- bool prev_exists = (ids_set.find(entry->ref(PREV_ID).value()) != end);
bool parent_exists = (ids_set.find(entry->ref(PARENT_ID).value()) != end);
- bool next_exists = (ids_set.find(entry->ref(NEXT_ID).value()) != end);
- is_ok = is_ok && prev_exists && parent_exists && next_exists;
+ if (!parent_exists) {
+ return false;
+ }
}
return is_ok;
}
diff --git a/sync/syncable/directory_backing_store.h b/sync/syncable/directory_backing_store.h
index 3ee8844..1b0b721 100644
--- a/sync/syncable/directory_backing_store.h
+++ b/sync/syncable/directory_backing_store.h
@@ -172,6 +172,7 @@ class SYNC_EXPORT_PRIVATE DirectoryBackingStore : public base::NonThreadSafe {
bool MigrateVersion82To83();
bool MigrateVersion83To84();
bool MigrateVersion84To85();
+ bool MigrateVersion85To86();
scoped_ptr<sql::Connection> db_;
sql::Statement save_meta_statment_;
diff --git a/sync/syncable/directory_backing_store_unittest.cc b/sync/syncable/directory_backing_store_unittest.cc
index a9da2ca0..6b5f5af 100644
--- a/sync/syncable/directory_backing_store_unittest.cc
+++ b/sync/syncable/directory_backing_store_unittest.cc
@@ -72,15 +72,16 @@ class MigrationTest : public testing::TestWithParam<int> {
void SetUpVersion83Database(sql::Connection* connection);
void SetUpVersion84Database(sql::Connection* connection);
void SetUpVersion85Database(sql::Connection* connection);
+ void SetUpVersion86Database(sql::Connection* connection);
void SetUpCurrentDatabaseAndCheckVersion(sql::Connection* connection) {
- SetUpVersion85Database(connection); // Prepopulates data.
+ SetUpVersion86Database(connection); // Prepopulates data.
scoped_ptr<TestDirectoryBackingStore> dbs(
new TestDirectoryBackingStore(GetUsername(), connection));
+ ASSERT_EQ(kCurrentDBVersion, dbs->GetVersion());
ASSERT_TRUE(LoadAndIgnoreReturnedData(dbs.get()));
ASSERT_FALSE(dbs->needs_column_refresh_);
- ASSERT_EQ(kCurrentDBVersion, dbs->GetVersion());
}
private:
@@ -2426,6 +2427,96 @@ void MigrationTest::SetUpVersion85Database(sql::Connection* connection) {
ASSERT_TRUE(connection->CommitTransaction());
}
+void MigrationTest::SetUpVersion86Database(sql::Connection* connection) {
+ ASSERT_TRUE(connection->is_open());
+ ASSERT_TRUE(connection->BeginTransaction());
+ ASSERT_TRUE(connection->Execute(
+ "CREATE TABLE share_version (id VARCHAR(128) primary key, data INT);"
+ "INSERT INTO 'share_version' VALUES('nick@chromium.org',86);"
+ "CREATE TABLE models (model_id BLOB primary key, progress_marker BLOB,"
+ " transaction_version BIGINT default 0);"
+ "INSERT INTO 'models' VALUES(X'C2881000',X'0888810218B605',1);"
+ "CREATE TABLE 'metas'(metahandle bigint primary key ON CONFLICT FAIL,b"
+ "ase_version bigint default -1,server_version bigint default 0,local_e"
+ "xternal_id bigint default 0,transaction_version bigint default 0,mtim"
+ "e bigint default 0,server_mtime bigint default 0,ctime bigint default"
+ " 0,server_ctime bigint default 0,id varchar(255) default 'r',parent_i"
+ "d varchar(255) default 'r',server_parent_id varchar(255) default 'r',"
+ "is_unsynced bit default 0,is_unapplied_update bit default 0,is_del bi"
+ "t default 0,is_dir bit default 0,server_is_dir bit default 0,server_i"
+ "s_del bit default 0,non_unique_name varchar,server_non_unique_name va"
+ "rchar(255),unique_server_tag varchar,unique_client_tag varchar,specif"
+ "ics blob,server_specifics blob,base_server_specifics blob,server_uniq"
+ "ue_position blob,unique_position blob,unique_bookmark_tag blob);"
+ "INSERT INTO 'metas' VALUES(1,-1,0,0,0,"
+ META_PROTO_TIMES_VALS(1)
+ ",'r','r','r',0,0,0,1,0,0,NULL,NULL,NULL,NULL,"
+ "X'',X'',NULL,X'',X'',X'');"
+ "INSERT INTO 'metas' VALUES(6,694,694,6,0,"
+ META_PROTO_TIMES_VALS(6) ",'s_ID_6','s_ID_9','s_ID_9',0,0,0,1,1,0,'T"
+ "he Internet','The Internet',NULL,NULL,X'C2881000',X'C2881000',NULL,X'"
+ "',X'',X'');"
+ "INSERT INTO 'metas' VALUES(7,663,663,0,0,"
+ META_PROTO_TIMES_VALS(7) ",'s_ID_7','r','r',0,0,0,1,1,0,'Google Chro"
+ "me','Google Chrome','google_chrome',NULL,NULL,NULL,NULL,X'',X'',X'');"
+ "INSERT INTO 'metas' VALUES(8,664,664,0,0,"
+ META_PROTO_TIMES_VALS(8) ",'s_ID_8','s_ID_7','s_ID_7',0,0,0,1,1,0,'B"
+ "ookmarks','Bookmarks','google_chrome_bookmarks',NULL,X'C2881000',X'C2"
+ "881000',NULL,X'',X'',X'');"
+ "INSERT INTO 'metas' VALUES(9,665,665,1,0,"
+ META_PROTO_TIMES_VALS(9) ",'s_ID_9','s_ID_8','s_ID_8',0,0,0,1,1,0,'B"
+ "ookmark Bar','Bookmark Bar','bookmark_bar',NULL,X'C2881000',X'C288100"
+ "0',NULL,X'',X'',X'');"
+ "INSERT INTO 'metas' VALUES(10,666,666,2,0,"
+ META_PROTO_TIMES_VALS(10) ",'s_ID_10','s_ID_8','s_ID_8',0,0,0,1,1,0,"
+ "'Other Bookmarks','Other Bookmarks','other_bookmarks',NULL,X'C2881000"
+ "',X'C2881000',NULL,X'',X'',X'');"
+ "INSERT INTO 'metas' VALUES(11,683,683,8,0,"
+ META_PROTO_TIMES_VALS(11) ",'s_ID_11','s_ID_6','s_ID_6',0,0,0,0,0,0,"
+ "'Home (The Chromium Projects)','Home (The Chromium Projects)',NULL,NU"
+ "LL,X'C28810220A18687474703A2F2F6465762E6368726F6D69756D2E6F72672F1206"
+ "414741545741',X'C28810290A1D687474703A2F2F6465762E6368726F6D69756D2E6"
+ "F72672F6F7468657212084146414756415346',NULL,X'',X'',X'');"
+ "INSERT INTO 'metas' VALUES(12,685,685,9,0,"
+ META_PROTO_TIMES_VALS(12) ",'s_ID_12','s_ID_6','s_ID_6',0,0,0,1,1,0,"
+ "'Extra Bookmarks','Extra Bookmarks',NULL,NULL,X'C2881000',X'C2881000'"
+ ",NULL,X'',X'',X'');"
+ "INSERT INTO 'metas' VALUES(13,687,687,10,0,"
+ META_PROTO_TIMES_VALS(13) ",'s_ID_13','s_ID_6','s_ID_6',0,0,0,0,0,0"
+ ",'ICANN | Internet Corporation for Assigned Names and Numbers','ICANN"
+ " | Internet Corporation for Assigned Names and Numbers',NULL,NULL,X'C"
+ "28810240A15687474703A2F2F7777772E6963616E6E2E636F6D2F120B504E47415846"
+ "3041414646',X'C28810200A15687474703A2F2F7777772E6963616E6E2E636F6D2F1"
+ "20744414146415346',NULL,X'',X'',X'');"
+ "INSERT INTO 'metas' VALUES(14,692,692,11,0,"
+ META_PROTO_TIMES_VALS(14) ",'s_ID_14','s_ID_6','s_ID_6',0,0,0,0,0,0"
+ ",'The WebKit Open Source Project','The WebKit Open Source Project',NU"
+ "LL,NULL,X'C288101A0A12687474703A2F2F7765626B69742E6F72672F1204504E475"
+ "8',X'C288101C0A13687474703A2F2F7765626B69742E6F72672F781205504E473259"
+ "',NULL,X'',X'',X'');"
+ "CREATE TABLE deleted_metas (metahandle bigint primary key ON CONFLICT"
+ " FAIL,base_version bigint default -1,server_version bigint default 0,"
+ "local_external_id bigint default 0,transaction_version bigint default"
+ " 0,mtime bigint default 0,server_mtime bigint default 0,ctime bigint "
+ "default 0,server_ctime bigint default 0,id varchar(255) default 'r',p"
+ "arent_id varchar(255) default 'r',server_parent_id varchar(255) defau"
+ "lt 'r',is_unsynced bit default 0,is_unapplied_update bit default 0,is"
+ "_del bit default 0,is_dir bit default 0,server_is_dir bit default 0,s"
+ "erver_is_del bit default 0,non_unique_name varchar,server_non_unique_"
+ "name varchar(255),unique_server_tag varchar,unique_client_tag varchar"
+ ",specifics blob,server_specifics blob,base_server_specifics blob,serv"
+ "er_unique_position blob,unique_position blob,unique_bookmark_tag blob"
+ ");"
+ "CREATE TABLE 'share_info' (id TEXT primary key, name TEXT, store_birt"
+ "hday TEXT, db_create_version TEXT, db_create_time INT, next_id INT de"
+ "fault -2, cache_guid TEXT, notification_state BLOB, bag_of_chips BLOB"
+ ");"
+ "INSERT INTO 'share_info' VALUES('nick@chromium.org','nick@chromium.or"
+ "g','c27e9f59-08ca-46f8-b0cc-f16a2ed778bb','Unknown',1263522064,-13107"
+ "8,'9010788312004066376x-6609234393368420856x',NULL,NULL);"));
+ ASSERT_TRUE(connection->CommitTransaction());
+}
+
TEST_F(DirectoryBackingStoreTest, MigrateVersion67To68) {
sql::Connection connection;
ASSERT_TRUE(connection.OpenInMemory());
@@ -2812,34 +2903,6 @@ TEST_F(DirectoryBackingStoreTest, MigrateVersion80To81) {
ASSERT_EQ(expected_ordinal, actual_ordinal);
}
-TEST_F(DirectoryBackingStoreTest, DetectInvalidOrdinal) {
- sql::Connection connection;
- ASSERT_TRUE(connection.OpenInMemory());
- SetUpVersion81Database(&connection);
-
- scoped_ptr<TestDirectoryBackingStore> dbs(
- new TestDirectoryBackingStore(GetUsername(), &connection));
- ASSERT_EQ(81, dbs->GetVersion());
-
- // Insert row with bad ordinal.
- const int64 now = TimeToProtoTime(base::Time::Now());
- sql::Statement s(connection.GetUniqueStatement(
- "INSERT INTO metas "
- "( id, metahandle, is_dir, ctime, mtime, server_ordinal_in_parent) "
- "VALUES( \"c-invalid\", 9999, 1, ?, ?, \" \")"));
- s.BindInt64(0, now);
- s.BindInt64(1, now);
- ASSERT_TRUE(s.Run());
-
- // Trying to unpack this entry should signal that the DB is corrupted.
- MetahandlesIndex entry_bucket;
- JournalIndex delete_journals;;
- STLElementDeleter<MetahandlesIndex> deleter(&entry_bucket);
- Directory::KernelLoadInfo kernel_load_info;
- ASSERT_EQ(FAILED_DATABASE_CORRUPT,
- dbs->Load(&entry_bucket, &delete_journals, &kernel_load_info));
-}
-
TEST_F(DirectoryBackingStoreTest, MigrateVersion81To82) {
sql::Connection connection;
ASSERT_TRUE(connection.OpenInMemory());
@@ -2897,6 +2960,130 @@ TEST_F(DirectoryBackingStoreTest, MigrateVersion84To85) {
ASSERT_FALSE(connection.DoesColumnExist("models", "initial_sync_ended"));
}
+TEST_F(DirectoryBackingStoreTest, MigrateVersion85To86) {
+ sql::Connection connection;
+ ASSERT_TRUE(connection.OpenInMemory());
+ SetUpVersion85Database(&connection);
+ EXPECT_TRUE(connection.DoesColumnExist("metas", "next_id"));
+ EXPECT_TRUE(connection.DoesColumnExist("metas", "prev_id"));
+ EXPECT_TRUE(connection.DoesColumnExist("metas", "server_ordinal_in_parent"));
+ EXPECT_FALSE(connection.DoesColumnExist("metas", "unique_position"));
+ EXPECT_FALSE(connection.DoesColumnExist("metas", "server_unique_position"));
+ EXPECT_FALSE(connection.DoesColumnExist("metas", "unique_bookmark_tag"));
+
+ scoped_ptr<TestDirectoryBackingStore> dbs(
+ new TestDirectoryBackingStore(GetUsername(), &connection));
+ ASSERT_TRUE(dbs->MigrateVersion85To86());
+ EXPECT_EQ(86, dbs->GetVersion());
+ EXPECT_TRUE(connection.DoesColumnExist("metas", "unique_position"));
+ EXPECT_TRUE(connection.DoesColumnExist("metas", "server_unique_position"));
+ EXPECT_TRUE(connection.DoesColumnExist("metas", "unique_bookmark_tag"));
+ ASSERT_TRUE(dbs->needs_column_refresh_);
+ ASSERT_TRUE(dbs->RefreshColumns());
+ EXPECT_FALSE(connection.DoesColumnExist("metas", "next_id"));
+ EXPECT_FALSE(connection.DoesColumnExist("metas", "prev_id"));
+ EXPECT_FALSE(connection.DoesColumnExist("metas", "server_ordinal_in_parent"));
+
+ {
+ MetahandlesIndex metas;
+ STLElementDeleter<MetahandlesIndex> deleter(&metas);
+ dbs->LoadEntries(&metas);
+
+ EntryKernel needle;
+
+ // Grab a bookmark and examine it.
+ needle.put(META_HANDLE, 5);
+ MetahandlesIndex::iterator i = metas.find(&needle);
+ ASSERT_FALSE(i == metas.end());
+ EntryKernel* bm = *i;
+ ASSERT_EQ(bm->ref(ID).value(), "s_ID_5");
+
+ EXPECT_TRUE(bm->ref(UNIQUE_POSITION).IsValid());
+ EXPECT_TRUE(bm->ref(SERVER_UNIQUE_POSITION).IsValid());
+ EXPECT_EQ(UniquePosition::kSuffixLength,
+ bm->ref(UNIQUE_BOOKMARK_TAG).length());
+
+ // Grab a non-bookmark and examine it.
+ needle.put(META_HANDLE, 1);
+ MetahandlesIndex::iterator j = metas.find(&needle);
+ ASSERT_FALSE(j == metas.end());
+ EntryKernel* root = *j;
+ ASSERT_EQ(root->ref(ID).value(), "r");
+
+ EXPECT_FALSE(root->ref(UNIQUE_POSITION).IsValid());
+ EXPECT_FALSE(root->ref(SERVER_UNIQUE_POSITION).IsValid());
+ EXPECT_TRUE(root->ref(UNIQUE_BOOKMARK_TAG).empty());
+
+ // Make sure we didn't mistake the bookmark root node for a real bookmark.
+ needle.put(META_HANDLE, 8);
+ MetahandlesIndex::iterator k = metas.find(&needle);
+ ASSERT_FALSE(k == metas.end());
+ EntryKernel* bm_root = *k;
+ ASSERT_EQ(bm_root->ref(ID).value(), "s_ID_8");
+ ASSERT_EQ(bm_root->ref(UNIQUE_SERVER_TAG), "google_chrome_bookmarks");
+
+ EXPECT_FALSE(bm_root->ref(UNIQUE_POSITION).IsValid());
+ EXPECT_FALSE(bm_root->ref(SERVER_UNIQUE_POSITION).IsValid());
+ EXPECT_TRUE(bm_root->ref(UNIQUE_BOOKMARK_TAG).empty());
+
+ // Make sure we didn't assign positions to server-created folders, either.
+ needle.put(META_HANDLE, 10);
+ MetahandlesIndex::iterator l = metas.find(&needle);
+ ASSERT_FALSE(l == metas.end());
+ EntryKernel* perm_folder = *l;
+ ASSERT_EQ(perm_folder->ref(ID).value(), "s_ID_10");
+ ASSERT_EQ(perm_folder->ref(UNIQUE_SERVER_TAG), "other_bookmarks");
+
+ EXPECT_FALSE(perm_folder->ref(UNIQUE_POSITION).IsValid());
+ EXPECT_FALSE(perm_folder->ref(SERVER_UNIQUE_POSITION).IsValid());
+ EXPECT_TRUE(perm_folder->ref(UNIQUE_BOOKMARK_TAG).empty());
+
+ // Make sure that the syncable::Directory and the migration code agree on
+ // which items should or should not have unique position values. This test
+ // may become obsolete if the definition of ShouldMaintainPosition()
+ // changes, but until then it is a useful check.
+ for (MetahandlesIndex::iterator it = metas.begin();
+ it != metas.end(); it++) {
+ SCOPED_TRACE((*it)->ref(ID));
+ if ((*it)->ShouldMaintainPosition()) {
+ EXPECT_TRUE((*it)->ref(UNIQUE_POSITION).IsValid());
+ EXPECT_TRUE((*it)->ref(SERVER_UNIQUE_POSITION).IsValid());
+ EXPECT_FALSE((*it)->ref(UNIQUE_BOOKMARK_TAG).empty());
+ } else {
+ EXPECT_FALSE((*it)->ref(UNIQUE_POSITION).IsValid());
+ EXPECT_FALSE((*it)->ref(SERVER_UNIQUE_POSITION).IsValid());
+ EXPECT_TRUE((*it)->ref(UNIQUE_BOOKMARK_TAG).empty());
+ }
+ }
+ }
+}
+
+TEST_F(DirectoryBackingStoreTest, DetectInvalidPosition) {
+ sql::Connection connection;
+ ASSERT_TRUE(connection.OpenInMemory());
+ SetUpVersion86Database(&connection);
+
+ scoped_ptr<TestDirectoryBackingStore> dbs(
+ new TestDirectoryBackingStore(GetUsername(), &connection));
+ ASSERT_EQ(86, dbs->GetVersion());
+
+ // Insert row with bad position.
+ sql::Statement s(connection.GetUniqueStatement(
+ "INSERT INTO metas "
+ "( id, metahandle, is_dir, ctime, mtime,"
+ " unique_position, server_unique_position) "
+ "VALUES('c-invalid', 9999, 1, 0, 0, 'BAD_POS', 'BAD_POS')"));
+ ASSERT_TRUE(s.Run());
+
+ // Trying to unpack this entry should signal that the DB is corrupted.
+ MetahandlesIndex entry_bucket;
+ JournalIndex delete_journals;
+ STLElementDeleter<MetahandlesIndex> deleter(&entry_bucket);
+ Directory::KernelLoadInfo kernel_load_info;
+ ASSERT_EQ(FAILED_DATABASE_CORRUPT,
+ dbs->Load(&entry_bucket, &delete_journals, &kernel_load_info));
+}
+
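// The DetectInvalidPosition test above exercises the corruption check that
// UnpackEntry now performs: positions are stored as serialized
// sync_pb::UniquePosition blobs, and a blob that fails to parse (such as the
// literal 'BAD_POS' inserted above) makes Load() report
// FAILED_DATABASE_CORRUPT.  A hedged sketch of that round trip; the helper
// name and signature are illustrative only.
bool PositionBlobParsesSketch(const std::string& blob, UniquePosition* out) {
  sync_pb::UniquePosition proto;
  if (!proto.ParseFromString(blob))
    return false;  // UnpackEntry returns NULL and the load fails.
  // An empty blob parses fine and yields the default, invalid position that
  // rows without positions (e.g. the root) legitimately carry.
  *out = UniquePosition::FromProto(proto);
  return true;
}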
TEST_P(MigrationTest, ToCurrentVersion) {
sql::Connection connection;
ASSERT_TRUE(connection.OpenInMemory());
@@ -2955,6 +3142,12 @@ TEST_P(MigrationTest, ToCurrentVersion) {
case 84:
SetUpVersion84Database(&connection);
break;
+ case 85:
+ SetUpVersion85Database(&connection);
+ break;
+ case 86:
+ SetUpVersion86Database(&connection);
+ break;
default:
// If you see this error, it may mean that you've increased the
// database version number but you haven't finished adding unit tests
@@ -3169,7 +3362,7 @@ TEST_P(MigrationTest, ToCurrentVersion) {
}
INSTANTIATE_TEST_CASE_P(DirectoryBackingStore, MigrationTest,
- testing::Range(67, kCurrentDBVersion));
+ testing::Range(67, kCurrentDBVersion + 1));
TEST_F(DirectoryBackingStoreTest, ModelTypeIds) {
ModelTypeSet protocol_types = ProtocolTypes();
diff --git a/sync/syncable/entry.cc b/sync/syncable/entry.cc
index c6cc4f0..2d98c6f 100644
--- a/sync/syncable/entry.cc
+++ b/sync/syncable/entry.cc
@@ -7,6 +7,7 @@
#include <iomanip>
#include "base/json/string_escape.h"
+#include "base/string_util.h"
#include "sync/syncable/blob.h"
#include "sync/syncable/directory.h"
#include "sync/syncable/syncable_base_transaction.h"
@@ -41,10 +42,6 @@ Directory* Entry::dir() const {
return basetrans_->directory();
}
-Id Entry::ComputePrevIdFromServerPosition(const Id& parent_id) const {
- return dir()->ComputePrevIdFromServerPosition(kernel_, parent_id);
-}
-
DictionaryValue* Entry::ToValue(Cryptographer* cryptographer) const {
DictionaryValue* entry_info = new DictionaryValue();
entry_info->SetBoolean("good", good());
@@ -96,11 +93,19 @@ ModelType Entry::GetModelType() const {
}
Id Entry::GetPredecessorId() const {
- return kernel_->ref(PREV_ID);
+ return dir()->GetPredecessorId(kernel_);
}
Id Entry::GetSuccessorId() const {
- return kernel_->ref(NEXT_ID);
+ return dir()->GetSuccessorId(kernel_);
+}
+
+Id Entry::GetFirstChildId() const {
+ return dir()->GetFirstChildId(basetrans_, kernel_);
+}
+
+bool Entry::ShouldMaintainPosition() const {
+ return kernel_->ShouldMaintainPosition();
}
std::ostream& operator<<(std::ostream& s, const Blob& blob) {
@@ -142,9 +147,9 @@ std::ostream& operator<<(std::ostream& os, const Entry& entry) {
&escaped_str);
os << g_metas_columns[i].name << ": " << escaped_str << ", ";
}
- for ( ; i < ORDINAL_FIELDS_END; ++i) {
+ for ( ; i < UNIQUE_POSITION_FIELDS_END; ++i) {
os << g_metas_columns[i].name << ": "
- << kernel->ref(static_cast<OrdinalField>(i)).ToDebugString()
+ << kernel->ref(static_cast<UniquePositionField>(i)).ToDebugString()
<< ", ";
}
os << "TempFlags: ";
diff --git a/sync/syncable/entry.h b/sync/syncable/entry.h
index e1c8717..f9d3f90 100644
--- a/sync/syncable/entry.h
+++ b/sync/syncable/entry.h
@@ -93,7 +93,7 @@ class SYNC_EXPORT Entry {
DCHECK(kernel_);
return kernel_->ref(field);
}
- inline const NodeOrdinal& Get(OrdinalField field) const {
+ inline const UniquePosition& Get(UniquePositionField field) const {
DCHECK(kernel_);
return kernel_->ref(field);
}
@@ -107,6 +107,7 @@ class SYNC_EXPORT Entry {
Id GetPredecessorId() const;
Id GetSuccessorId() const;
+ Id GetFirstChildId() const;
inline bool ExistsOnClientBecauseNameIsNonEmpty() const {
DCHECK(kernel_);
@@ -118,18 +119,16 @@ class SYNC_EXPORT Entry {
return kernel_->ref(ID).IsRoot();
}
+ // Returns true if this entry is expected to maintain a position
+ // (sort order) relative to its siblings under the same parent.
+ bool ShouldMaintainPosition() const;
+
Directory* dir() const;
const EntryKernel GetKernelCopy() const {
return *kernel_;
}
- // Compute a local predecessor position for |update_item|, based on its
- // absolute server position. The returned ID will be a valid predecessor
- // under SERVER_PARENT_ID that is consistent with the
- // SERVER_POSITION_IN_PARENT ordering.
- Id ComputePrevIdFromServerPosition(const Id& parent_id) const;
-
// Dumps all entry info into a DictionaryValue and returns it.
// Transfers ownership of the DictionaryValue to the caller.
base::DictionaryValue* ToValue(Cryptographer* cryptographer) const;
diff --git a/sync/syncable/entry_kernel.cc b/sync/syncable/entry_kernel.cc
index 5b21612..01f2cae 100644
--- a/sync/syncable/entry_kernel.cc
+++ b/sync/syncable/entry_kernel.cc
@@ -21,6 +21,20 @@ EntryKernel::EntryKernel() : dirty_(false) {
EntryKernel::~EntryKernel() {}
+ModelType EntryKernel::GetModelType() const {
+ ModelType specifics_type = GetModelTypeFromSpecifics(ref(SPECIFICS));
+ if (specifics_type != UNSPECIFIED)
+ return specifics_type;
+ if (ref(ID).IsRoot())
+ return TOP_LEVEL_FOLDER;
+ // Loose check for server-created top-level folders that aren't
+ // bound to a particular model type.
+ if (!ref(UNIQUE_SERVER_TAG).empty() && ref(SERVER_IS_DIR))
+ return TOP_LEVEL_FOLDER;
+
+ return UNSPECIFIED;
+}
+
ModelType EntryKernel::GetServerModelType() const {
ModelType specifics_type = GetModelTypeFromSpecifics(ref(SERVER_SPECIFICS));
if (specifics_type != UNSPECIFIED)
@@ -35,6 +49,13 @@ ModelType EntryKernel::GetServerModelType() const {
return UNSPECIFIED;
}
+bool EntryKernel::ShouldMaintainPosition() const {
+ // We maintain positions for all bookmarks, except those that are
+ // server-created top-level folders.
+ return (GetModelTypeFromSpecifics(ref(SPECIFICS)) == syncer::BOOKMARKS)
+ && !(!ref(UNIQUE_SERVER_TAG).empty() && ref(IS_DIR));
+}
+
namespace {
// Utility function to loop through a set of enum values and add the
@@ -96,10 +117,6 @@ base::StringValue* IdToValue(const Id& id) {
return id.ToValue();
}
-base::StringValue* OrdinalToValue(const NodeOrdinal& ord) {
- return new base::StringValue(ord.ToDebugString());
-}
-
base::FundamentalValue* BooleanToValue(bool bool_val) {
return new base::FundamentalValue(bool_val);
}
@@ -108,6 +125,10 @@ base::StringValue* StringToValue(const std::string& str) {
return new base::StringValue(str);
}
+base::StringValue* UniquePositionToValue(const UniquePosition& pos) {
+ return new base::StringValue(pos.ToDebugString());
+}
+
} // namespace
base::DictionaryValue* EntryKernel::ToValue(
@@ -160,10 +181,10 @@ base::DictionaryValue* EntryKernel::ToValue(
SetEncryptableProtoValues(*this, cryptographer, kernel_info,
PROTO_FIELDS_BEGIN, PROTO_FIELDS_END - 1);
- // Ordinal fields
+ // UniquePosition fields
SetFieldValues(*this, kernel_info,
- &GetOrdinalFieldString, &OrdinalToValue,
- ORDINAL_FIELDS_BEGIN, ORDINAL_FIELDS_END - 1);
+ &GetUniquePositionFieldString, &UniquePositionToValue,
+ UNIQUE_POSITION_FIELDS_BEGIN, UNIQUE_POSITION_FIELDS_END - 1);
// Bit temps.
SetFieldValues(*this, kernel_info,
diff --git a/sync/syncable/entry_kernel.h b/sync/syncable/entry_kernel.h
index e9cf828..16a6e97 100644
--- a/sync/syncable/entry_kernel.h
+++ b/sync/syncable/entry_kernel.h
@@ -11,7 +11,7 @@
#include "base/values.h"
#include "sync/base/sync_export.h"
#include "sync/internal_api/public/base/model_type.h"
-#include "sync/internal_api/public/base/node_ordinal.h"
+#include "sync/internal_api/public/base/unique_position.h"
#include "sync/internal_api/public/util/immutable.h"
#include "sync/protocol/sync.pb.h"
#include "sync/syncable/metahandle_set.h"
@@ -83,9 +83,6 @@ enum IdField {
ID = ID_FIELDS_BEGIN,
PARENT_ID,
SERVER_PARENT_ID,
-
- PREV_ID,
- NEXT_ID,
ID_FIELDS_END
};
@@ -127,6 +124,7 @@ enum StringField {
// identifies a singleton instance.
UNIQUE_SERVER_TAG, // Tagged by the server
UNIQUE_CLIENT_TAG, // Tagged by the client
+ UNIQUE_BOOKMARK_TAG, // Client tags for bookmark items
STRING_FIELDS_END,
};
@@ -146,21 +144,21 @@ enum ProtoField {
enum {
PROTO_FIELDS_COUNT = PROTO_FIELDS_END - PROTO_FIELDS_BEGIN,
- ORDINAL_FIELDS_BEGIN = PROTO_FIELDS_END
+ UNIQUE_POSITION_FIELDS_BEGIN = PROTO_FIELDS_END
};
-enum OrdinalField {
- // An Ordinal that identifies the relative ordering of this object
- // among its siblings.
- SERVER_ORDINAL_IN_PARENT = ORDINAL_FIELDS_BEGIN,
- ORDINAL_FIELDS_END
+enum UniquePositionField {
+ SERVER_UNIQUE_POSITION = UNIQUE_POSITION_FIELDS_BEGIN,
+ UNIQUE_POSITION,
+ UNIQUE_POSITION_FIELDS_END
};
enum {
- ORDINAL_FIELDS_COUNT = ORDINAL_FIELDS_END - ORDINAL_FIELDS_BEGIN,
- FIELD_COUNT = ORDINAL_FIELDS_END - BEGIN_FIELDS,
+ UNIQUE_POSITION_FIELDS_COUNT =
+ UNIQUE_POSITION_FIELDS_END - UNIQUE_POSITION_FIELDS_BEGIN,
+ FIELD_COUNT = UNIQUE_POSITION_FIELDS_END - BEGIN_FIELDS,
// Past this point we have temporaries, stored in memory only.
- BEGIN_TEMPS = ORDINAL_FIELDS_END,
+ BEGIN_TEMPS = UNIQUE_POSITION_FIELDS_END,
BIT_TEMPS_BEGIN = BEGIN_TEMPS,
};
@@ -184,7 +182,7 @@ struct SYNC_EXPORT_PRIVATE EntryKernel {
int64 int64_fields[INT64_FIELDS_COUNT];
base::Time time_fields[TIME_FIELDS_COUNT];
Id id_fields[ID_FIELDS_COUNT];
- NodeOrdinal ordinal_fields[ORDINAL_FIELDS_COUNT];
+ UniquePosition unique_position_fields[UNIQUE_POSITION_FIELDS_COUNT];
std::bitset<BIT_FIELDS_COUNT> bit_fields;
std::bitset<BIT_TEMPS_COUNT> bit_temps;
@@ -252,8 +250,8 @@ struct SYNC_EXPORT_PRIVATE EntryKernel {
inline void put(ProtoField field, const sync_pb::EntitySpecifics& value) {
specifics_fields[field - PROTO_FIELDS_BEGIN].CopyFrom(value);
}
- inline void put(OrdinalField field, const NodeOrdinal& value) {
- ordinal_fields[field - ORDINAL_FIELDS_BEGIN] = value;
+ inline void put(UniquePositionField field, const UniquePosition& value) {
+ unique_position_fields[field - UNIQUE_POSITION_FIELDS_BEGIN] = value;
}
inline void put(BitTemp field, bool value) {
bit_temps[field - BIT_TEMPS_BEGIN] = value;
@@ -290,8 +288,8 @@ struct SYNC_EXPORT_PRIVATE EntryKernel {
inline const sync_pb::EntitySpecifics& ref(ProtoField field) const {
return specifics_fields[field - PROTO_FIELDS_BEGIN];
}
- inline const NodeOrdinal& ref(OrdinalField field) const {
- return ordinal_fields[field - ORDINAL_FIELDS_BEGIN];
+ inline const UniquePosition& ref(UniquePositionField field) const {
+ return unique_position_fields[field - UNIQUE_POSITION_FIELDS_BEGIN];
}
inline bool ref(BitTemp field) const {
return bit_temps[field - BIT_TEMPS_BEGIN];
@@ -307,11 +305,13 @@ struct SYNC_EXPORT_PRIVATE EntryKernel {
inline Id& mutable_ref(IdField field) {
return id_fields[field - ID_FIELDS_BEGIN];
}
- inline NodeOrdinal& mutable_ref(OrdinalField field) {
- return ordinal_fields[field - ORDINAL_FIELDS_BEGIN];
+ inline UniquePosition& mutable_ref(UniquePositionField field) {
+ return unique_position_fields[field - UNIQUE_POSITION_FIELDS_BEGIN];
}
+ ModelType GetModelType() const;
ModelType GetServerModelType() const;
+ bool ShouldMaintainPosition() const;
// Dumps all kernel info into a DictionaryValue and returns it.
// Transfers ownership of the DictionaryValue to the caller.
diff --git a/sync/syncable/in_memory_directory_backing_store.cc b/sync/syncable/in_memory_directory_backing_store.cc
index 9451354..f130fb5 100644
--- a/sync/syncable/in_memory_directory_backing_store.cc
+++ b/sync/syncable/in_memory_directory_backing_store.cc
@@ -8,7 +8,9 @@ namespace syncer {
namespace syncable {
InMemoryDirectoryBackingStore::InMemoryDirectoryBackingStore(
- const std::string& dir_name) : DirectoryBackingStore(dir_name) {
+ const std::string& dir_name)
+ : DirectoryBackingStore(dir_name),
+ consistent_cache_guid_requested_(false) {
}
DirOpenResult InMemoryDirectoryBackingStore::Load(
@@ -23,6 +25,13 @@ DirOpenResult InMemoryDirectoryBackingStore::Load(
if (!InitializeTables())
return FAILED_OPEN_DATABASE;
+ if (consistent_cache_guid_requested_) {
+ if (!db_->Execute("UPDATE share_info "
+ "SET cache_guid = 'IrcjZ2jyzHDV9Io4+zKcXQ=='")) {
+ return FAILED_OPEN_DATABASE;
+ }
+ }
+
if (!DropDeletedEntries())
return FAILED_DATABASE_CORRUPT;
if (!LoadEntries(entry_bucket))
diff --git a/sync/syncable/in_memory_directory_backing_store.h b/sync/syncable/in_memory_directory_backing_store.h
index 846d96a..3afa051 100644
--- a/sync/syncable/in_memory_directory_backing_store.h
+++ b/sync/syncable/in_memory_directory_backing_store.h
@@ -29,7 +29,13 @@ class SYNC_EXPORT_PRIVATE InMemoryDirectoryBackingStore
JournalIndex* delete_journals,
Directory::KernelLoadInfo* kernel_load_info) OVERRIDE;
+ void request_consistent_cache_guid() {
+ consistent_cache_guid_requested_ = true;
+ }
+
private:
+ bool consistent_cache_guid_requested_;
+
DISALLOW_COPY_AND_ASSIGN(InMemoryDirectoryBackingStore);
};
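The new request_consistent_cache_guid() hook has to be called before Load(), since the share_info rewrite happens inside InitializeTables(). A hypothetical usage sketch follows; the directory name is made up, and the presumed purpose is to give tests a deterministic cache GUID, and therefore deterministic GUID-derived bookmark tags:

  // Opt an in-memory store into the fixed cache GUID before it is loaded.
  InMemoryDirectoryBackingStore* store =
      new InMemoryDirectoryBackingStore("test_user");
  store->request_consistent_cache_guid();
  // ... hand |store| to the Directory, which will call Load() on it.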
diff --git a/sync/syncable/model_type.cc b/sync/syncable/model_type.cc
index 31120a7..2657ebc 100644
--- a/sync/syncable/model_type.cc
+++ b/sync/syncable/model_type.cc
@@ -306,10 +306,6 @@ ModelType GetModelTypeFromSpecifics(const sync_pb::EntitySpecifics& specifics) {
return UNSPECIFIED;
}
-bool ShouldMaintainPosition(ModelType model_type) {
- return model_type == BOOKMARKS;
-}
-
ModelTypeSet ProtocolTypes() {
ModelTypeSet set = ModelTypeSet::All();
set.RemoveAll(ProxyTypes());
diff --git a/sync/syncable/mutable_entry.cc b/sync/syncable/mutable_entry.cc
index 70b2a26..7c91135 100644
--- a/sync/syncable/mutable_entry.cc
+++ b/sync/syncable/mutable_entry.cc
@@ -5,10 +5,11 @@
#include "sync/syncable/mutable_entry.h"
#include "base/memory/scoped_ptr.h"
-#include "sync/internal_api/public/base/node_ordinal.h"
+#include "sync/internal_api/public/base/unique_position.h"
#include "sync/syncable/directory.h"
#include "sync/syncable/scoped_index_updater.h"
#include "sync/syncable/scoped_kernel_lock.h"
+#include "sync/syncable/scoped_parent_child_index_updater.h"
#include "sync/syncable/syncable-inl.h"
#include "sync/syncable/syncable_changes_version.h"
#include "sync/syncable/syncable_util.h"
@@ -36,7 +37,6 @@ void MutableEntry::Init(WriteTransaction* trans,
kernel->put(MTIME, now);
// We match the database defaults here
kernel->put(BASE_VERSION, CHANGES_VERSION);
- kernel->put(SERVER_ORDINAL_IN_PARENT, NodeOrdinal::CreateInitialOrdinal());
// Normally the SPECIFICS setting code is wrapped in logic to deal with
// unknown fields and encryption. Since all we want to do here is ensure that
@@ -63,8 +63,19 @@ MutableEntry::MutableEntry(WriteTransaction* trans,
: Entry(trans),
write_transaction_(trans) {
Init(trans, model_type, parent_id, name);
- bool insert_result = trans->directory()->InsertEntry(trans, kernel_);
- DCHECK(insert_result);
+ // We need to have a valid position ready before we can index the item.
+ if (model_type == BOOKMARKS) {
+ // Base the tag off of our cache-guid and local "c-" style ID.
+ std::string unique_tag = syncable::GenerateSyncableBookmarkHash(
+ trans->directory()->cache_guid(), Get(ID).GetServerId());
+ kernel_->put(UNIQUE_BOOKMARK_TAG, unique_tag);
+ kernel_->put(UNIQUE_POSITION, UniquePosition::InitialPosition(unique_tag));
+ } else {
+ DCHECK(!ShouldMaintainPosition());
+ }
+
+ bool result = trans->directory()->InsertEntry(trans, kernel_);
+ DCHECK(result);
}
MutableEntry::MutableEntry(WriteTransaction* trans, CreateNewUpdateItem,
@@ -80,7 +91,6 @@ MutableEntry::MutableEntry(WriteTransaction* trans, CreateNewUpdateItem,
kernel->put(ID, id);
kernel->put(META_HANDLE, trans->directory_->NextMetahandle());
kernel->mark_dirty(trans->directory_->kernel_->dirty_metahandles);
- kernel->put(SERVER_ORDINAL_IN_PARENT, NodeOrdinal::CreateInitialOrdinal());
kernel->put(IS_DEL, true);
// We match the database defaults here
kernel->put(BASE_VERSION, CHANGES_VERSION);
@@ -118,10 +128,6 @@ bool MutableEntry::PutIsDel(bool is_del) {
return true;
}
if (is_del) {
- if (!UnlinkFromOrder()) {
- return false;
- }
-
// If the server never knew about this item and it's deleted then we don't
// need to keep it around. Unsetting IS_UNSYNCED will:
// - Ensure that the item is never committed to the server.
@@ -139,20 +145,13 @@ bool MutableEntry::PutIsDel(bool is_del) {
ScopedKernelLock lock(dir());
// Some indices don't include deleted items and must be updated
// upon a value change.
- ScopedIndexUpdater<ParentIdAndHandleIndexer> updater(lock, kernel_,
- dir()->kernel_->parent_id_child_index);
+ ScopedParentChildIndexUpdater updater(lock, kernel_,
+ dir()->kernel_->parent_child_index);
kernel_->put(IS_DEL, is_del);
kernel_->mark_dirty(dir()->kernel_->dirty_metahandles);
}
- if (!is_del)
- // Restores position to the 0th index.
- if (!PutPredecessor(Id())) {
- // TODO(lipalani) : Propagate the error to caller. crbug.com/100444.
- NOTREACHED();
- }
-
return true;
}
@@ -185,11 +184,12 @@ bool MutableEntry::Put(IdField field, const Id& value) {
if (!dir()->ReindexId(write_transaction(), kernel_, value))
return false;
} else if (PARENT_ID == field) {
- PutParentIdPropertyOnly(value); // Makes sibling order inconsistent.
- // Fixes up the sibling order inconsistency.
- if (!PutPredecessor(Id())) {
- // TODO(lipalani) : Propagate the error to caller. crbug.com/100444.
- NOTREACHED();
+ PutParentIdPropertyOnly(value);
+ if (!Get(IS_DEL)) {
+ if (!PutPredecessor(Id())) {
+ // TODO(lipalani) : Propagate the error to caller. crbug.com/100444.
+ NOTREACHED();
+ }
}
} else {
kernel_->put(field, value);
@@ -199,15 +199,16 @@ bool MutableEntry::Put(IdField field, const Id& value) {
return true;
}
-bool MutableEntry::Put(OrdinalField field, const NodeOrdinal& value) {
+bool MutableEntry::Put(UniquePositionField field, const UniquePosition& value) {
DCHECK(kernel_);
- DCHECK(value.IsValid());
write_transaction_->SaveOriginal(kernel_);
if(!kernel_->ref(field).Equals(value)) {
+ // We should never overwrite a valid position with an invalid one.
+ DCHECK(value.IsValid());
ScopedKernelLock lock(dir());
- if (SERVER_ORDINAL_IN_PARENT == field) {
- ScopedIndexUpdater<ParentIdAndHandleIndexer> updater(
- lock, kernel_, dir()->kernel_->parent_id_child_index);
+ if (UNIQUE_POSITION == field) {
+ ScopedParentChildIndexUpdater updater(
+ lock, kernel_, dir()->kernel_->parent_child_index);
kernel_->put(field, value);
} else {
kernel_->put(field, value);
@@ -375,67 +376,34 @@ bool MutableEntry::Put(IndexedBitField field, bool value) {
return true;
}
-bool MutableEntry::UnlinkFromOrder() {
- ScopedKernelLock lock(dir());
- return dir()->UnlinkEntryFromOrder(kernel_,
- write_transaction(),
- &lock,
- NODE_MANIPULATION);
+void MutableEntry::PutUniqueBookmarkTag(const std::string& tag) {
+ // This unique tag will eventually be used as the unique suffix when adjusting
+ // this bookmark's position. Let's make sure it's a valid suffix.
+ if (!UniquePosition::IsValidSuffix(tag)) {
+ NOTREACHED();
+ return;
+ }
+
+ if (!kernel_->ref(UNIQUE_BOOKMARK_TAG).empty()
+ && tag != kernel_->ref(UNIQUE_BOOKMARK_TAG)) {
+ // There is only one scenario where our tag is expected to change: when our
+ // current tag is an incorrect tag that was assigned during the
+ // UniquePosition migration.
+ std::string migration_generated_tag =
+ GenerateSyncableBookmarkHash(std::string(),
+ kernel_->ref(ID).GetServerId());
+ DCHECK_EQ(migration_generated_tag, kernel_->ref(UNIQUE_BOOKMARK_TAG));
+ }
+
+ kernel_->put(UNIQUE_BOOKMARK_TAG, tag);
+ kernel_->mark_dirty(dir()->kernel_->dirty_metahandles);
}
bool MutableEntry::PutPredecessor(const Id& predecessor_id) {
- if (!UnlinkFromOrder())
+ MutableEntry predecessor(write_transaction_, GET_BY_ID, predecessor_id);
+ if (!predecessor.good())
return false;
-
- if (Get(IS_DEL)) {
- DCHECK(predecessor_id.IsNull());
- return true;
- }
-
- // TODO(ncarter): It should be possible to not maintain position for
- // non-bookmark items. However, we'd need to robustly handle all possible
- // permutations of setting IS_DEL and the SPECIFICS to identify the
- // object type; or else, we'd need to add a ModelType to the
- // MutableEntry's Create ctor.
- // if (!ShouldMaintainPosition()) {
- // return false;
- // }
-
- // This is classic insert-into-doubly-linked-list from CS 101 and your last
- // job interview. An "IsRoot" Id signifies the head or tail.
- Id successor_id;
- if (!predecessor_id.IsRoot()) {
- MutableEntry predecessor(write_transaction(), GET_BY_ID, predecessor_id);
- if (!predecessor.good()) {
- LOG(ERROR) << "Predecessor is not good : "
- << predecessor_id.GetServerId();
- return false;
- }
- if (predecessor.Get(PARENT_ID) != Get(PARENT_ID))
- return false;
- successor_id = predecessor.GetSuccessorId();
- predecessor.Put(NEXT_ID, Get(ID));
- } else {
- syncable::Directory* dir = trans()->directory();
- if (!dir->GetFirstChildId(trans(), Get(PARENT_ID), &successor_id)) {
- return false;
- }
- }
- if (!successor_id.IsRoot()) {
- MutableEntry successor(write_transaction(), GET_BY_ID, successor_id);
- if (!successor.good()) {
- LOG(ERROR) << "Successor is not good: "
- << successor_id.GetServerId();
- return false;
- }
- if (successor.Get(PARENT_ID) != Get(PARENT_ID))
- return false;
- successor.Put(PREV_ID, Get(ID));
- }
- DCHECK(predecessor_id != Get(ID));
- DCHECK(successor_id != Get(ID));
- Put(PREV_ID, predecessor_id);
- Put(NEXT_ID, successor_id);
+ dir()->PutPredecessor(kernel_, predecessor.kernel_);
return true;
}
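The rewritten MutableEntry::PutPredecessor no longer splices a doubly-linked list; it looks up the predecessor entry and delegates to Directory::PutPredecessor, which lives elsewhere in this patch. A rough sketch of the position arithmetic such a method performs, assuming After/Between factory helpers on UniquePosition (their exact signatures are not shown in this section, so treat them as assumptions):

// Positions |e| immediately after |predecessor| within |siblings|, the
// OrderedChildSet for their shared parent.
void PutPredecessorSketch(EntryKernel* e,
                          EntryKernel* predecessor,
                          const OrderedChildSet* siblings) {
  const std::string& suffix = e->ref(UNIQUE_BOOKMARK_TAG);
  OrderedChildSet::const_iterator it = siblings->find(predecessor);
  ++it;  // The entry currently ordered after |predecessor|, if any.
  UniquePosition pos = (it == siblings->end())
      ? UniquePosition::After(predecessor->ref(UNIQUE_POSITION), suffix)
      : UniquePosition::Between(predecessor->ref(UNIQUE_POSITION),
                                (*it)->ref(UNIQUE_POSITION), suffix);
  e->put(UNIQUE_POSITION, pos);
}

In the real code the new value has to be written through MutableEntry::Put(UNIQUE_POSITION, ...), which holds the kernel lock and uses a ScopedParentChildIndexUpdater, so that the ParentChildIndex stays consistent.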
diff --git a/sync/syncable/mutable_entry.h b/sync/syncable/mutable_entry.h
index 51cd794..134e4a9 100644
--- a/sync/syncable/mutable_entry.h
+++ b/sync/syncable/mutable_entry.h
@@ -7,7 +7,6 @@
#include "sync/base/sync_export.h"
#include "sync/internal_api/public/base/model_type.h"
-#include "sync/internal_api/public/base/node_ordinal.h"
#include "sync/syncable/entry.h"
#include "sync/syncable/metahandle_set.h"
@@ -52,7 +51,7 @@ class SYNC_EXPORT_PRIVATE MutableEntry : public Entry {
bool Put(Int64Field field, const int64& value);
bool Put(TimeField field, const base::Time& value);
bool Put(IdField field, const Id& value);
- bool Put(OrdinalField field, const NodeOrdinal& value);
+ bool Put(UniquePositionField field, const UniquePosition& value);
// Do a simple property-only update of the PARENT_ID field. Use with caution.
//
@@ -75,6 +74,8 @@ class SYNC_EXPORT_PRIVATE MutableEntry : public Entry {
}
bool Put(IndexedBitField field, bool value);
+ void PutUniqueBookmarkTag(const std::string& tag);
+
// Sets the position of this item, and updates the entry kernels of the
// adjacent siblings so that list invariants are maintained. Returns false
// and fails if |predecessor_id| does not identify a sibling. Pass the root
diff --git a/sync/syncable/nigori_util.cc b/sync/syncable/nigori_util.cc
index 4888f66..1b00dbd 100644
--- a/sync/syncable/nigori_util.cc
+++ b/sync/syncable/nigori_util.cc
@@ -108,12 +108,7 @@ bool VerifyDataTypeEncryptionForTest(
}
std::queue<Id> to_visit;
- Id id_string;
- if (!trans->directory()->GetFirstChildId(
- trans, type_root.Get(ID), &id_string)) {
- NOTREACHED();
- return false;
- }
+ Id id_string = type_root.GetFirstChildId();
to_visit.push(id_string);
while (!to_visit.empty()) {
id_string = to_visit.front();
@@ -127,12 +122,7 @@ bool VerifyDataTypeEncryptionForTest(
return false;
}
if (child.Get(IS_DIR)) {
- Id child_id_string;
- if (!trans->directory()->GetFirstChildId(
- trans, child.Get(ID), &child_id_string)) {
- NOTREACHED();
- return false;
- }
+ Id child_id_string = child.GetFirstChildId();
// Traverse the children.
to_visit.push(child_id_string);
}
diff --git a/sync/syncable/parent_child_index.cc b/sync/syncable/parent_child_index.cc
new file mode 100644
index 0000000..71fb92e
--- /dev/null
+++ b/sync/syncable/parent_child_index.cc
@@ -0,0 +1,115 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/syncable/parent_child_index.h"
+
+#include "base/stl_util.h"
+
+#include "sync/syncable/entry_kernel.h"
+#include "sync/syncable/syncable_id.h"
+
+namespace syncer {
+namespace syncable {
+
+bool ChildComparator::operator()(
+ const syncable::EntryKernel* a,
+ const syncable::EntryKernel* b) const {
+ const UniquePosition& a_pos = a->ref(UNIQUE_POSITION);
+ const UniquePosition& b_pos = b->ref(UNIQUE_POSITION);
+
+ if (a_pos.IsValid() && b_pos.IsValid()) {
+ // Position is important to this type.
+ return a_pos.LessThan(b_pos);
+ } else if (a_pos.IsValid() && !b_pos.IsValid()) {
+ // TODO(rlarocque): Remove this case.
+ // An item with a valid position is a sibling of one with an invalid
+ // position. We should not support this, but the tests rely on it. For
+ // now, just move all items with invalid positions to the right.
+ return true;
+ } else if (!a_pos.IsValid() && b_pos.IsValid()) {
+ // TODO(rlarocque): Remove this case.
+ // Mirror of the above case.
+ return false;
+ } else {
+ // Position doesn't matter.
+ DCHECK(!a->ref(UNIQUE_POSITION).IsValid());
+ DCHECK(!b->ref(UNIQUE_POSITION).IsValid());
+ return a->ref(ID) < b->ref(ID);
+ }
+}
+
+ParentChildIndex::ParentChildIndex() {
+}
+
+ParentChildIndex::~ParentChildIndex() {
+ STLDeleteContainerPairSecondPointers(
+ parent_children_map_.begin(), parent_children_map_.end());
+}
+
+bool ParentChildIndex::ShouldInclude(const EntryKernel* entry) {
+ // This index excludes deleted items and the root item. The root
+ // item is excluded so that it doesn't show up as a child of itself.
+ return !entry->ref(IS_DEL) && !entry->ref(ID).IsRoot();
+}
+
+bool ParentChildIndex::Insert(EntryKernel* entry) {
+ DCHECK(ShouldInclude(entry));
+
+ const syncable::Id& parent_id = entry->ref(PARENT_ID);
+ OrderedChildSet* children = NULL;
+ ParentChildrenMap::iterator i = parent_children_map_.find(parent_id);
+ if (i != parent_children_map_.end()) {
+ children = i->second;
+ } else {
+ children = new OrderedChildSet();
+ parent_children_map_.insert(std::make_pair(parent_id, children));
+ }
+
+ return children->insert(entry).second;
+}
+
+// Like the other containers used to help support the syncable::Directory, this
+// one does not own any EntryKernels. This function removes references to the
+// given EntryKernel but does not delete it.
+void ParentChildIndex::Remove(EntryKernel* e) {
+ ParentChildrenMap::iterator parent =
+ parent_children_map_.find(e->ref(PARENT_ID));
+ DCHECK(parent != parent_children_map_.end());
+
+ OrderedChildSet* children = parent->second;
+ OrderedChildSet::iterator j = children->find(e);
+ DCHECK(j != children->end());
+
+ children->erase(j);
+ if (children->empty()) {
+ delete children;
+ parent_children_map_.erase(parent);
+ }
+}
+
+bool ParentChildIndex::Contains(EntryKernel* e) const {
+ const syncable::Id& parent_id = e->ref(PARENT_ID);
+ ParentChildrenMap::const_iterator parent =
+ parent_children_map_.find(parent_id);
+ if (parent == parent_children_map_.end()) {
+ return false;
+ }
+ const OrderedChildSet* children = parent->second;
+ DCHECK(children && !children->empty());
+ return children->count(e) > 0;
+}
+
+const OrderedChildSet* ParentChildIndex::GetChildren(const syncable::Id& id) {
+ ParentChildrenMap::iterator parent = parent_children_map_.find(id);
+ if (parent == parent_children_map_.end()) {
+ return NULL;
+ }
+
+ // A successful lookup implies at least some children exist.
+ DCHECK(!parent->second->empty());
+ return parent->second;
+}
+
+} // namespace syncable
+} // namespace syncer
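ParentChildIndex replaces the old parent_id_child_index as the source of sibling ordering, which is why Entry::GetPredecessorId(), GetSuccessorId() and GetFirstChildId() now delegate to the Directory. A minimal sketch of how a successor lookup can be answered from this index; the Directory's actual implementation is elsewhere in the patch and may differ:

// Returns the ID of the sibling ordered immediately after |kernel|, or the
// root ID if |kernel| is the last (or only) child of its parent.
Id GetSuccessorIdSketch(ParentChildIndex* index, EntryKernel* kernel) {
  if (!ParentChildIndex::ShouldInclude(kernel))
    return Id();  // Deleted items and the root have no ordered siblings.
  const OrderedChildSet* siblings = index->GetChildren(kernel->ref(PARENT_ID));
  OrderedChildSet::const_iterator it = siblings->find(kernel);
  ++it;
  return (it == siblings->end()) ? Id() : (*it)->ref(ID);
}

GetPredecessorId() is the mirror image: step backwards from the found iterator, returning the root ID when the item is already the first child.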
diff --git a/sync/syncable/parent_child_index.h b/sync/syncable/parent_child_index.h
new file mode 100644
index 0000000..fd0f2e8
--- /dev/null
+++ b/sync/syncable/parent_child_index.h
@@ -0,0 +1,66 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_SYNCABLE_PARENT_CHILD_INDEX
+#define SYNC_SYNCABLE_PARENT_CHILD_INDEX
+
+#include <map>
+#include <set>
+
+#include "base/basictypes.h"
+#include "sync/base/sync_export.h"
+
+namespace syncer {
+namespace syncable {
+
+struct EntryKernel;
+class Id;
+class ParentChildIndex;
+
+// A node ordering function.
+struct SYNC_EXPORT_PRIVATE ChildComparator {
+ bool operator() (const EntryKernel* a, const EntryKernel* b) const;
+};
+
+// An ordered set of nodes.
+typedef std::set<EntryKernel*, ChildComparator> OrderedChildSet;
+
+// Container that tracks parent-child relationships.
+// Provides fast lookup of all items under a given parent.
+class SYNC_EXPORT_PRIVATE ParentChildIndex {
+ public:
+ ParentChildIndex();
+ ~ParentChildIndex();
+
+ // Returns whether or not this entry belongs in the index.
+ // True for all non-deleted, non-root entries.
+ static bool ShouldInclude(const EntryKernel* e);
+
+ // Inserts a given child into the index.
+ bool Insert(EntryKernel* e);
+
+ // Removes a given child from the index.
+ void Remove(EntryKernel* e);
+
+ // Returns true if this item is in the index as a child.
+ bool Contains(EntryKernel* e) const;
+
+ // Returns all children of the entry with the given Id. Returns NULL if the
+ // node has no children or the Id does not identify a valid directory node.
+ const OrderedChildSet* GetChildren(const Id& id);
+
+ private:
+ typedef std::map<syncable::Id, OrderedChildSet*> ParentChildrenMap;
+
+ // A map of parent IDs to children.
+ // Parents with no children are not included in this map.
+ ParentChildrenMap parent_children_map_;
+
+ DISALLOW_COPY_AND_ASSIGN(ParentChildIndex);
+};
+
+} // namespace syncable
+} // namespace syncer
+
+#endif // SYNC_SYNCABLE_PARENT_CHILD_INDEX
diff --git a/sync/syncable/parent_child_index_unittest.cc b/sync/syncable/parent_child_index_unittest.cc
new file mode 100644
index 0000000..a2058c5
--- /dev/null
+++ b/sync/syncable/parent_child_index_unittest.cc
@@ -0,0 +1,344 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/syncable/parent_child_index.h"
+
+#include <list>
+
+#include "base/stl_util.h"
+#include "base/strings/string_number_conversions.h"
+#include "sync/syncable/entry_kernel.h"
+#include "sync/syncable/syncable_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace syncer {
+namespace syncable {
+
+namespace {
+
+static const std::string kCacheGuid = "8HhNIHlEOCGQbIAALr9QEg==";
+
+class ParentChildIndexTest : public testing::Test {
+ public:
+ void TearDown() {
+ // To make memory management easier, we take ownership of all EntryKernels
+ // returned by our factory methods and delete them here.
+ STLDeleteElements(&owned_entry_kernels_);
+ }
+
+ // Unfortunately, we can't use the regular Entry factory methods, because the
+ // ParentChildIndex deals in EntryKernels.
+
+ static syncable::Id GetBookmarkRootId() {
+ return syncable::Id::CreateFromServerId("bookmark_folder");
+ }
+
+ static syncable::Id GetBookmarkId(int n) {
+ return syncable::Id::CreateFromServerId("b" + base::IntToString(n));
+ }
+
+ static syncable::Id GetClientUniqueId(int n) {
+ return syncable::Id::CreateFromServerId("c" + base::IntToString(n));
+ }
+
+ EntryKernel* MakeRoot() {
+ // Mimics the root node.
+ EntryKernel* root = new EntryKernel();
+ root->put(META_HANDLE, 1);
+ root->put(BASE_VERSION, -1);
+ root->put(SERVER_VERSION, 0);
+ root->put(IS_DIR, true);
+ root->put(ID, syncable::Id());
+ root->put(PARENT_ID, syncable::Id());
+ root->put(SERVER_PARENT_ID, syncable::Id());
+
+ owned_entry_kernels_.push_back(root);
+ return root;
+ }
+
+ EntryKernel* MakeBookmarkRoot() {
+ // Mimics a server-created bookmark folder.
+ EntryKernel* folder = new EntryKernel;
+ folder->put(META_HANDLE, 1);
+ folder->put(BASE_VERSION, 9);
+ folder->put(SERVER_VERSION, 9);
+ folder->put(IS_DIR, true);
+ folder->put(ID, GetBookmarkRootId());
+ folder->put(SERVER_PARENT_ID, syncable::Id());
+ folder->put(PARENT_ID, syncable::Id());
+ folder->put(UNIQUE_SERVER_TAG, "google_chrome_bookmarks");
+
+ owned_entry_kernels_.push_back(folder);
+ return folder;
+ }
+
+ EntryKernel* MakeBookmark(int n, int pos, bool is_dir) {
+ // Mimics a regular bookmark or folder.
+ EntryKernel* bm = new EntryKernel();
+ bm->put(META_HANDLE, n);
+ bm->put(BASE_VERSION, 10);
+ bm->put(SERVER_VERSION, 10);
+ bm->put(IS_DIR, is_dir);
+ bm->put(ID, GetBookmarkId(n));
+ bm->put(PARENT_ID, GetBookmarkRootId());
+ bm->put(SERVER_PARENT_ID, GetBookmarkRootId());
+
+ bm->put(UNIQUE_BOOKMARK_TAG,
+ syncable::GenerateSyncableBookmarkHash(kCacheGuid,
+ bm->ref(ID).GetServerId()));
+
+ UniquePosition unique_pos =
+ UniquePosition::FromInt64(pos, bm->ref(UNIQUE_BOOKMARK_TAG));
+ bm->put(UNIQUE_POSITION, unique_pos);
+ bm->put(SERVER_UNIQUE_POSITION, unique_pos);
+
+ owned_entry_kernels_.push_back(bm);
+ return bm;
+ }
+
+ EntryKernel* MakeUniqueClientItem(int n) {
+ EntryKernel* item = new EntryKernel();
+ item->put(META_HANDLE, n);
+ item->put(BASE_VERSION, 10);
+ item->put(SERVER_VERSION, 10);
+ item->put(IS_DIR, false);
+ item->put(ID, GetClientUniqueId(n));
+ item->put(PARENT_ID, syncable::Id());
+ item->put(SERVER_PARENT_ID, syncable::Id());
+ item->put(UNIQUE_CLIENT_TAG, base::IntToString(n));
+
+ owned_entry_kernels_.push_back(item);
+ return item;
+ }
+
+ ParentChildIndex index_;
+
+ private:
+ std::list<EntryKernel*> owned_entry_kernels_;
+};
+
+TEST_F(ParentChildIndexTest, TestRootNode) {
+ EntryKernel* root = MakeRoot();
+ EXPECT_FALSE(ParentChildIndex::ShouldInclude(root));
+}
+
+TEST_F(ParentChildIndexTest, TestBookmarkRootFolder) {
+ EntryKernel* bm_folder = MakeBookmarkRoot();
+ EXPECT_TRUE(ParentChildIndex::ShouldInclude(bm_folder));
+}
+
+// Tests iteration over a set of siblings.
+TEST_F(ParentChildIndexTest, ChildInsertionAndIteration) {
+ EntryKernel* bm_folder = MakeBookmarkRoot();
+ index_.Insert(bm_folder);
+
+ // Make some folder and non-folder entries.
+ EntryKernel* b1 = MakeBookmark(1, 1, false);
+ EntryKernel* b2 = MakeBookmark(2, 2, false);
+ EntryKernel* b3 = MakeBookmark(3, 3, true);
+ EntryKernel* b4 = MakeBookmark(4, 4, false);
+
+ // Insert them out-of-order to test different cases.
+ index_.Insert(b3); // Only child.
+ index_.Insert(b4); // Right-most child.
+ index_.Insert(b1); // Left-most child.
+ index_.Insert(b2); // Between existing items.
+
+ // Double-check they've been added.
+ EXPECT_TRUE(index_.Contains(b1));
+ EXPECT_TRUE(index_.Contains(b2));
+ EXPECT_TRUE(index_.Contains(b3));
+ EXPECT_TRUE(index_.Contains(b4));
+
+ // Check the ordering.
+ const OrderedChildSet* children = index_.GetChildren(GetBookmarkRootId());
+ ASSERT_TRUE(children);
+ ASSERT_EQ(children->size(), 4UL);
+ OrderedChildSet::const_iterator it = children->begin();
+ EXPECT_EQ(*it, b1);
+ it++;
+ EXPECT_EQ(*it, b2);
+ it++;
+ EXPECT_EQ(*it, b3);
+ it++;
+ EXPECT_EQ(*it, b4);
+ it++;
+ EXPECT_TRUE(it == children->end());
+}
+
+// Tests iteration when hierarchy is involved.
+TEST_F(ParentChildIndexTest, ChildInsertionAndIterationWithHierarchy) {
+ EntryKernel* bm_folder = MakeBookmarkRoot();
+ index_.Insert(bm_folder);
+
+ // Just below the bookmark root, we have folders f1, f2, and f3.
+ EntryKernel* f1 = MakeBookmark(1, 1, false);
+ EntryKernel* f2 = MakeBookmark(2, 2, false);
+ EntryKernel* f3 = MakeBookmark(3, 3, false);
+
+ // Under folder f1, we have two bookmarks.
+ EntryKernel* f1_b1 = MakeBookmark(101, 1, false);
+ f1_b1->put(PARENT_ID, GetBookmarkId(1));
+ EntryKernel* f1_b2 = MakeBookmark(102, 2, false);
+ f1_b2->put(PARENT_ID, GetBookmarkId(1));
+
+ // Under folder f2, there is one bookmark.
+ EntryKernel* f2_b1 = MakeBookmark(201, 1, false);
+ f2_b1->put(PARENT_ID, GetBookmarkId(2));
+
+ // Under folder f3, there is nothing.
+
+ // Insert in a strange order, because we can.
+ index_.Insert(f1_b2);
+ index_.Insert(f2);
+ index_.Insert(f2_b1);
+ index_.Insert(f1);
+ index_.Insert(f1_b1);
+ index_.Insert(f3);
+
+ OrderedChildSet::const_iterator it;
+
+ // Iterate over children of the bookmark root.
+ const OrderedChildSet* top_children = index_.GetChildren(GetBookmarkRootId());
+ ASSERT_TRUE(top_children);
+ ASSERT_EQ(top_children->size(), 3UL);
+ it = top_children->begin();
+ EXPECT_EQ(*it, f1);
+ it++;
+ EXPECT_EQ(*it, f2);
+ it++;
+ EXPECT_EQ(*it, f3);
+ it++;
+ EXPECT_TRUE(it == top_children->end());
+
+ // Iterate over children of the first folder.
+ const OrderedChildSet* f1_children = index_.GetChildren(GetBookmarkId(1));
+ ASSERT_TRUE(f1_children);
+ ASSERT_EQ(f1_children->size(), 2UL);
+ it = f1_children->begin();
+ EXPECT_EQ(*it, f1_b1);
+ it++;
+ EXPECT_EQ(*it, f1_b2);
+ it++;
+ EXPECT_TRUE(it == f1_children->end());
+
+ // Iterate over children of the second folder.
+ const OrderedChildSet* f2_children = index_.GetChildren(GetBookmarkId(2));
+ ASSERT_TRUE(f2_children);
+ ASSERT_EQ(f2_children->size(), 1UL);
+ it = f2_children->begin();
+ EXPECT_EQ(*it, f2_b1);
+ it++;
+ EXPECT_TRUE(it == f2_children->end());
+
+ // Check for children of the third folder.
+ const OrderedChildSet* f3_children = index_.GetChildren(GetBookmarkId(3));
+ EXPECT_FALSE(f3_children);
+}
+
+// Tests removing items.
+TEST_F(ParentChildIndexTest, RemoveWithHierarchy) {
+ EntryKernel* bm_folder = MakeBookmarkRoot();
+ index_.Insert(bm_folder);
+
+ // Just below the root, we have folders f1, f2 and f3.
+ EntryKernel* f1 = MakeBookmark(1, 1, false);
+ EntryKernel* f2 = MakeBookmark(2, 2, false);
+ EntryKernel* f3 = MakeBookmark(3, 3, false);
+
+ // Under folder f1, we have two bookmarks.
+ EntryKernel* f1_b1 = MakeBookmark(101, 1, false);
+ f1_b1->put(PARENT_ID, GetBookmarkId(1));
+ EntryKernel* f1_b2 = MakeBookmark(102, 2, false);
+ f1_b2->put(PARENT_ID, GetBookmarkId(1));
+
+ // Under folder f2, there is one bookmark.
+ EntryKernel* f2_b1 = MakeBookmark(201, 1, false);
+ f2_b1->put(PARENT_ID, GetBookmarkId(2));
+
+ // Under folder f3, there is nothing.
+
+ // Insert in any order.
+ index_.Insert(f2_b1);
+ index_.Insert(f3);
+ index_.Insert(f1_b2);
+ index_.Insert(f1);
+ index_.Insert(f2);
+ index_.Insert(f1_b1);
+
+ // Check that all are in the index.
+ EXPECT_TRUE(index_.Contains(f1));
+ EXPECT_TRUE(index_.Contains(f2));
+ EXPECT_TRUE(index_.Contains(f3));
+ EXPECT_TRUE(index_.Contains(f1_b1));
+ EXPECT_TRUE(index_.Contains(f1_b2));
+ EXPECT_TRUE(index_.Contains(f2_b1));
+
+ // Remove them all in any order.
+ index_.Remove(f3);
+ EXPECT_FALSE(index_.Contains(f3));
+ index_.Remove(f1_b2);
+ EXPECT_FALSE(index_.Contains(f1_b2));
+ index_.Remove(f2_b1);
+ EXPECT_FALSE(index_.Contains(f2_b1));
+ index_.Remove(f1);
+ EXPECT_FALSE(index_.Contains(f1));
+ index_.Remove(f2);
+ EXPECT_FALSE(index_.Contains(f2));
+ index_.Remove(f1_b1);
+ EXPECT_FALSE(index_.Contains(f1_b1));
+}
+
+// Test that involves two non-ordered items.
+TEST_F(ParentChildIndexTest, UnorderedChildren) {
+ // Make two unique client tag items under the root node.
+ EntryKernel* u1 = MakeUniqueClientItem(1);
+ EntryKernel* u2 = MakeUniqueClientItem(2);
+
+ EXPECT_FALSE(u1->ShouldMaintainPosition());
+ EXPECT_FALSE(u2->ShouldMaintainPosition());
+
+ index_.Insert(u1);
+ index_.Insert(u2);
+
+ const OrderedChildSet* children = index_.GetChildren(syncable::Id());
+ ASSERT_TRUE(children);
+ EXPECT_EQ(children->count(u1), 1UL);
+ EXPECT_EQ(children->count(u2), 1UL);
+ EXPECT_EQ(children->size(), 2UL);
+}
+
+// Test ordered and non-ordered entries under the same parent.
+// TODO(rlarocque): We should not need to support this.
+TEST_F(ParentChildIndexTest, OrderedAndUnorderedChildren) {
+ EntryKernel* bm_folder = MakeBookmarkRoot();
+ index_.Insert(bm_folder);
+
+ EntryKernel* b1 = MakeBookmark(1, 1, false);
+ EntryKernel* b2 = MakeBookmark(2, 2, false);
+ EntryKernel* u1 = MakeUniqueClientItem(1);
+ u1->put(PARENT_ID, GetBookmarkRootId());
+
+ index_.Insert(b1);
+ index_.Insert(u1);
+ index_.Insert(b2);
+
+ const OrderedChildSet* children = index_.GetChildren(GetBookmarkRootId());
+ ASSERT_TRUE(children);
+ EXPECT_EQ(children->size(), 3UL);
+
+ // Ensure that the non-positionable item is moved to the far right.
+ OrderedChildSet::const_iterator it = children->begin();
+ EXPECT_EQ(*it, b1);
+ it++;
+ EXPECT_EQ(*it, b2);
+ it++;
+ EXPECT_EQ(*it, u1);
+ it++;
+ EXPECT_TRUE(it == children->end());
+}
+
+} // namespace
+} // namespace syncable
+} // namespace syncer
+
diff --git a/sync/syncable/scoped_parent_child_index_updater.cc b/sync/syncable/scoped_parent_child_index_updater.cc
new file mode 100644
index 0000000..0dc3e95
--- /dev/null
+++ b/sync/syncable/scoped_parent_child_index_updater.cc
@@ -0,0 +1,28 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sync/syncable/scoped_parent_child_index_updater.h"
+
+#include "sync/syncable/parent_child_index.h"
+
+namespace syncer {
+namespace syncable {
+
+ScopedParentChildIndexUpdater::ScopedParentChildIndexUpdater(
+ ScopedKernelLock& proof_of_lock,
+ EntryKernel* entry,
+ ParentChildIndex* index) : entry_(entry), index_(index) {
+ if (ParentChildIndex::ShouldInclude(entry_)) {
+ index_->Remove(entry_);
+ }
+}
+
+ScopedParentChildIndexUpdater::~ScopedParentChildIndexUpdater() {
+ if (ParentChildIndex::ShouldInclude(entry_)) {
+ index_->Insert(entry_);
+ }
+}
+
+} // namespace syncable
+} // namespace syncer
diff --git a/sync/syncable/scoped_parent_child_index_updater.h b/sync/syncable/scoped_parent_child_index_updater.h
new file mode 100644
index 0000000..89385feb
--- /dev/null
+++ b/sync/syncable/scoped_parent_child_index_updater.h
@@ -0,0 +1,37 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SYNC_SYNCABLE_PARENT_CHILD_INDEX_UPDATER_H_
+#define SYNC_SYNCABLE_PARENT_CHILD_INDEX_UPDATER_H_
+
+#include "base/basictypes.h"
+#include "sync/base/sync_export.h"
+
+namespace syncer {
+namespace syncable {
+
+class ParentChildIndex;
+class ScopedKernelLock;
+struct EntryKernel;
+
+// Temporarily removes an item from the ParentChildIndex and re-adds it when
+// this object goes out of scope.
+class ScopedParentChildIndexUpdater {
+ public:
+ ScopedParentChildIndexUpdater(ScopedKernelLock& proof_of_lock,
+ EntryKernel* entry,
+ ParentChildIndex* index);
+ ~ScopedParentChildIndexUpdater();
+
+ private:
+ EntryKernel* entry_;
+ ParentChildIndex* index_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedParentChildIndexUpdater);
+};
+
+} // namespace syncable
+} // namespace syncer
+
+#endif // SYNC_SYNCABLE_PARENT_CHILD_INDEX_UPDATER_H_
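
A minimal usage sketch of the scoped updater above, assuming a caller that already holds the kernel lock. The helper name ReparentEntry and the exact include set (notably whichever header declares ScopedKernelLock) are illustrative and not part of this patch:

#include "sync/syncable/entry_kernel.h"
#include "sync/syncable/parent_child_index.h"
#include "sync/syncable/scoped_parent_child_index_updater.h"
#include "sync/syncable/syncable_id.h"

namespace syncer {
namespace syncable {

// Hypothetical helper: re-parent |entry| while keeping |index| consistent.
// |lock| stands in for the kernel lock the caller is assumed to hold.
void ReparentEntry(ScopedKernelLock& lock,
                   EntryKernel* entry,
                   ParentChildIndex* index,
                   const Id& new_parent_id) {
  // Removes |entry| from |index| now (if it is indexed at all) and re-inserts
  // it when |updater| is destroyed, so the index never holds a stale key.
  ScopedParentChildIndexUpdater updater(lock, entry, index);
  entry->put(PARENT_ID, new_parent_id);
}

}  // namespace syncable
}  // namespace syncer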
diff --git a/sync/syncable/syncable_columns.h b/sync/syncable/syncable_columns.h
index 6eda058..9d45c74 100644
--- a/sync/syncable/syncable_columns.h
+++ b/sync/syncable/syncable_columns.h
@@ -37,8 +37,6 @@ static const ColumnSpec g_metas_columns[] = {
{"id", "varchar(255) default \"r\""},
{"parent_id", "varchar(255) default \"r\""},
{"server_parent_id", "varchar(255) default \"r\""},
- {"prev_id", "varchar(255) default \"r\""},
- {"next_id", "varchar(255) default \"r\""},
//////////////////////////////////////
// bits
{"is_unsynced", "bit default 0"},
@@ -53,14 +51,16 @@ static const ColumnSpec g_metas_columns[] = {
{"server_non_unique_name", "varchar(255)"},
{"unique_server_tag", "varchar"},
{"unique_client_tag", "varchar"},
+ {"unique_bookmark_tag", "varchar"},
//////////////////////////////////////
// Blobs (serialized protos).
{"specifics", "blob"},
{"server_specifics", "blob"},
{"base_server_specifics", "blob"},
//////////////////////////////////////
- // Blobs (ordinals).
- {"server_ordinal_in_parent", "blob"},
+ // Blobs (positions).
+ {"server_unique_position", "blob"},
+ {"unique_position", "blob"},
};
// At least enforce that there are equal number of column names and fields.
diff --git a/sync/syncable/syncable_enum_conversions.cc b/sync/syncable/syncable_enum_conversions.cc
index b4425a6..8f27912 100644
--- a/sync/syncable/syncable_enum_conversions.cc
+++ b/sync/syncable/syncable_enum_conversions.cc
@@ -72,14 +72,12 @@ const char* GetTimeFieldString(TimeField time_field) {
}
const char* GetIdFieldString(IdField id_field) {
- ASSERT_ENUM_BOUNDS(ID, NEXT_ID,
+ ASSERT_ENUM_BOUNDS(ID, SERVER_PARENT_ID,
ID_FIELDS_BEGIN, ID_FIELDS_END - 1);
switch (id_field) {
ENUM_CASE(ID);
ENUM_CASE(PARENT_ID);
ENUM_CASE(SERVER_PARENT_ID);
- ENUM_CASE(PREV_ID);
- ENUM_CASE(NEXT_ID);
case ID_FIELDS_END: break;
}
NOTREACHED();
@@ -122,13 +120,14 @@ const char* GetBitFieldString(BitField bit_field) {
}
const char* GetStringFieldString(StringField string_field) {
- ASSERT_ENUM_BOUNDS(NON_UNIQUE_NAME, UNIQUE_CLIENT_TAG,
+ ASSERT_ENUM_BOUNDS(NON_UNIQUE_NAME, UNIQUE_BOOKMARK_TAG,
STRING_FIELDS_BEGIN, STRING_FIELDS_END - 1);
switch (string_field) {
ENUM_CASE(NON_UNIQUE_NAME);
ENUM_CASE(SERVER_NON_UNIQUE_NAME);
ENUM_CASE(UNIQUE_SERVER_TAG);
ENUM_CASE(UNIQUE_CLIENT_TAG);
+ ENUM_CASE(UNIQUE_BOOKMARK_TAG);
case STRING_FIELDS_END: break;
}
NOTREACHED();
@@ -148,12 +147,14 @@ const char* GetProtoFieldString(ProtoField proto_field) {
return "";
}
-const char* GetOrdinalFieldString(OrdinalField ordinal_field) {
- ASSERT_ENUM_BOUNDS(SERVER_ORDINAL_IN_PARENT, SERVER_ORDINAL_IN_PARENT,
- ORDINAL_FIELDS_BEGIN, ORDINAL_FIELDS_END - 1);
- switch(ordinal_field) {
- ENUM_CASE(SERVER_ORDINAL_IN_PARENT);
- case ORDINAL_FIELDS_END: break;
+const char* GetUniquePositionFieldString(UniquePositionField position_field) {
+ ASSERT_ENUM_BOUNDS(SERVER_UNIQUE_POSITION, UNIQUE_POSITION,
+ UNIQUE_POSITION_FIELDS_BEGIN,
+ UNIQUE_POSITION_FIELDS_END - 1);
+ switch(position_field) {
+ ENUM_CASE(SERVER_UNIQUE_POSITION);
+ ENUM_CASE(UNIQUE_POSITION);
+ case UNIQUE_POSITION_FIELDS_END: break;
}
NOTREACHED();
return "";
diff --git a/sync/syncable/syncable_enum_conversions.h b/sync/syncable/syncable_enum_conversions.h
index 3727420..12f6428 100644
--- a/sync/syncable/syncable_enum_conversions.h
+++ b/sync/syncable/syncable_enum_conversions.h
@@ -41,8 +41,8 @@ SYNC_EXPORT_PRIVATE const char* GetStringFieldString(StringField string_field);
SYNC_EXPORT_PRIVATE const char* GetProtoFieldString(ProtoField proto_field);
-SYNC_EXPORT_PRIVATE const char* GetOrdinalFieldString(
- OrdinalField ordinal_field);
+SYNC_EXPORT_PRIVATE const char* GetUniquePositionFieldString(
+ UniquePositionField position_field);
SYNC_EXPORT_PRIVATE const char* GetBitTempString(BitTemp bit_temp);
diff --git a/sync/syncable/syncable_enum_conversions_unittest.cc b/sync/syncable/syncable_enum_conversions_unittest.cc
index eac4a37..f74d130 100644
--- a/sync/syncable/syncable_enum_conversions_unittest.cc
+++ b/sync/syncable/syncable_enum_conversions_unittest.cc
@@ -77,9 +77,10 @@ TEST_F(SyncableEnumConversionsTest, GetProtoFieldString) {
GetProtoFieldString, PROTO_FIELDS_BEGIN, PROTO_FIELDS_END - 1);
}
-TEST_F(SyncableEnumConversionsTest, GetOrdinalFieldString) {
+TEST_F(SyncableEnumConversionsTest, GetUniquePositionFieldString) {
TestEnumStringFunction(
- GetOrdinalFieldString, ORDINAL_FIELDS_BEGIN, ORDINAL_FIELDS_END - 1);
+ GetUniquePositionFieldString,
+ UNIQUE_POSITION_FIELDS_BEGIN, UNIQUE_POSITION_FIELDS_END - 1);
}
TEST_F(SyncableEnumConversionsTest, GetBitTempString) {
diff --git a/sync/syncable/syncable_unittest.cc b/sync/syncable/syncable_unittest.cc
index b4772b9..f99ad23 100644
--- a/sync/syncable/syncable_unittest.cc
+++ b/sync/syncable/syncable_unittest.cc
@@ -19,7 +19,6 @@
#include "base/test/values_test_util.h"
#include "base/threading/platform_thread.h"
#include "base/values.h"
-#include "sync/internal_api/public/base/node_ordinal.h"
#include "sync/protocol/bookmark_specifics.pb.h"
#include "sync/syncable/directory_backing_store.h"
#include "sync/syncable/directory_change_delegate.h"
@@ -230,10 +229,10 @@ TEST_F(SyncableGeneralTest, ChildrenOps) {
Entry e(&rtrans, GET_BY_ID, id);
ASSERT_FALSE(e.good()); // Hasn't been written yet.
+ Entry root(&rtrans, GET_BY_ID, rtrans.root_id());
+ ASSERT_TRUE(root.good());
EXPECT_FALSE(dir.HasChildren(&rtrans, rtrans.root_id()));
- Id child_id;
- EXPECT_TRUE(dir.GetFirstChildId(&rtrans, rtrans.root_id(), &child_id));
- EXPECT_TRUE(child_id.IsRoot());
+ EXPECT_TRUE(root.GetFirstChildId().IsRoot());
}
{
@@ -254,10 +253,10 @@ TEST_F(SyncableGeneralTest, ChildrenOps) {
Entry child(&rtrans, GET_BY_HANDLE, written_metahandle);
ASSERT_TRUE(child.good());
+ Entry root(&rtrans, GET_BY_ID, rtrans.root_id());
+ ASSERT_TRUE(root.good());
EXPECT_TRUE(dir.HasChildren(&rtrans, rtrans.root_id()));
- Id child_id;
- EXPECT_TRUE(dir.GetFirstChildId(&rtrans, rtrans.root_id(), &child_id));
- EXPECT_EQ(e.Get(ID), child_id);
+ EXPECT_EQ(e.Get(ID), root.GetFirstChildId());
}
{
@@ -273,10 +272,10 @@ TEST_F(SyncableGeneralTest, ChildrenOps) {
Entry e(&rtrans, GET_BY_ID, id);
ASSERT_TRUE(e.good());
+ Entry root(&rtrans, GET_BY_ID, rtrans.root_id());
+ ASSERT_TRUE(root.good());
EXPECT_FALSE(dir.HasChildren(&rtrans, rtrans.root_id()));
- Id child_id;
- EXPECT_TRUE(dir.GetFirstChildId(&rtrans, rtrans.root_id(), &child_id));
- EXPECT_TRUE(child_id.IsRoot());
+ EXPECT_TRUE(root.GetFirstChildId().IsRoot());
}
dir.SaveChanges();
@@ -417,6 +416,32 @@ TEST_F(SyncableGeneralTest, ToValue) {
dir.SaveChanges();
}
+// Test that the bookmark tag generation algorithm remains unchanged.
+TEST_F(SyncableGeneralTest, BookmarkTagTest) {
+ InMemoryDirectoryBackingStore* store = new InMemoryDirectoryBackingStore("x");
+
+ // The two inputs that form the bookmark tag are the directory's cache_guid
+ // and its next_id value. We don't need to take any action to ensure
+ // consistent next_id values, but we do need to explicitly request that our
+ // InMemoryDirectoryBackingStore always return the same cache_guid.
+ store->request_consistent_cache_guid();
+
+ Directory dir(store, &handler_, NULL, NULL, NULL);
+ ASSERT_EQ(OPENED, dir.Open("x", &delegate_, NullTransactionObserver()));
+
+ {
+ WriteTransaction wtrans(FROM_HERE, UNITTEST, &dir);
+ MutableEntry bm(&wtrans, CREATE, BOOKMARKS, wtrans.root_id(), "bm");
+ bm.Put(IS_UNSYNCED, true);
+
+ // If this assertion fails, that might indicate that the algorithm used to
+ // generate bookmark tags has been modified. This could have implications
+ // for bookmark ordering. Please make sure you know what you're doing if
+ // you intend to make such a change.
+ ASSERT_EQ("6wHRAb3kbnXV5GHrejp4/c1y5tw=", bm.Get(UNIQUE_BOOKMARK_TAG));
+ }
+}
+
// A test fixture for syncable::Directory. Uses an in-memory database to keep
// the unit tests fast.
class SyncableDirectoryTest : public testing::Test {
@@ -1496,12 +1521,19 @@ TEST_F(SyncableDirectoryTest, OldClientLeftUnsyncedDeletedLocalItem) {
}
}
-TEST_F(SyncableDirectoryTest, OrdinalWithNullSurvivesSaveAndReload) {
+TEST_F(SyncableDirectoryTest, PositionWithNullSurvivesSaveAndReload) {
TestIdFactory id_factory;
Id null_child_id;
const char null_cstr[] = "\0null\0test";
std::string null_str(null_cstr, arraysize(null_cstr) - 1);
- NodeOrdinal null_ord = NodeOrdinal(null_str);
+ // Pad up to the minimum length with 0x7f characters, then add a string that
+ // contains a few NULLs to the end. This is slightly wrong, since the suffix
+ // part of a UniquePosition shouldn't contain NULLs, but it's good enough for
+ // this test.
+ std::string suffix =
+ std::string(UniquePosition::kSuffixLength - null_str.length(), '\x7f')
+ + null_str;
+ UniquePosition null_pos = UniquePosition::FromInt64(10, suffix);
{
WriteTransaction trans(FROM_HERE, UNITTEST, dir_.get());
@@ -1512,7 +1544,8 @@ TEST_F(SyncableDirectoryTest, OrdinalWithNullSurvivesSaveAndReload) {
MutableEntry child(&trans, CREATE, BOOKMARKS, parent.Get(ID), "child");
child.Put(IS_UNSYNCED, true);
- child.Put(SERVER_ORDINAL_IN_PARENT, null_ord);
+ child.Put(UNIQUE_POSITION, null_pos);
+ child.Put(SERVER_UNIQUE_POSITION, null_pos);
null_child_id = child.Get(ID);
}
@@ -1524,9 +1557,10 @@ TEST_F(SyncableDirectoryTest, OrdinalWithNullSurvivesSaveAndReload) {
Entry null_ordinal_child(&trans, GET_BY_ID, null_child_id);
EXPECT_TRUE(
- null_ord.Equals(null_ordinal_child.Get(SERVER_ORDINAL_IN_PARENT)));
+ null_pos.Equals(null_ordinal_child.Get(UNIQUE_POSITION)));
+ EXPECT_TRUE(
+ null_pos.Equals(null_ordinal_child.Get(SERVER_UNIQUE_POSITION)));
}
-
}
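
Outside this test, the suffix is normally not hand-padded: it is the entry's unique bookmark tag. A hedged sketch of the usual construction follows; the helper name is illustrative, and int64 is assumed to be the Chromium typedef from basictypes.h:

#include <string>

#include "base/basictypes.h"
#include "sync/internal_api/public/base/unique_position.h"
#include "sync/syncable/syncable_util.h"

// Illustrative only: build a position whose tie-breaking suffix is the
// bookmark tag derived from the originator fields, rather than the padded
// NULL-containing suffix used in the test above.
syncer::UniquePosition MakePositionForBookmark(
    const std::string& originator_cache_guid,
    const std::string& originator_client_item_id,
    int64 position_in_parent) {
  const std::string suffix = syncer::syncable::GenerateSyncableBookmarkHash(
      originator_cache_guid, originator_client_item_id);
  // FromInt64 maps the legacy int64 ordering onto a UniquePosition and uses
  // |suffix| to make the result globally unique.
  return syncer::UniquePosition::FromInt64(position_in_parent, suffix);
}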
// An OnDirectoryBackingStore that can be set to always fail SaveChanges.
@@ -1879,13 +1913,13 @@ TEST_F(OnDiskSyncableDirectoryTest,
update_post_save.ref((ProtoField)i).SerializeAsString())
<< "Blob field #" << i << " changed during save/load";
}
- for ( ; i < ORDINAL_FIELDS_END; ++i) {
- EXPECT_EQ(create_pre_save.ref((OrdinalField)i).ToInternalValue(),
- create_post_save.ref((OrdinalField)i).ToInternalValue())
- << "Blob field #" << i << " changed during save/load";
- EXPECT_EQ(update_pre_save.ref((OrdinalField)i).ToInternalValue(),
- update_post_save.ref((OrdinalField)i).ToInternalValue())
- << "Blob field #" << i << " changed during save/load";
+ for ( ; i < UNIQUE_POSITION_FIELDS_END; ++i) {
+ EXPECT_TRUE(create_pre_save.ref((UniquePositionField)i).Equals(
+ create_post_save.ref((UniquePositionField)i)))
+ << "Position field #" << i << " changed during save/load";
+ EXPECT_TRUE(update_pre_save.ref((UniquePositionField)i).Equals(
+ update_post_save.ref((UniquePositionField)i)))
+ << "Position field #" << i << " changed during save/load";
}
}
diff --git a/sync/syncable/syncable_util.cc b/sync/syncable/syncable_util.cc
index 857fc85..6d7f126 100644
--- a/sync/syncable/syncable_util.cc
+++ b/sync/syncable/syncable_util.cc
@@ -66,27 +66,13 @@ void ChangeEntryIDAndUpdateChildren(
while (i != children.end()) {
MutableEntry child_entry(trans, GET_BY_HANDLE, *i++);
CHECK(child_entry.good());
- // Use the unchecked setter here to avoid touching the child's NEXT_ID
- // and PREV_ID fields (which Put(PARENT_ID) would normally do to
- // maintain linked-list invariants). In this case, NEXT_ID and PREV_ID
- // among the children will be valid after the loop, since we update all
- // the children at once.
+ // Use the unchecked setter here to avoid touching the child's
+ // UNIQUE_POSITION field. In this case, UNIQUE_POSITION among the
+ // children will be valid after the loop, since we update all the children
+ // at once.
child_entry.PutParentIdPropertyOnly(new_id);
}
}
- // Update Id references on the previous and next nodes in the sibling
- // order. Do this by reinserting into the linked list; the first
- // step in PutPredecessor is to Unlink from the existing order, which
- // will overwrite the stale Id value from the adjacent nodes.
- if (entry->GetPredecessorId() == entry->GetSuccessorId() &&
- entry->GetPredecessorId() == old_id) {
- // We just need a shallow update to |entry|'s fields since it is already
- // self looped.
- entry->Put(NEXT_ID, new_id);
- entry->Put(PREV_ID, new_id);
- } else {
- entry->PutPredecessor(entry->GetPredecessorId());
- }
}
// Function to handle runtime failures on syncable code. Rather than crashing,
@@ -119,5 +105,12 @@ std::string GenerateSyncableHash(
return encode_output;
}
+std::string GenerateSyncableBookmarkHash(
+ const std::string& originator_cache_guid,
+ const std::string& originator_client_item_id) {
+ return syncable::GenerateSyncableHash(
+ BOOKMARKS, originator_cache_guid + originator_client_item_id);
+}
+
} // namespace syncable
} // namespace syncer
diff --git a/sync/syncable/syncable_util.h b/sync/syncable/syncable_util.h
index 4ea8a6f..465324d 100644
--- a/sync/syncable/syncable_util.h
+++ b/sync/syncable/syncable_util.h
@@ -44,6 +44,13 @@ SYNC_EXPORT_PRIVATE int GetUnsyncedEntries(BaseTransaction* trans,
SYNC_EXPORT_PRIVATE std::string GenerateSyncableHash(
ModelType model_type, const std::string& client_tag);
+// A helper for generating the bookmark type's tag. This is required in more
+// than one place, so we define the algorithm here to make sure the
+// implementation is consistent.
+SYNC_EXPORT_PRIVATE std::string GenerateSyncableBookmarkHash(
+ const std::string& originator_cache_guid,
+ const std::string& originator_client_item_id);
+
} // namespace syncable
} // namespace syncer
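
For reference, a hedged sketch tying the helper above to the UNIQUE_BOOKMARK_TAG checked in the unit test: the tag is simply the bookmark-type syncable hash of the concatenated originator fields. The GUID and item-id values below are made up for illustration, and the check function itself is not part of this patch:

#include <string>

#include "base/logging.h"
#include "sync/internal_api/public/base/model_type.h"
#include "sync/syncable/syncable_util.h"

// Illustrative values; any originator_cache_guid and "c-" style item id works.
void CheckBookmarkTagDerivation() {
  const std::string cache_guid = "kqyg7097kro6GSUod+GSg==";
  const std::string client_item_id = "-2147483552";
  const std::string tag = syncer::syncable::GenerateSyncableBookmarkHash(
      cache_guid, client_item_id);
  // By definition, this matches the generic hash of the concatenation.
  DCHECK_EQ(tag, syncer::syncable::GenerateSyncableHash(
                     syncer::BOOKMARKS, cache_guid + client_item_id));
}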
diff --git a/sync/test/engine/mock_connection_manager.cc b/sync/test/engine/mock_connection_manager.cc
index 3fd4a32..e5fd452 100644
--- a/sync/test/engine/mock_connection_manager.cc
+++ b/sync/test/engine/mock_connection_manager.cc
@@ -32,6 +32,7 @@ namespace syncer {
using syncable::WriteTransaction;
static char kValidAuthToken[] = "AuthToken";
+static char kCacheGuid[] = "kqyg7097kro6GSUod+GSg==";
MockConnectionManager::MockConnectionManager(syncable::Directory* directory)
: ServerConnectionManager("unused", 0, false),
@@ -343,6 +344,19 @@ sync_pb::SyncEntity* MockConnectionManager::AddUpdateMeta(
ent->set_mtime(sync_ts);
ent->set_ctime(1);
ent->set_position_in_parent(GeneratePositionInParent());
+
+ // This isn't perfect, but it works well enough. This is an update, which
+ // means the ID is a server ID, which means it never changes. By making
+ // kCacheGuid also never change, we guarantee that the same item always has
+ // the same originator_cache_guid and originator_client_item_id.
+ //
+ // Unfortunately, neither this class nor the tests that use it explicitly
+ // track sync entities, so supporting proper cache guids and client item IDs
+ // would require major refactoring. The ID used here ought to be the "c-"
+ // style ID that was sent up on the commit.
+ ent->set_originator_cache_guid(kCacheGuid);
+ ent->set_originator_client_item_id(id);
+
return ent;
}
@@ -395,9 +409,20 @@ sync_pb::SyncEntity* MockConnectionManager::AddUpdateFromLastCommit() {
last_commit_response().entryresponse(0).version());
ent->set_id_string(
last_commit_response().entryresponse(0).id_string());
+
+ // This is the same hack as in AddUpdateMeta. See the comment in that
+ // function for more information.
+ ent->set_originator_cache_guid(kCacheGuid);
+ ent->set_originator_client_item_id(
+ last_commit_response().entryresponse(0).id_string());
+
+ if (last_sent_commit().entries(0).has_unique_position()) {
+ ent->mutable_unique_position()->CopyFrom(
+ last_sent_commit().entries(0).unique_position());
+ }
+
// Tests don't currently care about the following:
- // originator_cache_guid, originator_client_item_id, parent_id_string,
- // name, non_unique_name.
+ // parent_id_string, name, non_unique_name.
}
return GetMutableLastUpdate();
}
diff --git a/sync/test/engine/mock_connection_manager.h b/sync/test/engine/mock_connection_manager.h
index 53b950bd..f4f9882 100644
--- a/sync/test/engine/mock_connection_manager.h
+++ b/sync/test/engine/mock_connection_manager.h
@@ -18,6 +18,7 @@
#include "sync/engine/net/server_connection_manager.h"
#include "sync/internal_api/public/base/model_type.h"
#include "sync/internal_api/public/base/model_type_invalidation_map.h"
+#include "sync/internal_api/public/base/unique_position.h"
#include "sync/protocol/sync.pb.h"
namespace syncer {
diff --git a/sync/test/test_directory_backing_store.h b/sync/test/test_directory_backing_store.h
index cbbe16e..54fe49e 100644
--- a/sync/test/test_directory_backing_store.h
+++ b/sync/test/test_directory_backing_store.h
@@ -48,7 +48,8 @@ class TestDirectoryBackingStore : public DirectoryBackingStore {
FRIEND_TEST_ALL_PREFIXES(DirectoryBackingStoreTest, MigrateVersion82To83);
FRIEND_TEST_ALL_PREFIXES(DirectoryBackingStoreTest, MigrateVersion83To84);
FRIEND_TEST_ALL_PREFIXES(DirectoryBackingStoreTest, MigrateVersion84To85);
- FRIEND_TEST_ALL_PREFIXES(DirectoryBackingStoreTest, DetectInvalidOrdinal);
+ FRIEND_TEST_ALL_PREFIXES(DirectoryBackingStoreTest, MigrateVersion85To86);
+ FRIEND_TEST_ALL_PREFIXES(DirectoryBackingStoreTest, DetectInvalidPosition);
FRIEND_TEST_ALL_PREFIXES(DirectoryBackingStoreTest, ModelTypeIds);
FRIEND_TEST_ALL_PREFIXES(DirectoryBackingStoreTest, Corruption);
FRIEND_TEST_ALL_PREFIXES(DirectoryBackingStoreTest, DeleteEntries);
diff --git a/sync/tools/testserver/chromiumsync.py b/sync/tools/testserver/chromiumsync.py
index 06dfa40..eecea5c 100644
--- a/sync/tools/testserver/chromiumsync.py
+++ b/sync/tools/testserver/chromiumsync.py
@@ -630,12 +630,14 @@ class SyncDataModel(object):
was changed and Chrome now sends up the absolute position. The server
must store a position_in_parent value and must not maintain
insert_after_item_id.
+ Starting in Jan 2013, the client will also send up a unique_position field
+ which should be saved and returned on subsequent GetUpdates.
Args:
entry: The entry for which to write a position. Its ID fields are
- assumed to be server IDs. This entry will have its parent_id_string
- and position_in_parent fields updated; its insert_after_item_id field
- will be cleared.
+ assumed to be server IDs. This entry will have its parent_id_string,
+ position_in_parent and unique_position fields updated; its
+ insert_after_item_id field will be cleared.
parent_id: The ID of the entry intended as the new parent.
"""
@@ -950,12 +952,14 @@ class SyncDataModel(object):
entry = MakeTombstone(entry.id_string)
else:
# Comments in sync.proto detail how the representation of positional
- # ordering works: either the 'insert_after_item_id' field or the
- # 'position_in_parent' field may determine the sibling order during
- # Commit operations. The 'position_in_parent' field provides an absolute
- # ordering in GetUpdates contexts. Here we assume the client will
- # always send a valid position_in_parent (this is the newer style), and
- # we ignore insert_after_item_id (an older style).
+ # ordering works.
+ #
+ # We've almost fully deprecated the 'insert_after_item_id' field.
+ # The 'position_in_parent' field is also deprecated, but as of Jan 2013
+ # is still in common use. The 'unique_position' field is the latest
+ # and greatest in positioning technology.
+ #
+ # This server supports 'position_in_parent' and 'unique_position'.
self._WritePosition(entry, entry.parent_id_string)
# Preserve the originator info, which the client is not required to send