-rwxr-xr-x[-rw-r--r--]chrome/browser/sync/engine/apply_updates_command_unittest.cc2
-rwxr-xr-x[-rw-r--r--]chrome/browser/sync/engine/build_and_process_conflict_sets_command.cc84
-rw-r--r--chrome/browser/sync/engine/build_commit_command.cc20
-rwxr-xr-x[-rw-r--r--]chrome/browser/sync/engine/conflict_resolution_view.h0
-rwxr-xr-x[-rw-r--r--]chrome/browser/sync/engine/conflict_resolver.cc276
-rwxr-xr-x[-rw-r--r--]chrome/browser/sync/engine/conflict_resolver.h28
-rw-r--r--chrome/browser/sync/engine/get_commit_ids_command.h3
-rwxr-xr-x[-rw-r--r--]chrome/browser/sync/engine/process_commit_response_command.cc34
-rwxr-xr-x[-rw-r--r--]chrome/browser/sync/engine/process_updates_command.cc9
-rwxr-xr-x[-rw-r--r--]chrome/browser/sync/engine/syncapi.cc42
-rwxr-xr-x[-rw-r--r--]chrome/browser/sync/engine/syncer.cc17
-rwxr-xr-x[-rw-r--r--]chrome/browser/sync/engine/syncer.h7
-rwxr-xr-x[-rw-r--r--]chrome/browser/sync/engine/syncer_proto_util.cc22
-rwxr-xr-x[-rw-r--r--]chrome/browser/sync/engine/syncer_proto_util.h9
-rw-r--r--chrome/browser/sync/engine/syncer_proto_util_unittest.cc61
-rwxr-xr-x[-rw-r--r--]chrome/browser/sync/engine/syncer_types.h5
-rwxr-xr-x[-rw-r--r--]chrome/browser/sync/engine/syncer_unittest.cc1924
-rwxr-xr-x[-rw-r--r--]chrome/browser/sync/engine/syncer_util.cc98
-rwxr-xr-x[-rw-r--r--]chrome/browser/sync/engine/syncer_util.h13
-rwxr-xr-x[-rw-r--r--]chrome/browser/sync/engine/update_applicator.cc0
-rwxr-xr-x[-rw-r--r--]chrome/browser/sync/engine/update_applicator.h0
-rw-r--r--chrome/browser/sync/engine/verify_updates_command.cc5
-rwxr-xr-x[-rw-r--r--]chrome/browser/sync/syncable/directory_backing_store.cc52
-rwxr-xr-x[-rw-r--r--]chrome/browser/sync/syncable/syncable.cc383
-rwxr-xr-x[-rw-r--r--]chrome/browser/sync/syncable/syncable.h296
-rwxr-xr-x[-rw-r--r--]chrome/browser/sync/syncable/syncable_columns.h5
-rwxr-xr-x[-rw-r--r--]chrome/browser/sync/syncable/syncable_unittest.cc291
-rwxr-xr-xchrome/chrome.gyp2
-rwxr-xr-x[-rw-r--r--]chrome/test/sync/engine/mock_server_connection.cc12
-rwxr-xr-x[-rw-r--r--]chrome/test/sync/engine/mock_server_connection.h9
-rwxr-xr-xchrome/test/sync/engine/test_syncable_utils.cc60
-rwxr-xr-xchrome/test/sync/engine/test_syncable_utils.h38
32 files changed, 1160 insertions, 2647 deletions
diff --git a/chrome/browser/sync/engine/apply_updates_command_unittest.cc b/chrome/browser/sync/engine/apply_updates_command_unittest.cc
index 795ca00..4076bcb 100644..100755
--- a/chrome/browser/sync/engine/apply_updates_command_unittest.cc
+++ b/chrome/browser/sync/engine/apply_updates_command_unittest.cc
@@ -47,7 +47,7 @@ class ApplyUpdatesCommandTest : public testing::Test {
ASSERT_TRUE(entry.good());
entry.Put(syncable::SERVER_VERSION, next_revision_++);
entry.Put(syncable::IS_UNAPPLIED_UPDATE, true);
- entry.Put(syncable::SERVER_NAME, item_id);
+
entry.Put(syncable::SERVER_NON_UNIQUE_NAME, item_id);
entry.Put(syncable::SERVER_PARENT_ID, Id::CreateFromServerId(parent_id));
entry.Put(syncable::SERVER_IS_DIR, true);
diff --git a/chrome/browser/sync/engine/build_and_process_conflict_sets_command.cc b/chrome/browser/sync/engine/build_and_process_conflict_sets_command.cc
index 239732c..edbd601 100644..100755
--- a/chrome/browser/sync/engine/build_and_process_conflict_sets_command.cc
+++ b/chrome/browser/sync/engine/build_and_process_conflict_sets_command.cc
@@ -92,28 +92,6 @@ void StoreLocalDataForUpdateRollback(syncable::Entry* entry,
*backup = entry->GetKernelCopy();
}
-class UniqueNameGenerator {
- public:
- void Initialize() {
- // To avoid name collisions we prefix the names with hex data derived from
- // 64 bits of randomness.
- int64 name_prefix = static_cast<int64>(base::RandUint64());
- name_stem_ = StringPrintf("%0" PRId64 "x.", name_prefix);
- }
- string StringNameForEntry(const syncable::Entry& entry) {
- CHECK(!name_stem_.empty());
- std::stringstream rv;
- rv << name_stem_ << entry.Get(syncable::ID);
- return rv.str();
- }
- PathString PathStringNameForEntry(const syncable::Entry& entry) {
- string name = StringNameForEntry(entry);
- return PathString(name.begin(), name.end());
- }
-
- private:
- string name_stem_;
-};
bool RollbackEntry(syncable::WriteTransaction* trans,
syncable::EntryKernel* backup) {
@@ -123,9 +101,9 @@ bool RollbackEntry(syncable::WriteTransaction* trans,
if (!entry.Put(syncable::IS_DEL, backup->ref(syncable::IS_DEL)))
return false;
- syncable::Name name = syncable::Name::FromEntryKernel(backup);
- if (!entry.PutParentIdAndName(backup->ref(syncable::PARENT_ID), name))
- return false;
+
+ entry.Put(syncable::NON_UNIQUE_NAME, backup->ref(syncable::NON_UNIQUE_NAME));
+ entry.Put(syncable::PARENT_ID, backup->ref(syncable::PARENT_ID));
if (!backup->ref(syncable::IS_DEL)) {
if (!entry.PutPredecessor(backup->ref(syncable::PREV_ID)))
@@ -146,26 +124,14 @@ bool RollbackEntry(syncable::WriteTransaction* trans,
return true;
}
-class TransactionalUpdateEntryPreparer {
- public:
- TransactionalUpdateEntryPreparer() {
- namegen_.Initialize();
- }
-
- void PrepareEntries(syncable::WriteTransaction* trans,
- const vector<syncable::Id>* ids) {
- vector<syncable::Id>::const_iterator it;
- for (it = ids->begin(); it != ids->end(); ++it) {
- syncable::MutableEntry entry(trans, syncable::GET_BY_ID, *it);
- syncable::Name random_name(namegen_.PathStringNameForEntry(entry));
- CHECK(entry.PutParentIdAndName(trans->root_id(), random_name));
- }
+void PlaceEntriesAtRoot(syncable::WriteTransaction* trans,
+ const vector<syncable::Id>* ids) {
+ vector<syncable::Id>::const_iterator it;
+ for (it = ids->begin(); it != ids->end(); ++it) {
+ syncable::MutableEntry entry(trans, syncable::GET_BY_ID, *it);
+ entry.Put(syncable::PARENT_ID, trans->root_id());
}
-
- private:
- UniqueNameGenerator namegen_;
- DISALLOW_COPY_AND_ASSIGN(TransactionalUpdateEntryPreparer);
-};
+}
} // namespace
@@ -208,13 +174,12 @@ bool BuildAndProcessConflictSetsCommand::ApplyUpdatesTransactionally(
StoreLocalDataForUpdateRollback(&entry, &rollback_data[i]);
}
- // 4. Use the preparer to move things to an initial starting state where no
- // names collide, and nothing in the set is a child of anything else. If
+  // 4. Move the entries to a starting state where nothing in the set
+  // is a child of anything else. If
// we've correctly calculated the set, the server tree is valid and no
// changes have occurred locally we should be able to apply updates from this
// state.
- TransactionalUpdateEntryPreparer preparer;
- preparer.PrepareEntries(trans, update_set);
+ PlaceEntriesAtRoot(trans, update_set);
// 5. Use the usual apply updates from the special start state we've just
// prepared.
@@ -229,7 +194,7 @@ bool BuildAndProcessConflictSetsCommand::ApplyUpdatesTransactionally(
// set with other failing updates, the swap may have gone through, meaning
// the roll back needs to be transactional. But as we're going to a known
// good state we should always succeed.
- preparer.PrepareEntries(trans, update_set);
+ PlaceEntriesAtRoot(trans, update_set);
// Rollback all entries.
for (size_t i = 0; i < rollback_data.size(); ++i) {
@@ -257,7 +222,7 @@ void BuildAndProcessConflictSetsCommand::BuildConflictSets(
view->EraseCommitConflict(i++);
continue;
}
- if (entry.ExistsOnClientBecauseDatabaseNameIsNonEmpty() &&
+ if (entry.ExistsOnClientBecauseNameIsNonEmpty() &&
(entry.Get(syncable::IS_DEL) || entry.Get(syncable::SERVER_IS_DEL))) {
// If we're deleted on client or server we can't be in a complex set.
++i;
@@ -265,10 +230,6 @@ void BuildAndProcessConflictSetsCommand::BuildConflictSets(
}
bool new_parent =
entry.Get(syncable::PARENT_ID) != entry.Get(syncable::SERVER_PARENT_ID);
- bool new_name = 0 != syncable::ComparePathNames(entry.GetSyncNameValue(),
- entry.Get(syncable::SERVER_NAME));
- if (new_parent || new_name)
- MergeSetsForNameClash(trans, &entry, view);
if (new_parent)
MergeSetsForIntroducedLoops(trans, &entry, view);
MergeSetsForNonEmptyDirectories(trans, &entry, view);
@@ -276,21 +237,6 @@ void BuildAndProcessConflictSetsCommand::BuildConflictSets(
}
}
-void BuildAndProcessConflictSetsCommand::MergeSetsForNameClash(
- syncable::BaseTransaction* trans, syncable::Entry* entry,
- ConflictResolutionView* view) {
- PathString server_name = entry->Get(syncable::SERVER_NAME);
- // Uncommitted entries have no server name. We trap this because the root
- // item has a null name and 0 parentid.
- if (server_name.empty())
- return;
- syncable::Id conflicting_id =
- SyncerUtil::GetNameConflictingItemId(
- trans, entry->Get(syncable::SERVER_PARENT_ID), server_name);
- if (syncable::kNullId != conflicting_id)
- view->MergeSets(entry->Get(syncable::ID), conflicting_id);
-}
-
void BuildAndProcessConflictSetsCommand::MergeSetsForIntroducedLoops(
syncable::BaseTransaction* trans, syncable::Entry* entry,
ConflictResolutionView* view) {
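
Note: PlaceEntriesAtRoot above replaces the old UniqueNameGenerator/TransactionalUpdateEntryPreparer pair; with per-parent name uniqueness gone, the transactional apply only needs every member of the conflict set reparented to the root so no member is a descendant of another. A minimal standalone sketch of that reparenting step, using hypothetical stand-in types rather than the real syncable::MutableEntry / syncable::Id API:

    // Illustrative model only; FakeEntry and string ids are stand-ins.
    #include <string>
    #include <unordered_map>
    #include <vector>

    struct FakeEntry {
      std::string parent_id;
    };

    void PlaceAtRoot(std::unordered_map<std::string, FakeEntry>* entries,
                     const std::vector<std::string>& conflict_set,
                     const std::string& root_id) {
      // Mirrors entry.Put(syncable::PARENT_ID, trans->root_id()) per id.
      for (const std::string& id : conflict_set)
        (*entries)[id].parent_id = root_id;
    }
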
diff --git a/chrome/browser/sync/engine/build_commit_command.cc b/chrome/browser/sync/engine/build_commit_command.cc
index 07ae99a..637ab39 100644
--- a/chrome/browser/sync/engine/build_commit_command.cc
+++ b/chrome/browser/sync/engine/build_commit_command.cc
@@ -22,7 +22,6 @@ using std::vector;
using syncable::ExtendedAttribute;
using syncable::Id;
using syncable::MutableEntry;
-using syncable::Name;
namespace browser_sync {
@@ -66,17 +65,16 @@ void BuildCommitCommand::ExecuteImpl(SyncerSession* session) {
// This is the only change we make to the entry in this function.
meta_entry.Put(syncable::SYNCING, true);
- Name name = meta_entry.GetName();
- CHECK(!name.value().empty()); // Make sure this isn't an update.
- sync_entry->set_name(name.value());
- // Set the non_unique_name if we have one. If we do, the server ignores
+ PathString name = meta_entry.Get(syncable::NON_UNIQUE_NAME);
+ CHECK(!name.empty()); // Make sure this isn't an update.
+ sync_entry->set_name(name);
+
+    // Set the non_unique_name. When it is set, the server ignores
// the |name| value (using |non_unique_name| instead), and will return
- // in the CommitResponse a unique name if one is generated. Even though
- // we could get away with only sending |name|, we send both because it
- // may aid in logging.
- if (name.value() != name.non_unique_value()) {
- sync_entry->set_non_unique_name(name.non_unique_value());
- }
+ // in the CommitResponse a unique name if one is generated.
+ // We send both because it may aid in logging.
+ sync_entry->set_non_unique_name(name);
+
// Deleted items with negative parent ids can be a problem so we set the
// parent to 0. (TODO(sync): Still true in protocol?).
Id new_parent_id;
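
Note: after this change the outgoing commit entity carries the same NON_UNIQUE_NAME value in both its name and non_unique_name fields. A hedged sketch of that population step, with a hypothetical FakeCommitEntity standing in for the sync protobuf type:

    // Hypothetical stand-in for the commit protobuf entity; only the two
    // name fields touched above are modeled.
    #include <string>

    struct FakeCommitEntity {
      std::string name;
      std::string non_unique_name;
    };

    void SetCommitNames(FakeCommitEntity* sync_entry,
                        const std::string& local_name) {
      sync_entry->name = local_name;             // legacy field, aids logging
      sync_entry->non_unique_name = local_name;  // field the server prefers
    }
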
diff --git a/chrome/browser/sync/engine/conflict_resolution_view.h b/chrome/browser/sync/engine/conflict_resolution_view.h
index e349d3e..e349d3e 100644..100755
--- a/chrome/browser/sync/engine/conflict_resolution_view.h
+++ b/chrome/browser/sync/engine/conflict_resolution_view.h
diff --git a/chrome/browser/sync/engine/conflict_resolver.cc b/chrome/browser/sync/engine/conflict_resolver.cc
index f67ea52..1f7896e 100644..100755
--- a/chrome/browser/sync/engine/conflict_resolver.cc
+++ b/chrome/browser/sync/engine/conflict_resolver.cc
@@ -23,9 +23,7 @@ using syncable::Directory;
using syncable::Entry;
using syncable::Id;
using syncable::MutableEntry;
-using syncable::Name;
using syncable::ScopedDirLookup;
-using syncable::SyncName;
using syncable::WriteTransaction;
namespace browser_sync {
@@ -38,88 +36,6 @@ ConflictResolver::ConflictResolver() {
ConflictResolver::~ConflictResolver() {
}
-namespace {
-// TODO(ncarter): Remove title/path conflicts and the code to resolve them.
-// This is historical cruft that seems to be actually reached by some users.
-inline PathString GetConflictPathnameBase(PathString base) {
- time_t time_since = time(NULL);
- struct tm* now = localtime(&time_since);
- // Use a fixed format as the locale's format may include '/' characters or
- // other illegal characters.
- PathString date = IntToPathString(now->tm_year + 1900);
- date.append(PSTR("-"));
- ++now->tm_mon; // tm_mon is 0-based.
- if (now->tm_mon < 10)
- date.append(PSTR("0"));
- date.append(IntToPathString(now->tm_mon));
- date.append(PSTR("-"));
- if (now->tm_mday < 10)
- date.append(PSTR("0"));
- date.append(IntToPathString(now->tm_mday));
- return base + PSTR(" (Edited on ") + date + PSTR(")");
-}
-
-// TODO(ncarter): Remove title/path conflicts and the code to resolve them.
-Name FindNewName(BaseTransaction* trans,
- Id parent_id,
- const SyncName& original_name) {
- const PathString name = original_name.value();
- // 255 is defined in our spec.
- const size_t allowed_length = kSyncProtocolMaxNameLengthBytes;
- // TODO(sync): How do we get length on other platforms? The limit is
- // checked in java on the server, so it's not the number of glyphs its the
- // number of 16 bit characters in the UTF-16 representation.
-
- // 10 characters for 32 bit numbers + 2 characters for brackets means 12
- // characters should be more than enough for the name. Doubling this ensures
- // that we will have enough space.
- COMPILE_ASSERT(kSyncProtocolMaxNameLengthBytes >= 24,
- maximum_name_too_short);
- CHECK(name.length() <= allowed_length);
-
- if (!Entry(trans,
- syncable::GET_BY_PARENTID_AND_DBNAME,
- parent_id,
- name).good())
- return Name::FromSyncName(original_name);
- PathString base = name;
- PathString ext;
- PathString::size_type ext_index = name.rfind('.');
- if (PathString::npos != ext_index && 0 != ext_index &&
- name.length() - ext_index < allowed_length / 2) {
- base = name.substr(0, ext_index);
- ext = name.substr(ext_index);
- }
-
- PathString name_base = GetConflictPathnameBase(base);
- if (name_base.length() + ext.length() > allowed_length) {
- name_base.resize(allowed_length - ext.length());
- TrimPathStringToValidCharacter(&name_base);
- }
- PathString new_name = name_base + ext;
- int n = 2;
- while (Entry(trans,
- syncable::GET_BY_PARENTID_AND_DBNAME,
- parent_id,
- new_name).good()) {
- PathString local_ext = PSTR("(");
- local_ext.append(IntToPathString(n));
- local_ext.append(PSTR(")"));
- local_ext.append(ext);
- if (name_base.length() + local_ext.length() > allowed_length) {
- name_base.resize(allowed_length - local_ext.length());
- TrimPathStringToValidCharacter(&name_base);
- }
- new_name = name_base + local_ext;
- n++;
- }
-
- CHECK(new_name.length() <= kSyncProtocolMaxNameLengthBytes);
- return Name(new_name);
-}
-
-} // namespace
-
void ConflictResolver::IgnoreLocalChanges(MutableEntry* entry) {
// An update matches local actions, merge the changes.
// This is a little fishy because we don't actually merge them.
@@ -151,8 +67,10 @@ ConflictResolver::ProcessSimpleConflict(WriteTransaction* trans,
CHECK(entry.good());
// If an update fails, locally we have to be in a set or unsynced. We're not
// in a set here, so we must be unsynced.
- if (!entry.Get(syncable::IS_UNSYNCED))
+ if (!entry.Get(syncable::IS_UNSYNCED)) {
return NO_SYNC_PROGRESS;
+ }
+
if (!entry.Get(syncable::IS_UNAPPLIED_UPDATE)) {
if (!entry.Get(syncable::PARENT_ID).ServerKnows()) {
LOG(INFO) << "Item conflicting because its parent not yet committed. "
@@ -164,6 +82,7 @@ ConflictResolver::ProcessSimpleConflict(WriteTransaction* trans,
}
return NO_SYNC_PROGRESS;
}
+
if (entry.Get(syncable::IS_DEL) && entry.Get(syncable::SERVER_IS_DEL)) {
// we've both deleted it, so lets just drop the need to commit/update this
// entry.
@@ -180,7 +99,8 @@ ConflictResolver::ProcessSimpleConflict(WriteTransaction* trans,
// Check if there's no changes.
// Verbose but easier to debug.
- bool name_matches = entry.SyncNameMatchesServerName();
+ bool name_matches = entry.Get(syncable::NON_UNIQUE_NAME) ==
+ entry.Get(syncable::SERVER_NON_UNIQUE_NAME);
bool parent_matches = entry.Get(syncable::PARENT_ID) ==
entry.Get(syncable::SERVER_PARENT_ID);
bool entry_deleted = entry.Get(syncable::IS_DEL);
@@ -208,7 +128,6 @@ ConflictResolver::ProcessSimpleConflict(WriteTransaction* trans,
return NO_SYNC_PROGRESS;
}
}
- // METRIC conflict resolved by entry split;
// If the entry's deleted on the server, we can have a directory here.
entry.Put(syncable::IS_UNSYNCED, true);
@@ -225,162 +144,6 @@ ConflictResolver::ProcessSimpleConflict(WriteTransaction* trans,
}
}
-namespace {
-
-bool NamesCollideWithChildrenOfFolder(BaseTransaction* trans,
- const Directory::ChildHandles& children,
- Id folder_id) {
- Directory::ChildHandles::const_iterator i = children.begin();
- while (i != children.end()) {
- Entry child(trans, syncable::GET_BY_HANDLE, *i);
- CHECK(child.good());
- if (Entry(trans,
- syncable::GET_BY_PARENTID_AND_DBNAME,
- folder_id,
- child.GetName().db_value()).good())
- return true;
- ++i;
- }
- return false;
-}
-
-void GiveEntryNewName(WriteTransaction* trans, MutableEntry* entry) {
- using namespace syncable;
- Name new_name =
- FindNewName(trans, entry->Get(syncable::PARENT_ID), entry->GetName());
- LOG(INFO) << "Resolving name clash, renaming " << *entry << " to "
- << new_name.db_value();
- entry->PutName(new_name);
- CHECK(entry->Get(syncable::IS_UNSYNCED));
-}
-
-} // namespace
-
-bool ConflictResolver::AttemptItemMerge(WriteTransaction* trans,
- MutableEntry* locally_named,
- MutableEntry* server_named) {
- // To avoid complications we only merge new entries with server entries.
- if (locally_named->Get(syncable::IS_DIR) !=
- server_named->Get(syncable::SERVER_IS_DIR) ||
- locally_named->Get(syncable::ID).ServerKnows() ||
- locally_named->Get(syncable::IS_UNAPPLIED_UPDATE) ||
- server_named->Get(syncable::IS_UNSYNCED))
- return false;
- Id local_id = locally_named->Get(syncable::ID);
- Id desired_id = server_named->Get(syncable::ID);
- if (locally_named->Get(syncable::IS_DIR)) {
- // Extra work for directory name clash. We have to make sure we don't have
- // clashing child items, and update the parent id the children of the new
- // entry.
- Directory::ChildHandles children;
- trans->directory()->GetChildHandles(trans, local_id, &children);
- if (NamesCollideWithChildrenOfFolder(trans, children, desired_id))
- return false;
-
- LOG(INFO) << "Merging local changes to: " << desired_id << ". "
- << *locally_named;
-
- server_named->Put(syncable::ID, trans->directory()->NextId());
- Directory::ChildHandles::iterator i;
- for (i = children.begin() ; i != children.end() ; ++i) {
- MutableEntry child_entry(trans, syncable::GET_BY_HANDLE, *i);
- CHECK(child_entry.good());
- CHECK(child_entry.Put(syncable::PARENT_ID, desired_id));
- CHECK(child_entry.Put(syncable::IS_UNSYNCED, true));
- Id id = child_entry.Get(syncable::ID);
- // We only note new entries for quicker merging next round.
- if (!id.ServerKnows())
- children_of_merged_dirs_.insert(id);
- }
- } else {
- if (!server_named->Get(syncable::IS_DEL))
- return false;
- }
-
- LOG(INFO) << "Identical client and server items merging server changes. " <<
- *locally_named << " server: " << *server_named;
-
- // Clear server_named's server data and mark it deleted so it goes away
- // quietly because it's now identical to a deleted local entry.
- // locally_named takes on the ID of the server entry.
- server_named->Put(syncable::ID, trans->directory()->NextId());
- locally_named->Put(syncable::ID, desired_id);
- locally_named->Put(syncable::IS_UNSYNCED, false);
- CopyServerFields(server_named, locally_named);
- ClearServerData(server_named);
- server_named->Put(syncable::IS_DEL, true);
- server_named->Put(syncable::BASE_VERSION, 0);
- CHECK(SUCCESS ==
- SyncerUtil::AttemptToUpdateEntryWithoutMerge(
- trans, locally_named, NULL));
- return true;
-}
-
-ConflictResolver::ServerClientNameClashReturn
-ConflictResolver::ProcessServerClientNameClash(WriteTransaction* trans,
- MutableEntry* locally_named,
- MutableEntry* server_named,
- SyncerSession* session) {
- if (!locally_named->ExistsOnClientBecauseDatabaseNameIsNonEmpty())
- return NO_CLASH; // Locally_named is a server update.
- if (locally_named->Get(syncable::IS_DEL) ||
- server_named->Get(syncable::SERVER_IS_DEL)) {
- return NO_CLASH;
- }
- if (locally_named->Get(syncable::PARENT_ID) !=
- server_named->Get(syncable::SERVER_PARENT_ID)) {
- return NO_CLASH; // Different parents.
- }
-
- PathString name = locally_named->GetSyncNameValue();
- if (0 != syncable::ComparePathNames(name,
- server_named->Get(syncable::SERVER_NAME))) {
- return NO_CLASH; // Different names.
- }
-
- // First try to merge.
- if (AttemptItemMerge(trans, locally_named, server_named)) {
- // METRIC conflict resolved by merge
- return SOLVED;
- }
- // We need to rename.
- if (!locally_named->Get(syncable::IS_UNSYNCED)) {
- LOG(ERROR) << "Locally named part of a name conflict not unsynced?";
- locally_named->Put(syncable::IS_UNSYNCED, true);
- }
- if (!server_named->Get(syncable::IS_UNAPPLIED_UPDATE)) {
- LOG(ERROR) << "Server named part of a name conflict not an update?";
- }
- GiveEntryNewName(trans, locally_named);
-
- // METRIC conflict resolved by rename
- return SOLVED;
-}
-
-ConflictResolver::ServerClientNameClashReturn
-ConflictResolver::ProcessNameClashesInSet(WriteTransaction* trans,
- ConflictSet* conflict_set,
- SyncerSession* session) {
- ConflictSet::const_iterator i,j;
- for (i = conflict_set->begin() ; i != conflict_set->end() ; ++i) {
- MutableEntry entryi(trans, syncable::GET_BY_ID, *i);
- if (!entryi.Get(syncable::IS_UNSYNCED) &&
- !entryi.Get(syncable::IS_UNAPPLIED_UPDATE))
- // This set is broken / doesn't make sense, this may be transient.
- return BOGUS_SET;
- for (j = conflict_set->begin() ; *i != *j ; ++j) {
- MutableEntry entryj(trans, syncable::GET_BY_ID, *j);
- ServerClientNameClashReturn rv =
- ProcessServerClientNameClash(trans, &entryi, &entryj, session);
- if (NO_CLASH == rv)
- rv = ProcessServerClientNameClash(trans, &entryj, &entryi, session);
- if (NO_CLASH != rv)
- return rv;
- }
- }
- return NO_CLASH;
-}
-
ConflictResolver::ConflictSetCountMapKey ConflictResolver::GetSetKey(
ConflictSet* set) {
// TODO(sync): Come up with a better scheme for set hashing. This scheme
@@ -481,6 +244,8 @@ bool AttemptToFixUnsyncedEntryInDeletedServerTree(WriteTransaction* trans,
return true;
}
+
+// TODO(chron): needs unit test badly
bool AttemptToFixUpdateEntryInDeletedLocalTree(WriteTransaction* trans,
ConflictSet* conflict_set,
const Entry& entry) {
@@ -540,18 +305,19 @@ bool AttemptToFixUpdateEntryInDeletedLocalTree(WriteTransaction* trans,
// Now we fix things up by undeleting all the folders in the item's path.
id = parent_id;
while (!id.IsRoot() && id != reroot_id) {
- if (!binary_search(conflict_set->begin(), conflict_set->end(), id))
+ if (!binary_search(conflict_set->begin(), conflict_set->end(), id)) {
break;
+ }
MutableEntry entry(trans, syncable::GET_BY_ID, id);
+
+ LOG(INFO) << "Undoing our deletion of " << entry
+ << ", will have name " << entry.Get(syncable::NON_UNIQUE_NAME);
+
Id parent_id = entry.Get(syncable::PARENT_ID);
- if (parent_id == reroot_id)
+ if (parent_id == reroot_id) {
parent_id = trans->root_id();
- Name old_name = entry.GetName();
- Name new_name = FindNewName(trans, parent_id, old_name);
- LOG(INFO) << "Undoing our deletion of " << entry <<
- ", will have name " << new_name.db_value();
- if (new_name != old_name || parent_id != entry.Get(syncable::PARENT_ID))
- CHECK(entry.PutParentIdAndName(parent_id, new_name));
+ }
+ entry.Put(syncable::PARENT_ID, parent_id);
entry.Put(syncable::IS_DEL, false);
id = entry.Get(syncable::PARENT_ID);
// METRIC conflict resolved by recreating dir tree.
@@ -576,6 +342,7 @@ bool AttemptToFixRemovedDirectoriesWithContent(WriteTransaction* trans,
} // namespace
+// TODO(sync): Eliminate conflict sets. They're not necessary.
bool ConflictResolver::ProcessConflictSet(WriteTransaction* trans,
ConflictSet* conflict_set,
int conflict_count,
@@ -597,13 +364,6 @@ bool ConflictResolver::ProcessConflictSet(WriteTransaction* trans,
LOG(INFO) << "Fixing a set containing " << set_size << " items";
- ServerClientNameClashReturn rv = ProcessNameClashesInSet(trans, conflict_set,
- session);
- if (SOLVED == rv)
- return true;
- if (NO_CLASH != rv)
- return false;
-
// Fix circular conflicts.
if (AttemptToFixCircularConflict(trans, conflict_set))
return true;
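
Note: with the separate db-name machinery removed, the "no changes" check in ProcessSimpleConflict above reduces to comparing plain local fields against their SERVER_ counterparts. A small comparison in the same spirit, over hypothetical plain structs rather than the real entry kernel (the actual check covers additional fields):

    #include <string>

    // Assumed shapes for illustration only; the real code reads these
    // values through entry.Get(...) on syncable fields.
    struct LocalFields  { std::string non_unique_name, parent_id; bool is_del; };
    struct ServerFields { std::string non_unique_name, parent_id; bool is_del; };

    bool NoLocalServerDifference(const LocalFields& local,
                                 const ServerFields& server) {
      return local.non_unique_name == server.non_unique_name &&
             local.parent_id == server.parent_id &&
             local.is_del == server.is_del;
    }
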
diff --git a/chrome/browser/sync/engine/conflict_resolver.h b/chrome/browser/sync/engine/conflict_resolver.h
index 3ba0db1..779e123 100644..100755
--- a/chrome/browser/sync/engine/conflict_resolver.h
+++ b/chrome/browser/sync/engine/conflict_resolver.h
@@ -41,13 +41,6 @@ class ConflictResolver {
ConflictResolutionView* view,
SyncerSession* session);
- // Called by ProcessServerClientNameClash. Returns true if it's merged the
- // items, false otherwise. Does not re-check preconditions covered in
- // ProcessServerClientNameClash (i.e. it assumes a name clash).
- bool AttemptItemMerge(syncable::WriteTransaction* trans,
- syncable::MutableEntry* local_entry,
- syncable::MutableEntry* server_entry);
-
private:
// We keep a map to record how often we've seen each conflict set. We use this
// to screen out false positives caused by transient server or client states,
@@ -62,13 +55,6 @@ class ConflictResolver {
SYNC_PROGRESS, // Progress made.
};
- enum ServerClientNameClashReturn {
- NO_CLASH,
- SOLUTION_DEFERRED,
- SOLVED,
- BOGUS_SET,
- };
-
// Get a key for the given set. NOTE: May reorder set contents. The key is
// currently not very efficient, but will ease debugging.
ConflictSetCountMapKey GetSetKey(ConflictSet* conflict_set);
@@ -91,20 +77,6 @@ class ConflictResolver {
int conflict_count,
SyncerSession* session);
- // Gives any unsynced entries in the given set new names if possible.
- bool RenameUnsyncedEntries(syncable::WriteTransaction* trans,
- ConflictSet* conflict_set);
-
- ServerClientNameClashReturn ProcessServerClientNameClash(
- syncable::WriteTransaction* trans,
- syncable::MutableEntry* locally_named,
- syncable::MutableEntry* server_named,
- SyncerSession* session);
- ServerClientNameClashReturn ProcessNameClashesInSet(
- syncable::WriteTransaction* trans,
- ConflictSet* conflict_set,
- SyncerSession* session);
-
// Returns true if we're stuck.
template <typename InputIt>
bool LogAndSignalIfConflictStuck(syncable::BaseTransaction* trans,
diff --git a/chrome/browser/sync/engine/get_commit_ids_command.h b/chrome/browser/sync/engine/get_commit_ids_command.h
index 4bbae82..3abb975 100644
--- a/chrome/browser/sync/engine/get_commit_ids_command.h
+++ b/chrome/browser/sync/engine/get_commit_ids_command.h
@@ -96,8 +96,7 @@ class GetCommitIdsCommand : public SyncerCommand {
// TODO(chron): Remove writes from this iterator. As a warning, this
// iterator causes writes to entries and so isn't a pure iterator.
- // It will do Put(IS_UNSYNCED) as well as add things to the blocked
- // session list. Refactor this out later.
+ // It will do Put(IS_UNSYNCED). Refactor this out later.
class CommitMetahandleIterator {
public:
// TODO(chron): Cache ValidateCommitEntry responses across iterators to save
diff --git a/chrome/browser/sync/engine/process_commit_response_command.cc b/chrome/browser/sync/engine/process_commit_response_command.cc
index e65a7f1..f0fe413 100644..100755
--- a/chrome/browser/sync/engine/process_commit_response_command.cc
+++ b/chrome/browser/sync/engine/process_commit_response_command.cc
@@ -20,9 +20,6 @@ using syncable::ScopedDirLookup;
using syncable::WriteTransaction;
using syncable::MutableEntry;
using syncable::Entry;
-using syncable::Name;
-using syncable::SyncName;
-using syncable::DBName;
using std::set;
using std::vector;
@@ -124,6 +121,7 @@ void ProcessCommitResponseCommand::ProcessCommitResponse(
break;
case CommitResponse::CONFLICT:
++conflicting_commits;
+ // This is important to activate conflict resolution.
session->AddCommitConflict(commit_ids[i]);
break;
case CommitResponse::SUCCESS:
@@ -339,33 +337,15 @@ void ProcessCommitResponseCommand::PerformCommitTimeNameAside(
syncable::WriteTransaction* trans,
const CommitResponse_EntryResponse& server_entry,
syncable::MutableEntry* local_entry) {
- Name old_name(local_entry->GetName());
-
- // Ensure that we don't collide with an existing entry.
- SyncName server_name =
+ PathString old_name = local_entry->Get(syncable::NON_UNIQUE_NAME);
+ const string server_name =
SyncerProtoUtil::NameFromCommitEntryResponse(server_entry);
- LOG(INFO) << "Server provided committed name:" << server_name.value();
- if (!server_name.value().empty() &&
- static_cast<SyncName&>(old_name) != server_name) {
- LOG(INFO) << "Server name differs from local name, attempting"
- << " commit time name aside.";
-
- DBName db_name(server_name.value());
- db_name.MakeOSLegal();
-
- // This is going to produce ~1 names instead of (Edited) names.
- // Since this should be EXTREMELY rare, we do this for now.
- db_name.MakeNoncollidingForEntry(trans, local_entry->Get(SERVER_PARENT_ID),
- local_entry);
-
- CHECK(!db_name.empty());
-
- LOG(INFO) << "Server commit moved aside entry: " << old_name.db_value()
- << " to new name " << db_name;
-
+ if (!server_name.empty() && old_name != server_name) {
+ LOG(INFO) << "Server commit moved aside entry: " << old_name
+ << " to new name " << server_name;
// Should be safe since we're in a "commit lock."
- local_entry->PutName(Name::FromDBNameAndSyncName(db_name, server_name));
+ local_entry->Put(syncable::NON_UNIQUE_NAME, server_name);
}
}
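
Note: PerformCommitTimeNameAside now simply adopts the server-returned name when it is non-empty and differs from the local NON_UNIQUE_NAME. A minimal sketch of that rule on plain strings (hypothetical helper, not the real API):

    #include <string>

    // Returns true if the local name was replaced by the server-provided one.
    bool ApplyCommitTimeName(const std::string& server_name,
                             std::string* local_name) {
      if (!server_name.empty() && server_name != *local_name) {
        *local_name = server_name;  // mirrors Put(syncable::NON_UNIQUE_NAME, ...)
        return true;
      }
      return false;
    }
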
diff --git a/chrome/browser/sync/engine/process_updates_command.cc b/chrome/browser/sync/engine/process_updates_command.cc
index fefa234..468a342 100644..100755
--- a/chrome/browser/sync/engine/process_updates_command.cc
+++ b/chrome/browser/sync/engine/process_updates_command.cc
@@ -134,7 +134,8 @@ ServerUpdateProcessingResult ProcessUpdatesCommand::ProcessUpdate(
const SyncEntity& entry = *static_cast<const SyncEntity*>(&pb_entry);
using namespace syncable;
syncable::Id id = entry.id();
- SyncName name = SyncerProtoUtil::NameFromSyncEntity(entry);
+ const std::string name =
+ SyncerProtoUtil::NameFromSyncEntity(entry);
WriteTransaction trans(dir, SYNCER, __FILE__, __LINE__);
@@ -153,8 +154,10 @@ ServerUpdateProcessingResult ProcessUpdatesCommand::ProcessUpdate(
if (update_entry.Get(SERVER_VERSION) == update_entry.Get(BASE_VERSION) &&
!update_entry.Get(IS_UNSYNCED)) {
- CHECK(SyncerUtil::ServerAndLocalEntriesMatch(
- &update_entry)) << update_entry;
+      // Previously a mismatch here was a hard failure (CHECK); now we only
+      // log it, since server and local entries need not match exactly.
+ LOG_IF(ERROR, !SyncerUtil::ServerAndLocalEntriesMatch(&update_entry))
+ << update_entry;
}
return SUCCESS_PROCESSED;
}
diff --git a/chrome/browser/sync/engine/syncapi.cc b/chrome/browser/sync/engine/syncapi.cc
index 19225cf..ea2be76 100644..100755
--- a/chrome/browser/sync/engine/syncapi.cc
+++ b/chrome/browser/sync/engine/syncapi.cc
@@ -244,8 +244,8 @@ bool BaseNode::GetIsFolder() const {
}
const std::wstring& BaseNode::GetTitle() const {
- ServerNameToSyncAPIName(GetEntry()->GetName().non_unique_value(),
- &data_->title);
+ ServerNameToSyncAPIName(GetEntry()->Get(syncable::NON_UNIQUE_NAME),
+ &data_->title);
return data_->title;
}
@@ -316,22 +316,12 @@ void WriteNode::SetTitle(const std::wstring& title) {
std::string server_legal_name;
SyncAPINameToServerName(title, &server_legal_name);
- syncable::Name old_name = entry_->GetName();
+ PathString old_name = entry_->Get(syncable::NON_UNIQUE_NAME);
- if (server_legal_name == old_name.non_unique_value())
+ if (server_legal_name == old_name)
return; // Skip redundant changes.
- // Otherwise, derive a new unique name so we have a valid value
- // to use as the DBName.
- syncable::SyncName sync_name(server_legal_name);
- syncable::DBName db_name(sync_name.value());
- db_name.MakeOSLegal();
- db_name.MakeNoncollidingForEntry(transaction_->GetWrappedTrans(),
- entry_->Get(syncable::PARENT_ID), entry_);
-
- syncable::Name new_name = syncable::Name::FromDBNameAndSyncName(db_name,
- sync_name);
- entry_->PutName(new_name);
+ entry_->Put(syncable::NON_UNIQUE_NAME, server_legal_name);
MarkForSyncing();
}
@@ -381,12 +371,9 @@ bool WriteNode::InitByCreation(const BaseNode& parent,
syncable::Id parent_id = parent.GetEntry()->Get(syncable::ID);
- // Start out with a dummy name, but make it unique. We expect
+ // Start out with a dummy name. We expect
// the caller to set a meaningful name after creation.
- syncable::DBName dummy(kDefaultNameForNewNodes);
- dummy.MakeOSLegal();
- dummy.MakeNoncollidingForEntry(transaction_->GetWrappedTrans(), parent_id,
- NULL);
+ PathString dummy(kDefaultNameForNewNodes);
entry_ = new syncable::MutableEntry(transaction_->GetWrappedWriteTrans(),
syncable::CREATE, parent_id, dummy);
@@ -425,16 +412,9 @@ bool WriteNode::SetPosition(const BaseNode& new_parent,
}
}
- // Discard the old database name, derive a new database name from the sync
- // name, and make it legal and unique.
- syncable::Name name = syncable::Name::FromSyncName(GetEntry()->GetName());
- name.db_value().MakeOSLegal();
- name.db_value().MakeNoncollidingForEntry(GetTransaction()->GetWrappedTrans(),
- new_parent_id, entry_);
-
- // Atomically change the parent and name. This will fail if it would
+ // Atomically change the parent. This will fail if it would
// introduce a cycle in the hierarchy.
- if (!entry_->PutParentIdAndName(new_parent_id, name))
+ if (!entry_->Put(syncable::PARENT_ID, new_parent_id))
return false;
// Now set the predecessor, which sets IS_UNSYNCED as necessary.
@@ -813,9 +793,7 @@ class SyncManager::SyncInternal {
// value of false means that it should be OK to ignore this change.
static bool BookmarkPropertiesDiffer(const syncable::EntryKernel& a,
const syncable::Entry& b) {
- if (a.ref(syncable::NAME) != b.Get(syncable::NAME))
- return true;
- if (a.ref(syncable::UNSANITIZED_NAME) != b.Get(syncable::UNSANITIZED_NAME))
+ if (a.ref(syncable::NON_UNIQUE_NAME) != b.Get(syncable::NON_UNIQUE_NAME))
return true;
if (a.ref(syncable::IS_DIR) != b.Get(syncable::IS_DIR))
return true;
diff --git a/chrome/browser/sync/engine/syncer.cc b/chrome/browser/sync/engine/syncer.cc
index ee92258..c97e709 100644..100755
--- a/chrome/browser/sync/engine/syncer.cc
+++ b/chrome/browser/sync/engine/syncer.cc
@@ -26,6 +26,7 @@
#include "chrome/browser/sync/syncable/directory_manager.h"
#include "chrome/browser/sync/syncable/syncable-inl.h"
#include "chrome/browser/sync/syncable/syncable.h"
+#include "chrome/browser/sync/util/closure.h"
using sync_pb::ClientCommand;
using syncable::Blob;
@@ -37,7 +38,6 @@ using syncable::SERVER_IS_BOOKMARK_OBJECT;
using syncable::SERVER_IS_DEL;
using syncable::SERVER_IS_DIR;
using syncable::SERVER_MTIME;
-using syncable::SERVER_NAME;
using syncable::SERVER_NON_UNIQUE_NAME;
using syncable::SERVER_PARENT_ID;
using syncable::SERVER_POSITION_IN_PARENT;
@@ -62,7 +62,7 @@ Syncer::Syncer(
model_safe_worker_(model_safe_worker),
updates_source_(sync_pb::GetUpdatesCallerInfo::UNKNOWN),
notifications_enabled_(false),
- pre_conflict_resolution_function_(NULL) {
+ pre_conflict_resolution_closure_(NULL) {
SyncerEvent shutdown = { SyncerEvent::SHUTDOWN_USE_WITH_CARE };
syncer_event_channel_.reset(new SyncerEventChannel(shutdown));
shutdown_channel_.reset(new ShutdownChannel(this));
@@ -245,15 +245,10 @@ void Syncer::SyncShare(SyncerSession* session,
case RESOLVE_CONFLICTS: {
LOG(INFO) << "Resolving Conflicts";
- // Trigger the pre_conflict_resolution_function_, which is a testing
+ // Trigger the pre_conflict_resolution_closure_, which is a testing
// hook for the unit tests, if it is non-NULL.
- if (pre_conflict_resolution_function_) {
- ScopedDirLookup dir(dirman_, account_name_);
- if (!dir.good()) {
- LOG(ERROR) << "Bad dir lookup in syncer loop";
- return;
- }
- pre_conflict_resolution_function_(dir);
+ if (pre_conflict_resolution_closure_) {
+ pre_conflict_resolution_closure_->Run();
}
ResolveConflictsCommand resolve_conflicts_command;
@@ -312,7 +307,6 @@ void Syncer::ProcessClientCommand(SyncerSession* session) {
}
void CopyServerFields(syncable::Entry* src, syncable::MutableEntry* dest) {
- dest->Put(SERVER_NAME, src->Get(SERVER_NAME));
dest->Put(SERVER_NON_UNIQUE_NAME, src->Get(SERVER_NON_UNIQUE_NAME));
dest->Put(SERVER_PARENT_ID, src->Get(SERVER_PARENT_ID));
dest->Put(SERVER_MTIME, src->Get(SERVER_MTIME));
@@ -328,7 +322,6 @@ void CopyServerFields(syncable::Entry* src, syncable::MutableEntry* dest) {
}
void ClearServerData(syncable::MutableEntry* entry) {
- entry->Put(SERVER_NAME, PSTR(""));
entry->Put(SERVER_NON_UNIQUE_NAME, PSTR(""));
entry->Put(SERVER_PARENT_ID, syncable::kNullId);
entry->Put(SERVER_MTIME, 0);
diff --git a/chrome/browser/sync/engine/syncer.h b/chrome/browser/sync/engine/syncer.h
index 067193b..c25ddca 100644..100755
--- a/chrome/browser/sync/engine/syncer.h
+++ b/chrome/browser/sync/engine/syncer.h
@@ -17,6 +17,7 @@
#include "chrome/browser/sync/engine/syncer_types.h"
#include "chrome/browser/sync/engine/syncproto.h"
#include "chrome/browser/sync/syncable/directory_event.h"
+#include "chrome/browser/sync/util/closure.h"
#include "chrome/browser/sync/util/event_sys-inl.h"
#include "chrome/browser/sync/util/event_sys.h"
#include "chrome/browser/sync/util/extensions_activity_monitor.h"
@@ -71,7 +72,6 @@ enum SyncerStep {
class Syncer {
public:
typedef std::vector<int64> UnsyncedMetaHandles;
- typedef void (*TestCallbackFunction)(syncable::Directory* dir);
// The constructor may be called from a thread that is not the Syncer's
// dedicated thread, to allow some flexibility in the setup.
@@ -193,9 +193,10 @@ class Syncer {
// A callback hook used in unittests to simulate changes between conflict set
// building and conflict resolution.
- TestCallbackFunction pre_conflict_resolution_function_;
+ Closure* pre_conflict_resolution_closure_;
- FRIEND_TEST(SyncerTest, NewServerItemInAFolderHierarchyWeHaveDeleted3);
+ FRIEND_TEST(SusanDeletingTest,
+ NewServerItemInAFolderHierarchyWeHaveDeleted3);
FRIEND_TEST(SyncerTest, TestCommitListOrderingAndNewParent);
FRIEND_TEST(SyncerTest, TestCommitListOrderingAndNewParentAndChild);
FRIEND_TEST(SyncerTest, TestCommitListOrderingCounterexample);
diff --git a/chrome/browser/sync/engine/syncer_proto_util.cc b/chrome/browser/sync/engine/syncer_proto_util.cc
index 5767d20..75b6689 100644..100755
--- a/chrome/browser/sync/engine/syncer_proto_util.cc
+++ b/chrome/browser/sync/engine/syncer_proto_util.cc
@@ -167,7 +167,7 @@ bool SyncerProtoUtil::PostClientToServerMessage(ClientToServerMessage* msg,
// static
bool SyncerProtoUtil::Compare(const syncable::Entry& local_entry,
const SyncEntity& server_entry) {
- SyncName name = NameFromSyncEntity(server_entry);
+ const std::string name = NameFromSyncEntity(server_entry);
CHECK(local_entry.Get(ID) == server_entry.id()) <<
" SyncerProtoUtil::Compare precondition not met.";
@@ -185,7 +185,7 @@ bool SyncerProtoUtil::Compare(const syncable::Entry& local_entry,
// These checks are somewhat prolix, but they're easier to debug than a big
// boolean statement.
- SyncName client_name = local_entry.GetName();
+ PathString client_name = local_entry.Get(syncable::NON_UNIQUE_NAME);
if (client_name != name) {
LOG(WARNING) << "Client name mismatch";
return false;
@@ -235,23 +235,25 @@ void SyncerProtoUtil::CopyBlobIntoProtoBytes(const syncable::Blob& blob,
}
// static
-syncable::SyncName SyncerProtoUtil::NameFromSyncEntity(
+std::string SyncerProtoUtil::NameFromSyncEntity(
const SyncEntity& entry) {
- SyncName result(entry.name());
+
if (entry.has_non_unique_name()) {
- result.set_non_unique_value(entry.non_unique_name());
+ return entry.non_unique_name();
}
- return result;
+
+ return entry.name();
}
// static
-syncable::SyncName SyncerProtoUtil::NameFromCommitEntryResponse(
+std::string SyncerProtoUtil::NameFromCommitEntryResponse(
const CommitResponse_EntryResponse& entry) {
- SyncName result(entry.name());
+
if (entry.has_non_unique_name()) {
- result.set_non_unique_value(entry.non_unique_name());
+ return entry.non_unique_name();
}
- return result;
+
+ return entry.name();
}
} // namespace browser_sync
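
Note: both extraction helpers now collapse to one precedence rule: return non_unique_name when the proto has one, otherwise fall back to the legacy name field (the case the unit tests below exercise). A standalone sketch of that rule, using std::optional in place of the protobuf has_/getter pair and a hypothetical FakeEntity in place of SyncEntity / CommitResponse_EntryResponse:

    #include <optional>
    #include <string>

    struct FakeEntity {
      std::string name;                            // legacy field
      std::optional<std::string> non_unique_name;  // preferred when present
    };

    std::string ExtractName(const FakeEntity& entity) {
      return entity.non_unique_name ? *entity.non_unique_name : entity.name;
    }
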
diff --git a/chrome/browser/sync/engine/syncer_proto_util.h b/chrome/browser/sync/engine/syncer_proto_util.h
index e3583cf..287cf49 100644..100755
--- a/chrome/browser/sync/engine/syncer_proto_util.h
+++ b/chrome/browser/sync/engine/syncer_proto_util.h
@@ -56,11 +56,12 @@ class SyncerProtoUtil {
static void CopyBlobIntoProtoBytes(const syncable::Blob& blob,
std::string* proto_bytes);
- // Extract the name fields from a sync entity.
- static syncable::SyncName NameFromSyncEntity(const SyncEntity& entry);
+ // Extract the name field from a sync entity.
+ static std::string NameFromSyncEntity(const SyncEntity& entry);
- // Extract the name fields from a commit entry response.
- static syncable::SyncName NameFromCommitEntryResponse(
+
+ // Extract the name field from a commit entry response.
+ static std::string NameFromCommitEntryResponse(
const CommitResponse_EntryResponse& entry);
private:
diff --git a/chrome/browser/sync/engine/syncer_proto_util_unittest.cc b/chrome/browser/sync/engine/syncer_proto_util_unittest.cc
index dbb1730..7f7ba1f 100644
--- a/chrome/browser/sync/engine/syncer_proto_util_unittest.cc
+++ b/chrome/browser/sync/engine/syncer_proto_util_unittest.cc
@@ -75,43 +75,62 @@ TEST(SyncerProtoUtil, NameExtractionOneName) {
SyncEntity one_name_entity;
CommitResponse_EntryResponse one_name_response;
- PathString one_name_string(PSTR("Eggheadednesses"));
- one_name_entity.set_name("Eggheadednesses");
- one_name_response.set_name("Eggheadednesses");
+ const std::string one_name_string("Eggheadednesses");
+ one_name_entity.set_name(one_name_string);
+ one_name_response.set_name(one_name_string);
- SyncName name_a = SyncerProtoUtil::NameFromSyncEntity(one_name_entity);
- EXPECT_EQ(one_name_string, name_a.value());
- EXPECT_EQ(one_name_string, name_a.non_unique_value());
+ const std::string name_a =
+ SyncerProtoUtil::NameFromSyncEntity(one_name_entity);
+ EXPECT_EQ(one_name_string, name_a);
- SyncName name_b =
+ const std::string name_b =
SyncerProtoUtil::NameFromCommitEntryResponse(one_name_response);
- EXPECT_EQ(one_name_string, name_b.value());
- EXPECT_EQ(one_name_string, name_b.non_unique_value());
+ EXPECT_EQ(one_name_string, name_b);
+ EXPECT_TRUE(name_a == name_b);
+}
+
+TEST(SyncerProtoUtil, NameExtractionOneUniqueName) {
+ SyncEntity one_name_entity;
+ CommitResponse_EntryResponse one_name_response;
+
+ const std::string one_name_string("Eggheadednesses");
+ one_name_entity.set_non_unique_name(one_name_string);
+ one_name_response.set_non_unique_name(one_name_string);
+
+ const std::string name_a =
+ SyncerProtoUtil::NameFromSyncEntity(one_name_entity);
+ EXPECT_EQ(one_name_string, name_a);
+
+ const std::string name_b =
+ SyncerProtoUtil::NameFromCommitEntryResponse(one_name_response);
+ EXPECT_EQ(one_name_string, name_b);
EXPECT_TRUE(name_a == name_b);
}
// Tests NameFromSyncEntity and NameFromCommitEntryResponse when both the name
// field and the non_unique_name fields are provided.
+// Should prioritize non_unique_name.
TEST(SyncerProtoUtil, NameExtractionTwoNames) {
SyncEntity two_name_entity;
CommitResponse_EntryResponse two_name_response;
- PathString two_name_string_unique(PSTR("Oxyphenbutazone"));
- two_name_entity.set_name("Oxyphenbutazone");
- two_name_response.set_name("Oxyphenbutazone");
- PathString two_name_string(PSTR("Neuroanatomists"));
- two_name_entity.set_non_unique_name("Neuroanatomists");
- two_name_response.set_non_unique_name("Neuroanatomists");
+ const std::string neuro("Neuroanatomists");
+ const std::string oxyphen("Oxyphenbutazone");
+
+ two_name_entity.set_name(oxyphen);
+ two_name_entity.set_non_unique_name(neuro);
+
+ two_name_response.set_name(oxyphen);
+ two_name_response.set_non_unique_name(neuro);
- SyncName name_a = SyncerProtoUtil::NameFromSyncEntity(two_name_entity);
- EXPECT_EQ(two_name_string_unique, name_a.value());
- EXPECT_EQ(two_name_string, name_a.non_unique_value());
+ const std::string name_a =
+ SyncerProtoUtil::NameFromSyncEntity(two_name_entity);
+ EXPECT_EQ(neuro, name_a);
- SyncName name_b =
+ const std::string name_b =
SyncerProtoUtil::NameFromCommitEntryResponse(two_name_response);
- EXPECT_EQ(two_name_string_unique, name_b.value());
- EXPECT_EQ(two_name_string, name_b.non_unique_value());
+ EXPECT_EQ(neuro, name_b);
EXPECT_TRUE(name_a == name_b);
}
diff --git a/chrome/browser/sync/engine/syncer_types.h b/chrome/browser/sync/engine/syncer_types.h
index 7d8eda4..0cb1e56 100644..100755
--- a/chrome/browser/sync/engine/syncer_types.h
+++ b/chrome/browser/sync/engine/syncer_types.h
@@ -31,11 +31,6 @@ enum UpdateAttemptResponse {
// Conflicts with the local data representation. This can also mean that the
// entry doesn't currently make sense if we applied it.
CONFLICT,
-
- // This return value is only returned by AttemptToUpdateEntryWithoutMerge
- // if we have a name conflict. Users of AttemptToUpdateEntry should never
- // see this return value, we'll return CONFLICT.
- NAME_CONFLICT,
};
enum ServerUpdateProcessingResult {
diff --git a/chrome/browser/sync/engine/syncer_unittest.cc b/chrome/browser/sync/engine/syncer_unittest.cc
index 1d6c209..31840cd 100644..100755
--- a/chrome/browser/sync/engine/syncer_unittest.cc
+++ b/chrome/browser/sync/engine/syncer_unittest.cc
@@ -27,10 +27,12 @@
#include "chrome/browser/sync/protocol/sync.pb.h"
#include "chrome/browser/sync/syncable/directory_manager.h"
#include "chrome/browser/sync/syncable/syncable.h"
+#include "chrome/browser/sync/util/closure.h"
#include "chrome/browser/sync/util/event_sys-inl.h"
#include "chrome/test/sync/engine/mock_server_connection.h"
#include "chrome/test/sync/engine/test_directory_setter_upper.h"
#include "chrome/test/sync/engine/test_id_factory.h"
+#include "chrome/test/sync/engine/test_syncable_utils.h"
#include "testing/gtest/include/gtest/gtest.h"
using std::map;
@@ -42,10 +44,13 @@ namespace browser_sync {
using syncable::BaseTransaction;
using syncable::Blob;
+using syncable::CountEntriesWithName;
using syncable::Directory;
using syncable::Entry;
using syncable::ExtendedAttribute;
using syncable::ExtendedAttributeKey;
+using syncable::GetFirstEntryWithName;
+using syncable::GetOnlyEntryWithName;
using syncable::Id;
using syncable::MutableEntry;
using syncable::MutableExtendedAttribute;
@@ -58,8 +63,6 @@ using syncable::CREATE;
using syncable::CREATE_NEW_UPDATE_ITEM;
using syncable::GET_BY_HANDLE;
using syncable::GET_BY_ID;
-using syncable::GET_BY_PARENTID_AND_NAME;
-using syncable::GET_BY_PATH;
using syncable::GET_BY_TAG;
using syncable::ID;
using syncable::IS_BOOKMARK_OBJECT;
@@ -69,18 +72,17 @@ using syncable::IS_UNAPPLIED_UPDATE;
using syncable::IS_UNSYNCED;
using syncable::META_HANDLE;
using syncable::MTIME;
-using syncable::NAME;
using syncable::NEXT_ID;
+using syncable::NON_UNIQUE_NAME;
using syncable::PARENT_ID;
using syncable::PREV_ID;
using syncable::SERVER_IS_DEL;
-using syncable::SERVER_NAME;
+using syncable::SERVER_NON_UNIQUE_NAME;
using syncable::SERVER_PARENT_ID;
using syncable::SERVER_POSITION_IN_PARENT;
using syncable::SERVER_VERSION;
using syncable::SINGLETON_TAG;
using syncable::UNITTEST;
-using syncable::UNSANITIZED_NAME;
namespace {
const char* kTestData = "Hello World!";
@@ -165,7 +167,7 @@ class SyncerTest : public testing::Test {
dir->GetChildHandles(&trans, trans.root_id(), &children);
ASSERT_TRUE(0 == children.size());
syncer_events_.clear();
- root_id_ = ids_.root();
+ root_id_ = TestIdFactory::root();
parent_id_ = ids_.MakeServer("parent id");
child_id_ = ids_.MakeServer("child id");
}
@@ -341,6 +343,8 @@ class SyncerTest : public testing::Test {
// Some ids to aid tests. Only the root one's value is specific. The rest
// are named for test clarity.
+ // TODO(chron): Get rid of these inbuilt IDs. They only make it
+ // more confusing.
syncable::Id root_id_;
syncable::Id parent_id_;
syncable::Id child_id_;
@@ -815,9 +819,14 @@ TEST_F(SyncerTest, TestCommitListOrderingCounterexample) {
TEST_F(SyncerTest, TestCommitListOrderingAndNewParent) {
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
ASSERT_TRUE(dir.good());
+
+ PathString parent1_name = PSTR("1");
+ PathString parent2_name = PSTR("A");
+ PathString child_name = PSTR("B");
+
{
WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
- MutableEntry parent(&wtrans, syncable::CREATE, wtrans.root_id(), PSTR("1"));
+ MutableEntry parent(&wtrans, syncable::CREATE, wtrans.root_id(), parent1_name);
ASSERT_TRUE(parent.good());
parent.Put(syncable::IS_UNSYNCED, true);
parent.Put(syncable::IS_DIR, true);
@@ -826,19 +835,20 @@ TEST_F(SyncerTest, TestCommitListOrderingAndNewParent) {
}
syncable::Id parent2_id = ids_.NewLocalId();
- syncable::Id child2_id = ids_.NewServerId();
+ syncable::Id child_id = ids_.NewServerId();
{
WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
- MutableEntry parent(&wtrans, syncable::CREATE, parent_id_, PSTR("A"));
- ASSERT_TRUE(parent.good());
- parent.Put(syncable::IS_UNSYNCED, true);
- parent.Put(syncable::IS_DIR, true);
- parent.Put(syncable::ID, parent2_id);
- MutableEntry child(&wtrans, syncable::CREATE, parent2_id, PSTR("B"));
+ MutableEntry parent2(&wtrans, syncable::CREATE, parent_id_, parent2_name);
+ ASSERT_TRUE(parent2.good());
+ parent2.Put(syncable::IS_UNSYNCED, true);
+ parent2.Put(syncable::IS_DIR, true);
+ parent2.Put(syncable::ID, parent2_id);
+
+ MutableEntry child(&wtrans, syncable::CREATE, parent2_id, child_name);
ASSERT_TRUE(child.good());
child.Put(syncable::IS_UNSYNCED, true);
child.Put(syncable::IS_DIR, true);
- child.Put(syncable::ID, child2_id);
+ child.Put(syncable::ID, child_id);
child.Put(syncable::BASE_VERSION, 1);
}
@@ -851,47 +861,69 @@ TEST_F(SyncerTest, TestCommitListOrderingAndNewParent) {
// If this test starts failing, be aware other sort orders could be valid.
EXPECT_TRUE(parent_id_ == mock_server_->committed_ids()[0]);
EXPECT_TRUE(parent2_id == mock_server_->committed_ids()[1]);
- EXPECT_TRUE(child2_id == mock_server_->committed_ids()[2]);
+ EXPECT_TRUE(child_id == mock_server_->committed_ids()[2]);
{
ReadTransaction rtrans(dir, __FILE__, __LINE__);
- PathChar path[] = { '1', *kPathSeparator, 'A', 0};
- Entry entry_1A(&rtrans, syncable::GET_BY_PATH, path);
- ASSERT_TRUE(entry_1A.good());
- Entry item_parent2(&rtrans, syncable::GET_BY_ID, parent2_id);
- ASSERT_FALSE(item_parent2.good());
- Entry item_child2(&rtrans, syncable::GET_BY_ID, child2_id);
- EXPECT_EQ(entry_1A.Get(syncable::ID), item_child2.Get(syncable::PARENT_ID));
- EXPECT_TRUE(entry_1A.Get(syncable::ID).ServerKnows());
+ // Check that things committed correctly.
+ Entry entry_1(&rtrans, syncable::GET_BY_ID, parent_id_);
+ EXPECT_EQ(entry_1.Get(NON_UNIQUE_NAME), parent1_name);
+ // Check that parent2 is a subfolder of parent1.
+ EXPECT_EQ(1, CountEntriesWithName(&rtrans,
+ parent_id_,
+ parent2_name));
+
+ // Parent2 was a local ID and thus should have changed on commit!
+ Entry pre_commit_entry_parent2(&rtrans, syncable::GET_BY_ID, parent2_id);
+ ASSERT_FALSE(pre_commit_entry_parent2.good());
+
+ // Look up the new ID.
+ Id parent2_committed_id =
+ GetOnlyEntryWithName(&rtrans, parent_id_, parent2_name);
+ EXPECT_TRUE(parent2_committed_id.ServerKnows());
+
+ Entry child(&rtrans, syncable::GET_BY_ID, child_id);
+ EXPECT_EQ(parent2_committed_id, child.Get(syncable::PARENT_ID));
}
}
TEST_F(SyncerTest, TestCommitListOrderingAndNewParentAndChild) {
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
ASSERT_TRUE(dir.good());
+
+ PathString parent_name = PSTR("1");
+ PathString parent2_name = PSTR("A");
+ PathString child_name = PSTR("B");
+
{
WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
- MutableEntry parent(&wtrans, syncable::CREATE, wtrans.root_id(), PSTR("1"));
+ MutableEntry parent(&wtrans,
+ syncable::CREATE,
+ wtrans.root_id(),
+ parent_name);
ASSERT_TRUE(parent.good());
parent.Put(syncable::IS_UNSYNCED, true);
parent.Put(syncable::IS_DIR, true);
parent.Put(syncable::ID, parent_id_);
parent.Put(syncable::BASE_VERSION, 1);
}
+
int64 meta_handle_a, meta_handle_b;
+ const Id parent2_local_id = ids_.NewLocalId();
+ const Id child_local_id = ids_.NewLocalId();
{
WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
- MutableEntry parent(&wtrans, syncable::CREATE, parent_id_, PSTR("A"));
- ASSERT_TRUE(parent.good());
- parent.Put(syncable::IS_UNSYNCED, true);
- parent.Put(syncable::IS_DIR, true);
- parent.Put(syncable::ID, ids_.FromNumber(-101));
- meta_handle_a = parent.Get(syncable::META_HANDLE);
- MutableEntry child(&wtrans, syncable::CREATE, ids_.FromNumber(-101),
- PSTR("B"));
+ MutableEntry parent2(&wtrans, syncable::CREATE, parent_id_, parent2_name);
+ ASSERT_TRUE(parent2.good());
+ parent2.Put(syncable::IS_UNSYNCED, true);
+ parent2.Put(syncable::IS_DIR, true);
+
+ parent2.Put(syncable::ID, parent2_local_id);
+ meta_handle_a = parent2.Get(syncable::META_HANDLE);
+ MutableEntry child(&wtrans, syncable::CREATE, parent2_local_id, child_name);
ASSERT_TRUE(child.good());
child.Put(syncable::IS_UNSYNCED, true);
child.Put(syncable::IS_DIR, true);
- child.Put(syncable::ID, ids_.FromNumber(-102));
+ child.Put(syncable::ID, child_local_id);
meta_handle_b = child.Get(syncable::META_HANDLE);
}
@@ -903,19 +935,30 @@ TEST_F(SyncerTest, TestCommitListOrderingAndNewParentAndChild) {
ASSERT_TRUE(3 == mock_server_->committed_ids().size());
// If this test starts failing, be aware other sort orders could be valid.
EXPECT_TRUE(parent_id_ == mock_server_->committed_ids()[0]);
- EXPECT_TRUE(ids_.FromNumber(-101) == mock_server_->committed_ids()[1]);
- EXPECT_TRUE(ids_.FromNumber(-102) == mock_server_->committed_ids()[2]);
+ EXPECT_TRUE(parent2_local_id == mock_server_->committed_ids()[1]);
+ EXPECT_TRUE(child_local_id == mock_server_->committed_ids()[2]);
{
ReadTransaction rtrans(dir, __FILE__, __LINE__);
- PathChar path[] = { '1', *kPathSeparator, 'A', 0};
- Entry entry_1A(&rtrans, syncable::GET_BY_PATH, path);
- ASSERT_TRUE(entry_1A.good());
- Entry entry_id_minus_101(&rtrans, syncable::GET_BY_ID,
- ids_.FromNumber(-101));
- ASSERT_FALSE(entry_id_minus_101.good());
+
+ Entry parent(&rtrans, syncable::GET_BY_ID,
+ GetOnlyEntryWithName(&rtrans, rtrans.root_id(), parent_name));
+ ASSERT_TRUE(parent.good());
+ EXPECT_TRUE(parent.Get(syncable::ID).ServerKnows());
+
+ Entry parent2(&rtrans, syncable::GET_BY_ID,
+ GetOnlyEntryWithName(&rtrans, parent.Get(ID), parent2_name));
+ ASSERT_TRUE(parent2.good());
+ EXPECT_TRUE(parent2.Get(syncable::ID).ServerKnows());
+
+ // Id changed on commit, so this should fail.
+ Entry local_parent2_id_entry(&rtrans,
+ syncable::GET_BY_ID,
+ parent2_local_id);
+ ASSERT_FALSE(local_parent2_id_entry.good());
+
Entry entry_b(&rtrans, syncable::GET_BY_HANDLE, meta_handle_b);
- EXPECT_TRUE(entry_1A.Get(syncable::ID) == entry_b.Get(syncable::PARENT_ID));
- EXPECT_TRUE(entry_1A.Get(syncable::ID).ServerKnows());
+ EXPECT_TRUE(entry_b.Get(syncable::ID).ServerKnows());
+ EXPECT_TRUE(parent2.Get(syncable::ID) == entry_b.Get(syncable::PARENT_ID));
}
}
@@ -933,203 +976,19 @@ TEST_F(SyncerTest, UpdateWithZeroLengthName) {
syncer_->SyncShare();
}
-#if defined(OS_WIN)
-TEST_F(SyncerTest, NameSanitizationWithClientRename) {
- ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
- ASSERT_TRUE(dir.good());
- mock_server_->AddUpdateDirectory(1, 0, "okay", 1, 10);
- syncer_->SyncShare();
- {
- ReadTransaction tr(dir, __FILE__, __LINE__);
- Entry e(&tr, syncable::GET_BY_PARENTID_AND_NAME, tr.root_id(),
- PSTR("okay"));
- ASSERT_TRUE(e.good());
- }
- mock_server_->AddUpdateDirectory(2, 0, "prn", 1, 20);
- syncer_->SyncShare();
- {
- WriteTransaction tr(dir, UNITTEST, __FILE__, __LINE__);
- MutableEntry e(&tr, syncable::GET_BY_PARENTID_AND_NAME, tr.root_id(),
- PSTR("prn~1"));
- ASSERT_TRUE(e.good());
- e.PutName(syncable::Name(PSTR("printer")));
- e.Put(syncable::IS_UNSYNCED, true);
- }
- syncer_->SyncShare();
- {
- vector<CommitMessage*>::const_reverse_iterator it =
- mock_server_->commit_messages().rbegin();
- ASSERT_TRUE(mock_server_->commit_messages().rend() != it);
- const sync_pb::SyncEntity *const *s = (*it)->entries().data();
- int s_len = (*it)->entries_size();
- ASSERT_TRUE(1 == s_len);
- ASSERT_TRUE("printer" == (*s)[0].name());
- }
-}
-
-TEST_F(SyncerTest, NameSanitizationWithCascade) {
- ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
- ASSERT_TRUE(dir.good());
- mock_server_->AddUpdateDirectory(1, 0, "prn~1", 1, 10);
- syncer_->SyncShare();
- {
- ReadTransaction tr(dir, __FILE__, __LINE__);
- Entry e(&tr, syncable::GET_BY_PARENTID_AND_NAME, tr.root_id(),
- PSTR("prn~1"));
- ASSERT_TRUE(e.good());
- }
- mock_server_->AddUpdateDirectory(2, 0, "prn", 1, 20);
- syncer_->SyncShare();
- {
- ReadTransaction tr(dir, __FILE__, __LINE__);
- Entry e(&tr, syncable::GET_BY_PARENTID_AND_NAME, tr.root_id(),
- PSTR("prn~2"));
- ASSERT_TRUE(e.good());
- }
- mock_server_->AddUpdateDirectory(3, 0, "prn~2", 1, 30);
- syncer_->SyncShare();
- {
- ReadTransaction tr(dir, __FILE__, __LINE__);
- Entry e(&tr, syncable::GET_BY_PARENTID_AND_NAME, tr.root_id(),
- PSTR("prn~3"));
- ASSERT_TRUE(e.good());
- }
-}
-
-TEST_F(SyncerTest, GetStuckWithConflictingSanitizedNames) {
- // We should get stuck here because we get two server updates with exactly the
- // same name.
+TEST_F(SyncerTest, DontGetStuckWithTwoSameNames) {
+ // We should not get stuck here because we get two
+ // server updates with exactly the same name.
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
ASSERT_TRUE(dir.good());
mock_server_->AddUpdateDirectory(1, 0, "foo:", 1, 10);
syncer_->SyncShare();
mock_server_->AddUpdateDirectory(2, 0, "foo:", 1, 20);
SyncRepeatedlyToTriggerStuckSignal(state_.get());
- EXPECT_TRUE(SyncerStuck(state_.get()));
+ EXPECT_FALSE(SyncerStuck(state_.get()));
syncer_events_.clear();
}
-TEST_F(SyncerTest, MergeFolderWithSanitizedNameMatches) {
- ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
- CHECK(dir.good());
- {
- WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
- MutableEntry parent(&wtrans, CREATE, wtrans.root_id(), PSTR("Folder"));
- ASSERT_TRUE(parent.good());
- parent.Put(IS_DIR, true);
- parent.Put(IS_UNSYNCED, true);
- parent.Put(UNSANITIZED_NAME, PSTR("Folder:"));
- }
- mock_server_->AddUpdateDirectory(100, 0, "Folder:", 10, 10);
- syncer_->SyncShare();
- {
- ReadTransaction trans(dir, __FILE__, __LINE__);
- Directory::ChildHandles children;
- dir->GetChildHandles(&trans, trans.root_id(), &children);
- EXPECT_TRUE(1 == children.size());
- Directory::UnappliedUpdateMetaHandles unapplied;
- dir->GetUnappliedUpdateMetaHandles(&trans, &unapplied);
- EXPECT_TRUE(0 == unapplied.size());
- syncable::Directory::UnsyncedMetaHandles unsynced;
- dir->GetUnsyncedMetaHandles(&trans, &unsynced);
- EXPECT_TRUE(0 == unsynced.size());
- syncer_events_.clear();
- }
-}
-
-// These two tests are the same as the two above, but they introduce case
-// changes.
-TEST_F(SyncerTest, GetStuckWithSanitizedNamesThatDifferOnlyByCase) {
- // We should get stuck here because we get two server updates with exactly the
- // same name.
- ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
- ASSERT_TRUE(dir.good());
- mock_server_->AddUpdateDirectory(1, 0, "FOO:", 1, 10);
- syncer_->SyncShare();
- mock_server_->AddUpdateDirectory(2, 0, "foo:", 1, 20);
- SyncRepeatedlyToTriggerStuckSignal(state_.get());
- EXPECT_TRUE(SyncerStuck(state_.get()));
- syncer_events_.clear();
-}
-
-TEST_F(SyncerTest, MergeFolderWithSanitizedNameThatDiffersOnlyByCase) {
- ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
- CHECK(dir.good());
- {
- WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
- MutableEntry parent(&wtrans, CREATE, wtrans.root_id(), PSTR("FOLDER"));
- ASSERT_TRUE(parent.good());
- parent.Put(IS_DIR, true);
- parent.Put(IS_UNSYNCED, true);
- parent.Put(UNSANITIZED_NAME, PSTR("FOLDER:"));
- }
- mock_server_->AddUpdateDirectory(100, 0, "Folder:", 10, 10);
- mock_server_->set_conflict_all_commits(true);
- syncer_->SyncShare();
- syncer_->SyncShare();
- syncer_->SyncShare(); // Good gracious, these tests are not so good.
- {
- ReadTransaction trans(dir, __FILE__, __LINE__);
- Directory::ChildHandles children;
- dir->GetChildHandles(&trans, trans.root_id(), &children);
- EXPECT_TRUE(1 == children.size());
- Directory::UnappliedUpdateMetaHandles unapplied;
- dir->GetUnappliedUpdateMetaHandles(&trans, &unapplied);
- EXPECT_TRUE(0 == unapplied.size());
- syncable::Directory::UnsyncedMetaHandles unsynced;
- dir->GetUnsyncedMetaHandles(&trans, &unsynced);
- EXPECT_TRUE(0 == unsynced.size());
- syncer_events_.clear();
- }
-}
-#else // Mac / Linux ...
-
-TEST_F(SyncerTest, NameSanitizationWithClientRename) {
- ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
- ASSERT_TRUE(dir.good());
- mock_server_->AddUpdateDirectory(1, 0, "okay", 1, 10);
- syncer_->SyncShare();
- {
- ReadTransaction tr(dir, __FILE__, __LINE__);
- Entry e(&tr, syncable::GET_BY_PARENTID_AND_NAME, tr.root_id(),
- PSTR("okay"));
- ASSERT_TRUE(e.good());
- }
- mock_server_->AddUpdateDirectory(2, 0, "a/b", 1, 20);
- syncer_->SyncShare();
- {
- WriteTransaction tr(dir, UNITTEST, __FILE__, __LINE__);
- MutableEntry e(&tr, syncable::GET_BY_PARENTID_AND_NAME, tr.root_id(),
- PSTR("a:b"));
- ASSERT_TRUE(e.good());
- e.PutName(syncable::Name(PSTR("ab")));
- e.Put(syncable::IS_UNSYNCED, true);
- }
- syncer_->SyncShare();
- {
- vector<CommitMessage*>::const_reverse_iterator it =
- mock_server_->commit_messages().rbegin();
- ASSERT_TRUE(mock_server_->commit_messages().rend() != it);
- const sync_pb::SyncEntity *const *s = (*it)->entries().data();
- int s_len = (*it)->entries_size();
- ASSERT_TRUE(1 == s_len);
- ASSERT_TRUE("ab" == (*s)[0].name());
- }
-}
-#endif
-
-namespace {
-void VerifyExistsWithNameInRoot(syncable::Directory* dir,
- const PathString& name,
- const string& entry,
- int line) {
- ReadTransaction tr(dir, __FILE__, __LINE__);
- Entry e(&tr, syncable::GET_BY_PARENTID_AND_NAME, tr.root_id(),
- name);
- EXPECT_TRUE(e.good()) << "failed on call from " << entry << ":" << line;
-}
-} // namespace
-
TEST_F(SyncerTest, ExtendedAttributeWithNullCharacter) {
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
ASSERT_TRUE(dir.good());
@@ -1203,13 +1062,13 @@ TEST_F(SyncerTest, TestBasicUpdate) {
}
TEST_F(SyncerTest, IllegalAndLegalUpdates) {
- Id root = ids_.root();
+ Id root = TestIdFactory::root();
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
ASSERT_TRUE(dir.good());
// Should apply just fine.
mock_server_->AddUpdateDirectory(1, 0, "in_root", 10, 10);
- // Name clash: this is a conflict.
+ // Same name. But this SHOULD work.
mock_server_->AddUpdateDirectory(2, 0, "in_root", 10, 10);
// Unknown parent: should never be applied. "-80" is a legal server ID,
@@ -1223,8 +1082,8 @@ TEST_F(SyncerTest, IllegalAndLegalUpdates) {
ConflictResolutionView conflict_view(state_.get());
SyncerStatus status(NULL, state_.get());
- // Ids 2 and 3 are expected to be in conflict now.
- EXPECT_TRUE(2 == conflict_view.conflicting_updates());
+ // Id 3 should be in conflict now.
+ EXPECT_TRUE(1 == conflict_view.conflicting_updates());
// These entries will be used in the second set of updates.
mock_server_->AddUpdateDirectory(4, 0, "newer_version", 20, 10);
@@ -1236,17 +1095,18 @@ TEST_F(SyncerTest, IllegalAndLegalUpdates) {
syncer_->SyncShare(state_.get());
// The three items with an unresolved parent should be unapplied (3, 9, 100).
- // The name clash should also still be in conflict.
- EXPECT_TRUE(4 == conflict_view.conflicting_updates());
+ EXPECT_TRUE(3 == conflict_view.conflicting_updates());
{
WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ // Even though it has the same name, it should work.
Entry name_clash(&trans, GET_BY_ID, ids_.FromNumber(2));
ASSERT_TRUE(name_clash.good());
- EXPECT_TRUE(name_clash.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_FALSE(name_clash.Get(IS_UNAPPLIED_UPDATE))
+ << "Duplicate name SHOULD be OK.";
Entry bad_parent(&trans, GET_BY_ID, ids_.FromNumber(3));
ASSERT_TRUE(bad_parent.good());
- EXPECT_TRUE(name_clash.Get(IS_UNAPPLIED_UPDATE))
+ EXPECT_TRUE(bad_parent.Get(IS_UNAPPLIED_UPDATE))
<< "child of unknown parent should be in conflict";
Entry bad_parent_child(&trans, GET_BY_ID, ids_.FromNumber(9));
@@ -1260,7 +1120,7 @@ TEST_F(SyncerTest, IllegalAndLegalUpdates) {
<< "great-grandchild of unknown parent should be in conflict";
}
- // Updating 1 should unblock the clashing item 2.
+ // Updating 1 should not affect item 2 of the same name.
mock_server_->AddUpdateDirectory(1, 0, "new_name", 20, 20);
// Moving 5 under 6 will create a cycle: a conflict.
@@ -1275,6 +1135,7 @@ TEST_F(SyncerTest, IllegalAndLegalUpdates) {
syncer_->SyncShare(state_.get());
{
ReadTransaction trans(dir, __FILE__, __LINE__);
+
Entry still_a_dir(&trans, GET_BY_ID, ids_.FromNumber(10));
ASSERT_TRUE(still_a_dir.good());
EXPECT_FALSE(still_a_dir.Get(IS_UNAPPLIED_UPDATE));
@@ -1282,21 +1143,25 @@ TEST_F(SyncerTest, IllegalAndLegalUpdates) {
EXPECT_TRUE(10 == still_a_dir.Get(SERVER_VERSION));
EXPECT_TRUE(still_a_dir.Get(IS_DIR));
- Entry rename(&trans, GET_BY_PARENTID_AND_NAME, root, PSTR("new_name"));
+ Entry rename(&trans, GET_BY_ID, ids_.FromNumber(1));
ASSERT_TRUE(rename.good());
+ EXPECT_EQ(root, rename.Get(PARENT_ID));
+ EXPECT_EQ(PSTR("new_name"), rename.Get(NON_UNIQUE_NAME));
EXPECT_FALSE(rename.Get(IS_UNAPPLIED_UPDATE));
EXPECT_TRUE(ids_.FromNumber(1) == rename.Get(ID));
EXPECT_TRUE(20 == rename.Get(BASE_VERSION));
- Entry unblocked(&trans, GET_BY_PARENTID_AND_NAME, root, PSTR("in_root"));
- ASSERT_TRUE(unblocked.good());
- EXPECT_FALSE(unblocked.Get(IS_UNAPPLIED_UPDATE));
- EXPECT_TRUE(ids_.FromNumber(2) == unblocked.Get(ID));
- EXPECT_TRUE(10 == unblocked.Get(BASE_VERSION));
+ Entry name_clash(&trans, GET_BY_ID, ids_.FromNumber(2));
+ ASSERT_TRUE(name_clash.good());
+ EXPECT_EQ(root, name_clash.Get(PARENT_ID));
+ EXPECT_TRUE(ids_.FromNumber(2) == name_clash.Get(ID));
+ EXPECT_TRUE(10 == name_clash.Get(BASE_VERSION));
+ EXPECT_EQ(PSTR("in_root"), name_clash.Get(NON_UNIQUE_NAME));
Entry ignored_old_version(&trans, GET_BY_ID, ids_.FromNumber(4));
ASSERT_TRUE(ignored_old_version.good());
- EXPECT_TRUE(ignored_old_version.Get(NAME) == PSTR("newer_version"));
+ EXPECT_TRUE(
+ ignored_old_version.Get(NON_UNIQUE_NAME) == PSTR("newer_version"));
EXPECT_FALSE(ignored_old_version.Get(IS_UNAPPLIED_UPDATE));
EXPECT_TRUE(20 == ignored_old_version.Get(BASE_VERSION));
@@ -1324,6 +1189,9 @@ TEST_F(SyncerTest, IllegalAndLegalUpdates) {
TEST_F(SyncerTest, CommitTimeRename) {
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
ASSERT_TRUE(dir.good());
+ int64 metahandle_folder;
+ int64 metahandle_new_entry;
+
// Create a folder and an entry.
{
WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
@@ -1331,8 +1199,11 @@ TEST_F(SyncerTest, CommitTimeRename) {
ASSERT_TRUE(parent.good());
parent.Put(IS_DIR, true);
parent.Put(IS_UNSYNCED, true);
+ metahandle_folder = parent.Get(META_HANDLE);
+
MutableEntry entry(&trans, CREATE, parent.Get(ID), PSTR("new_entry"));
ASSERT_TRUE(entry.good());
+ metahandle_new_entry = entry.Get(META_HANDLE);
WriteTestDataToEntry(&trans, &entry);
}
@@ -1344,17 +1215,19 @@ TEST_F(SyncerTest, CommitTimeRename) {
// Verify it was correctly renamed.
{
ReadTransaction trans(dir, __FILE__, __LINE__);
- Entry entry_folder(&trans, GET_BY_PATH, PSTR("renamed_Folder"));
+ Entry entry_folder(&trans, GET_BY_HANDLE, metahandle_folder);
ASSERT_TRUE(entry_folder.good());
+ EXPECT_EQ(PSTR("renamed_Folder"), entry_folder.Get(NON_UNIQUE_NAME));
- Entry entry_new(&trans, GET_BY_PATH,
- PSTR("renamed_Folder") + PathString(kPathSeparator)
- + PSTR("renamed_new_entry"));
+ Entry entry_new(&trans, GET_BY_HANDLE, metahandle_new_entry);
ASSERT_TRUE(entry_new.good());
+ EXPECT_EQ(entry_folder.Get(ID), entry_new.Get(PARENT_ID));
+ EXPECT_EQ(PSTR("renamed_new_entry"), entry_new.Get(NON_UNIQUE_NAME));
// And that the unrelated directory creation worked without a rename.
- Entry new_dir(&trans, GET_BY_PATH, PSTR("dir_in_root"));
+ Entry new_dir(&trans, GET_BY_ID, ids_.FromNumber(2));
EXPECT_TRUE(new_dir.good());
+ EXPECT_EQ(PSTR("dir_in_root"), new_dir.Get(NON_UNIQUE_NAME));
}
}
@@ -1367,109 +1240,56 @@ TEST_F(SyncerTest, CommitTimeRenameI18N) {
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
ASSERT_TRUE(dir.good());
- // Create a folder and entry.
+ int64 metahandle;
+ // Create a folder, expect a commit time rename.
{
WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
MutableEntry parent(&trans, CREATE, root_id_, PSTR("Folder"));
ASSERT_TRUE(parent.good());
parent.Put(IS_DIR, true);
parent.Put(IS_UNSYNCED, true);
- MutableEntry entry(&trans, CREATE, parent.Get(ID), PSTR("new_entry"));
- ASSERT_TRUE(entry.good());
- WriteTestDataToEntry(&trans, &entry);
+ metahandle = parent.Get(META_HANDLE);
}
- // Mix in a directory creation too for later.
- mock_server_->AddUpdateDirectory(2, 0, "dir_in_root", 10, 10);
mock_server_->SetCommitTimeRename(i18nString);
syncer_->SyncShare();
// Verify it was correctly renamed.
{
ReadTransaction trans(dir, __FILE__, __LINE__);
- PathString expectedFolder(i18nString);
- expectedFolder.append("Folder");
- Entry entry_folder(&trans, GET_BY_PATH, expectedFolder);
- ASSERT_TRUE(entry_folder.good());
- PathString expected = expectedFolder + PathString(kPathSeparator);
- expected.append(i18nString);
- expected.append("new_entry");
-
- Entry entry_new(&trans, GET_BY_PATH, expected);
- ASSERT_TRUE(entry_new.good());
-
- // And that the unrelated directory creation worked without a rename.
- Entry new_dir(&trans, GET_BY_PATH, PSTR("dir_in_root"));
- EXPECT_TRUE(new_dir.good());
- }
-}
-
-TEST_F(SyncerTest, CommitTimeRenameCollision) {
- ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
- ASSERT_TRUE(dir.good());
- // Create a folder to collide with.
- {
- WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
- MutableEntry collider(&trans, CREATE, root_id_, PSTR("renamed_Folder"));
- ASSERT_TRUE(collider.good());
- collider.Put(IS_DIR, true);
- collider.Put(IS_UNSYNCED, true);
- }
- syncer_->SyncShare(); // Now we have a folder.
-
- {
- WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
- MutableEntry folder(&trans, CREATE, root_id_, PSTR("Folder"));
- ASSERT_TRUE(folder.good());
- folder.Put(IS_DIR, true);
- folder.Put(IS_UNSYNCED, true);
- }
+ PathString expected_folder_name(i18nString);
+ expected_folder_name.append("Folder");
- mock_server_->set_next_new_id(30000);
- mock_server_->SetCommitTimeRename("renamed_");
- syncer_->SyncShare(); // Should collide and rename aside.
- // This case will only occur if we got a commit time rename aside
- // and the server attempts to rename to an entry that we know about, but it
- // does not.
- // Verify it was correctly renamed; one of them should have a sanitized name.
- {
- ReadTransaction trans(dir, __FILE__, __LINE__);
- Entry collider_folder(&trans, GET_BY_PARENTID_AND_NAME, root_id_,
- PSTR("renamed_Folder"));
- EXPECT_TRUE(collider_folder.Get(UNSANITIZED_NAME) == PSTR(""));
- ASSERT_TRUE(collider_folder.good());
-
- // ID is generated by next_new_id_ and server mock prepending of strings.
- Entry entry_folder(&trans, GET_BY_ID,
- syncable::Id::CreateFromServerId("mock_server:30000"));
+ Entry entry_folder(&trans, GET_BY_HANDLE, metahandle);
ASSERT_TRUE(entry_folder.good());
- // A little arbitrary but nothing we can do about that.
- EXPECT_TRUE(entry_folder.Get(NAME) == PSTR("renamed_Folder~1"));
- EXPECT_TRUE(entry_folder.Get(UNSANITIZED_NAME) == PSTR("renamed_Folder"));
+ EXPECT_EQ(expected_folder_name, entry_folder.Get(NON_UNIQUE_NAME));
}
}
-
// A commit with a lost response produces an update that has to be reunited with
// its parent.
TEST_F(SyncerTest, CommitReuniteUpdateAdjustsChildren) {
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
ASSERT_TRUE(dir.good());
+
// Create a folder in the root.
+ int64 metahandle_folder;
{
WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
MutableEntry entry(&trans, CREATE, trans.root_id(), PSTR("new_folder"));
ASSERT_TRUE(entry.good());
entry.Put(IS_DIR, true);
entry.Put(IS_UNSYNCED, true);
+ metahandle_folder = entry.Get(META_HANDLE);
}
// Verify it and pull the ID out of the folder.
syncable::Id folder_id;
+ int64 metahandle_entry;
{
ReadTransaction trans(dir, __FILE__, __LINE__);
- Entry entry(&trans, GET_BY_PATH, PSTR("new_folder"));
+ Entry entry(&trans, GET_BY_HANDLE, metahandle_folder);
ASSERT_TRUE(entry.good());
folder_id = entry.Get(ID);
ASSERT_TRUE(!folder_id.ServerKnows());
@@ -1480,6 +1300,7 @@ TEST_F(SyncerTest, CommitReuniteUpdateAdjustsChildren) {
WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
MutableEntry entry(&trans, CREATE, folder_id, PSTR("new_entry"));
ASSERT_TRUE(entry.good());
+ metahandle_entry = entry.Get(META_HANDLE);
WriteTestDataToEntry(&trans, &entry);
}
@@ -1487,9 +1308,10 @@ TEST_F(SyncerTest, CommitReuniteUpdateAdjustsChildren) {
syncable::Id entry_id;
{
ReadTransaction trans(dir, __FILE__, __LINE__);
- Entry entry(&trans, syncable::GET_BY_PARENTID_AND_NAME, folder_id,
- PSTR("new_entry"));
+ Entry entry(&trans, syncable::GET_BY_HANDLE, metahandle_entry);
ASSERT_TRUE(entry.good());
+ EXPECT_EQ(folder_id, entry.Get(PARENT_ID));
+ EXPECT_EQ(PSTR("new_entry"), entry.Get(NON_UNIQUE_NAME));
entry_id = entry.Get(ID);
EXPECT_TRUE(!entry_id.ServerKnows());
VerifyTestDataInEntry(&trans, &entry);
@@ -1501,7 +1323,7 @@ TEST_F(SyncerTest, CommitReuniteUpdateAdjustsChildren) {
syncable::Id new_folder_id =
syncable::Id::CreateFromServerId("folder_server_id");
- // the following update should cause the folder to both apply the update, as
+ // The following update should cause the folder to both apply the update, as
+ // well as reassociate the id.
mock_server_->AddUpdateDirectory(new_folder_id, root_id_,
"new_folder", new_version, timestamp);
@@ -1516,21 +1338,23 @@ TEST_F(SyncerTest, CommitReuniteUpdateAdjustsChildren) {
{
// The folder's ID should have been updated.
ReadTransaction trans(dir, __FILE__, __LINE__);
- Entry folder(&trans, GET_BY_PATH, PSTR("new_folder"));
+ Entry folder(&trans, GET_BY_HANDLE, metahandle_folder);
ASSERT_TRUE(folder.good());
+ EXPECT_EQ(PSTR("new_folder"), folder.Get(NON_UNIQUE_NAME));
EXPECT_TRUE(new_version == folder.Get(BASE_VERSION));
EXPECT_TRUE(new_folder_id == folder.Get(ID));
EXPECT_TRUE(folder.Get(ID).ServerKnows());
+ EXPECT_EQ(trans.root_id(), folder.Get(PARENT_ID));
- // We changed the id of the parent, old lookups should fail.
- Entry bad_entry(&trans, syncable::GET_BY_PARENTID_AND_NAME, folder_id,
- PSTR("new_entry"));
- EXPECT_FALSE(bad_entry.good());
+ // Since it was updated, the old folder should not exist.
+ Entry old_dead_folder(&trans, GET_BY_ID, folder_id);
+ EXPECT_FALSE(old_dead_folder.good());
- // The child's parent should have changed as well.
- Entry entry(&trans, syncable::GET_BY_PARENTID_AND_NAME, new_folder_id,
- PSTR("new_entry"));
+ // The child's parent should have changed.
+ Entry entry(&trans, syncable::GET_BY_HANDLE, metahandle_entry);
ASSERT_TRUE(entry.good());
+ EXPECT_EQ(PSTR("new_entry"), entry.Get(NON_UNIQUE_NAME));
+ EXPECT_EQ(new_folder_id, entry.Get(PARENT_ID));
EXPECT_TRUE(!entry.Get(ID).ServerKnows());
VerifyTestDataInEntry(&trans, &entry);
}
@@ -1541,18 +1365,23 @@ TEST_F(SyncerTest, CommitReuniteUpdateAdjustsChildren) {
TEST_F(SyncerTest, CommitReuniteUpdate) {
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
ASSERT_TRUE(dir.good());
+
// Create an entry in the root.
+ int64 entry_metahandle;
{
WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
MutableEntry entry(&trans, CREATE, trans.root_id(), PSTR("new_entry"));
ASSERT_TRUE(entry.good());
+ entry_metahandle = entry.Get(META_HANDLE);
WriteTestDataToEntry(&trans, &entry);
}
+
// Verify it and pull the ID out.
syncable::Id entry_id;
{
ReadTransaction trans(dir, __FILE__, __LINE__);
- Entry entry(&trans, GET_BY_PATH, PSTR("new_entry"));
+
+ Entry entry(&trans, GET_BY_HANDLE, entry_metahandle);
ASSERT_TRUE(entry.good());
entry_id = entry.Get(ID);
EXPECT_TRUE(!entry_id.ServerKnows());
@@ -1577,10 +1406,11 @@ TEST_F(SyncerTest, CommitReuniteUpdate) {
syncer_->SyncShare();
{
ReadTransaction trans(dir, __FILE__, __LINE__);
- Entry entry(&trans, GET_BY_PATH, PSTR("new_entry"));
+ Entry entry(&trans, GET_BY_HANDLE, entry_metahandle);
ASSERT_TRUE(entry.good());
EXPECT_TRUE(new_version == entry.Get(BASE_VERSION));
EXPECT_TRUE(new_entry_id == entry.Get(ID));
+ EXPECT_EQ(PSTR("new_entry"), entry.Get(NON_UNIQUE_NAME));
}
}
@@ -1592,18 +1422,21 @@ TEST_F(SyncerTest, CommitReuniteUpdate) {
TEST_F(SyncerTest, CommitReuniteUpdateDoesNotChokeOnDeletedLocalEntry) {
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
ASSERT_TRUE(dir.good());
+
// Create an entry in the root.
+ int64 entry_metahandle;
{
WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
MutableEntry entry(&trans, CREATE, trans.root_id(), PSTR("new_entry"));
ASSERT_TRUE(entry.good());
+ entry_metahandle = entry.Get(META_HANDLE);
WriteTestDataToEntry(&trans, &entry);
}
// Verify it and pull the ID out.
syncable::Id entry_id;
{
ReadTransaction trans(dir, __FILE__, __LINE__);
- Entry entry(&trans, GET_BY_PATH, PSTR("new_entry"));
+ Entry entry(&trans, GET_BY_HANDLE, entry_metahandle);
ASSERT_TRUE(entry.good());
entry_id = entry.Get(ID);
EXPECT_TRUE(!entry_id.ServerKnows());
@@ -1628,7 +1461,9 @@ TEST_F(SyncerTest, CommitReuniteUpdateDoesNotChokeOnDeletedLocalEntry) {
// Purposefully delete the entry now before the update application finishes.
{
WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
- MutableEntry entry(&trans, GET_BY_PATH, PSTR("new_entry"));
+ Id new_entry_id = GetOnlyEntryWithName(
+ &trans, trans.root_id(), PSTR("new_entry"));
+ MutableEntry entry(&trans, GET_BY_ID, new_entry_id);
ASSERT_TRUE(entry.good());
entry.Put(syncable::IS_DEL, true);
}
@@ -1637,7 +1472,9 @@ TEST_F(SyncerTest, CommitReuniteUpdateDoesNotChokeOnDeletedLocalEntry) {
syncer_->SyncShare();
{
ReadTransaction trans(dir, __FILE__, __LINE__);
- Entry entry(&trans, GET_BY_PATH, PSTR("new_entry"));
+ Id new_entry_id = GetOnlyEntryWithName(
+ &trans, trans.root_id(), PSTR("new_entry"));
+ Entry entry(&trans, GET_BY_ID, new_entry_id);
ASSERT_TRUE(entry.good());
EXPECT_FALSE(entry.Get(IS_DEL));
@@ -1742,62 +1579,64 @@ TEST_F(SyncerTest, ReverseFolderOrderingTest) {
mock_server_->AddUpdateDirectory(1, 0, "parent", 10, 10);
LoopSyncShare(syncer_);
ReadTransaction trans(dir, __FILE__, __LINE__);
- Entry child(&trans, syncable::GET_BY_PARENTID_AND_NAME, ids_.FromNumber(4),
- PSTR("gggchild"));
+
+ Id child_id = GetOnlyEntryWithName(
+ &trans, ids_.FromNumber(4), PSTR("gggchild"));
+ Entry child(&trans, GET_BY_ID, child_id);
ASSERT_TRUE(child.good());
}
-bool CreateFolderInBob(Directory* dir) {
- WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
- MutableEntry bob(&trans, syncable::GET_BY_PARENTID_AND_NAME, trans.root_id(),
- PSTR("bob"));
- MutableEntry entry2(&trans, syncable::CREATE, bob.Get(syncable::ID),
- PSTR("bob"));
- CHECK(entry2.good());
- entry2.Put(syncable::IS_DIR, true);
- entry2.Put(syncable::IS_UNSYNCED, true);
- return true;
-}
+class EntryCreatedInNewFolderTest : public SyncerTest {
+ public:
+ void CreateFolderInBob() {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
-TEST_F(SyncerTest, EntryCreatedInNewFolderMidSync) {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry bob(&trans,
+ syncable::GET_BY_ID,
+ GetOnlyEntryWithName(&trans,
+ TestIdFactory::root(),
+ PSTR("bob")));
+ CHECK(bob.good());
+
+ MutableEntry entry2(&trans, syncable::CREATE, bob.Get(syncable::ID),
+ PSTR("bob"));
+ CHECK(entry2.good());
+ entry2.Put(syncable::IS_DIR, true);
+ entry2.Put(syncable::IS_UNSYNCED, true);
+ }
+};
+
+TEST_F(EntryCreatedInNewFolderTest, EntryCreatedInNewFolderMidSync) {
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
CHECK(dir.good());
{
WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
- MutableEntry entry(&trans, syncable::CREATE, trans.root_id(), PSTR("bob"));
+ MutableEntry entry(&trans, syncable::CREATE, trans.root_id(),
+ PSTR("bob"));
ASSERT_TRUE(entry.good());
entry.Put(syncable::IS_DIR, true);
entry.Put(syncable::IS_UNSYNCED, true);
}
- mock_server_->SetMidCommitCallbackFunction(CreateFolderInBob);
+
+ mock_server_->SetMidCommitCallback(
+ NewCallback<EntryCreatedInNewFolderTest>(this,
+ &EntryCreatedInNewFolderTest::CreateFolderInBob));
syncer_->SyncShare(BUILD_COMMIT_REQUEST, SYNCER_END);
EXPECT_TRUE(1 == mock_server_->committed_ids().size());
{
ReadTransaction trans(dir, __FILE__, __LINE__);
- PathChar path[] = {*kPathSeparator, 'b', 'o', 'b', 0};
- Entry entry(&trans, syncable::GET_BY_PATH, path);
- ASSERT_TRUE(entry.good());
- PathChar path2[] = {*kPathSeparator, 'b', 'o', 'b',
- *kPathSeparator, 'b', 'o', 'b', 0};
- Entry entry2(&trans, syncable::GET_BY_PATH, path2);
- ASSERT_TRUE(entry2.good());
- }
-}
+ Entry parent_entry(&trans, syncable::GET_BY_ID,
+ GetOnlyEntryWithName(&trans, TestIdFactory::root(), PSTR("bob")));
+ ASSERT_TRUE(parent_entry.good());
-bool TouchFredAndGingerInRoot(Directory* dir) {
- WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
- MutableEntry fred(&trans, syncable::GET_BY_PARENTID_AND_NAME, trans.root_id(),
- PSTR("fred"));
- CHECK(fred.good());
- // Equivalent to touching the entry.
- fred.Put(syncable::IS_UNSYNCED, true);
- fred.Put(syncable::SYNCING, false);
- MutableEntry ginger(&trans, syncable::GET_BY_PARENTID_AND_NAME,
- trans.root_id(), PSTR("ginger"));
- CHECK(ginger.good());
- ginger.Put(syncable::IS_UNSYNCED, true);
- ginger.Put(syncable::SYNCING, false);
- return true;
+ Id child_id =
+ GetOnlyEntryWithName(&trans, parent_entry.Get(ID), PSTR("bob"));
+ Entry child(&trans, syncable::GET_BY_ID, child_id);
+ ASSERT_TRUE(child.good());
+ EXPECT_EQ(parent_entry.Get(ID), child.Get(PARENT_ID));
+ }
}
TEST_F(SyncerTest, NegativeIDInUpdate) {
@@ -1811,12 +1650,15 @@ TEST_F(SyncerTest, NegativeIDInUpdate) {
TEST_F(SyncerTest, UnappliedUpdateOnCreatedItemItemDoesNotCrash) {
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
CHECK(dir.good());
+
+ int64 metahandle_fred;
{
// Create an item.
WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
MutableEntry fred_match(&trans, CREATE, trans.root_id(),
PSTR("fred_match"));
ASSERT_TRUE(fred_match.good());
+ metahandle_fred = fred_match.Get(META_HANDLE);
WriteTestDataToEntry(&trans, &fred_match);
}
// Commit it.
@@ -1827,7 +1669,7 @@ TEST_F(SyncerTest, UnappliedUpdateOnCreatedItemItemDoesNotCrash) {
{
// Now receive a change from outside.
WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
- MutableEntry fred_match(&trans, GET_BY_PATH, PSTR("fred_match"));
+ MutableEntry fred_match(&trans, GET_BY_HANDLE, metahandle_fred);
ASSERT_TRUE(fred_match.good());
EXPECT_TRUE(fred_match.Get(ID).ServerKnows());
fred_match_id = fred_match.Get(ID);
@@ -1840,233 +1682,6 @@ TEST_F(SyncerTest, UnappliedUpdateOnCreatedItemItemDoesNotCrash) {
}
}
-TEST_F(SyncerTest, NameClashWithResolverInconsistentUpdates) {
- // I'm unsure what the client should really do when the scenario in this old
- // test occurs. The set of updates we've received are not consistent.
- ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
- CHECK(dir.good());
- const char* base_name = "name_clash_with_resolver";
- const char* full_name = "name_clash_with_resolver.htm";
- const PathChar* base_name_p = PSTR("name_clash_with_resolver");
- mock_server_->AddUpdateBookmark(1, 0, full_name, 10, 10);
- syncer_->SyncShare();
- {
- WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
- MutableEntry entry(&trans, GET_BY_ID, ids_.FromNumber(1));
- ASSERT_TRUE(entry.good());
- WriteTestDataToEntry(&trans, &entry);
- }
- mock_server_->AddUpdateBookmark(2, 0, full_name, 10, 10);
- mock_server_->set_conflict_n_commits(1);
- syncer_->SyncShare();
- mock_server_->set_conflict_n_commits(1);
- syncer_->SyncShare();
- EXPECT_TRUE(0 == syncer_events_.size());
- {
- ReadTransaction trans(dir, __FILE__, __LINE__);
- Entry id1(&trans, GET_BY_ID, ids_.FromNumber(1));
- Entry id2(&trans, GET_BY_ID, ids_.FromNumber(2));
- ASSERT_TRUE(id1.good());
- ASSERT_TRUE(id2.good());
- EXPECT_TRUE(root_id_ == id1.Get(PARENT_ID));
- EXPECT_TRUE(root_id_ == id2.Get(PARENT_ID));
- PathString id1name = id1.Get(NAME);
-
- EXPECT_TRUE(base_name_p == id1name.substr(0, strlen(base_name)));
- EXPECT_TRUE(PSTR(".htm") == id1name.substr(id1name.length() - 4));
- EXPECT_LE(id1name.length(), 200ul);
- EXPECT_TRUE(PSTR("name_clash_with_resolver.htm") == id2.Get(NAME));
- }
-}
-
-TEST_F(SyncerTest, NameClashWithResolver) {
- ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
- CHECK(dir.good());
- const char* base_name = "name_clash_with_resolver";
- const char* full_name = "name_clash_with_resolver.htm";
- const PathChar* base_name_p = PSTR("name_clash_with_resolver");
- const PathChar* full_name_p = PSTR("name_clash_with_resolver.htm");
- mock_server_->AddUpdateBookmark(1, 0, "fred", 10, 10);
- syncer_->SyncShare();
- {
- WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
- MutableEntry entry(&trans, GET_BY_ID, ids_.FromNumber(1));
- ASSERT_TRUE(entry.good());
- entry.Put(NAME, full_name_p);
- WriteTestDataToEntry(&trans, &entry);
- }
- mock_server_->AddUpdateBookmark(2, 0, full_name, 10, 10);
- // We do NOT use LoopSyncShare here because of the way that
- // mock_server_->conflict_n_commits works.
- // It will only conflict the first n commits, so if we let the syncer loop,
- // the second commit of the update will succeed even though it shouldn't.
- mock_server_->set_conflict_n_commits(1);
- syncer_->SyncShare(state_.get());
- mock_server_->set_conflict_n_commits(1);
- syncer_->SyncShare(state_.get());
- EXPECT_TRUE(0 == syncer_events_.size());
- syncer_events_.clear();
- {
- ReadTransaction trans(dir, __FILE__, __LINE__);
- Entry id1(&trans, GET_BY_ID, ids_.FromNumber(1));
- Entry id2(&trans, GET_BY_ID, ids_.FromNumber(2));
- ASSERT_TRUE(id1.good());
- ASSERT_TRUE(id2.good());
- EXPECT_TRUE(root_id_ == id1.Get(PARENT_ID));
- EXPECT_TRUE(root_id_ == id2.Get(PARENT_ID));
- PathString id1name = id1.Get(NAME);
-
- EXPECT_TRUE(base_name_p == id1name.substr(0, strlen(base_name)));
- EXPECT_TRUE(PSTR(".htm") == id1name.substr(id1name.length() - 4));
- EXPECT_LE(id1name.length(), 200ul);
- EXPECT_TRUE(full_name_p == id2.Get(NAME));
- }
-}
-
-TEST_F(SyncerTest, VeryLongNameClashWithResolver) {
- ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
- CHECK(dir.good());
- string name;
- PathString name_w;
- name.resize(250, 'X');
- name_w.resize(250, 'X');
- name.append(".htm");
- name_w.append(PSTR(".htm"));
- mock_server_->AddUpdateBookmark(1, 0, "fred", 10, 10);
- syncer_->SyncShare();
- {
- WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
- MutableEntry entry(&trans, GET_BY_ID, ids_.FromNumber(1));
- ASSERT_TRUE(entry.good());
- entry.Put(NAME, name_w);
- WriteTestDataToEntry(&trans, &entry);
- }
- mock_server_->AddUpdateBookmark(2, 0, name, 10, 10);
- mock_server_->set_conflict_n_commits(1);
- // We do NOT use LoopSyncShare here because of the way that
- // mock_server_->conflict_n_commits works.
- // It will only conflict the first n commits, so if we let the syncer loop,
- // the second commit of the update will succeed even though it shouldn't.
- syncer_->SyncShare(state_.get());
- mock_server_->set_conflict_n_commits(1);
- syncer_->SyncShare(state_.get());
- EXPECT_TRUE(0 == syncer_events_.size());
- {
- ReadTransaction trans(dir, __FILE__, __LINE__);
- Entry id1(&trans, GET_BY_ID, ids_.FromNumber(1));
- Entry id2(&trans, GET_BY_ID, ids_.FromNumber(2));
- ASSERT_TRUE(id1.good());
- ASSERT_TRUE(id2.good());
- EXPECT_TRUE(root_id_ == id1.Get(PARENT_ID));
- EXPECT_TRUE(root_id_ == id2.Get(PARENT_ID));
- PathString id1name = id1.Get(NAME);
- EXPECT_TRUE(PSTR(".htm") == id1name.substr(id1name.length() - 4));
- EXPECT_TRUE(name_w == id2.Get(NAME));
- }
-}
-
-TEST_F(SyncerTest, NameClashWithResolverAndDotStartedName) {
- ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
- CHECK(dir.good());
- mock_server_->AddUpdateBookmark(1, 0, ".bob.htm", 10, 10);
- syncer_->SyncShare();
- {
- WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
- MutableEntry entry(&trans, GET_BY_ID, ids_.FromNumber(1));
- ASSERT_TRUE(entry.good());
- entry.Put(IS_UNSYNCED, true);
- entry.Put(NAME, PSTR(".htm"));
- WriteTestDataToEntry(&trans, &entry);
- }
- mock_server_->set_conflict_all_commits(true);
- mock_server_->AddUpdateBookmark(2, 0, ".htm", 10, 10);
- syncer_->SyncShare();
- syncer_->SyncShare();
- EXPECT_TRUE(0 == syncer_events_.size());
- {
- ReadTransaction trans(dir, __FILE__, __LINE__);
- Entry id1(&trans, GET_BY_ID, ids_.FromNumber(1));
- Entry id2(&trans, GET_BY_ID, ids_.FromNumber(2));
- ASSERT_TRUE(id1.good());
- ASSERT_TRUE(id2.good());
- EXPECT_TRUE(root_id_ == id1.Get(PARENT_ID));
- EXPECT_TRUE(root_id_ == id2.Get(PARENT_ID));
- PathString id1name = id1.Get(NAME);
- EXPECT_TRUE(PSTR(".htm") == id1name.substr(0, 4));
- EXPECT_TRUE(PSTR(".htm") == id2.Get(NAME));
- }
-}
-
-TEST_F(SyncerTest, ThreeNamesClashWithResolver) {
- ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
- CHECK(dir.good());
- mock_server_->set_conflict_all_commits(true);
- mock_server_->AddUpdateBookmark(1, 0, "in_root.htm", 10, 10);
- LoopSyncShare(syncer_);
- {
- WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
- MutableEntry entry(&trans, GET_BY_ID, ids_.FromNumber(1));
- ASSERT_TRUE(entry.good());
- ASSERT_FALSE(entry.Get(IS_DEL));
- entry.Put(IS_UNSYNCED, true);
- }
- mock_server_->AddUpdateBookmark(2, 0, "in_root.htm", 10, 10);
- LoopSyncShare(syncer_);
- LoopSyncShare(syncer_);
- {
- WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
- MutableEntry entry(&trans, GET_BY_ID, ids_.FromNumber(2));
- ASSERT_TRUE(entry.good());
- ASSERT_FALSE(entry.Get(IS_DEL));
- entry.Put(IS_UNSYNCED, true);
- }
- mock_server_->AddUpdateBookmark(3, 0, "in_root.htm", 10, 10);
- LoopSyncShare(syncer_);
- LoopSyncShare(syncer_);
- {
- WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
- MutableEntry entry(&trans, GET_BY_ID, ids_.FromNumber(3));
- ASSERT_TRUE(entry.good());
- ASSERT_FALSE(entry.Get(IS_DEL));
- entry.Put(IS_UNSYNCED, true);
- }
- mock_server_->AddUpdateBookmark(4, 0, "in_root.htm", 10, 10);
- LoopSyncShare(syncer_);
- LoopSyncShare(syncer_);
- EXPECT_TRUE(0 == syncer_events_.size());
- {
- ReadTransaction trans(dir, __FILE__, __LINE__);
- Entry id1(&trans, GET_BY_ID, ids_.FromNumber(1));
- Entry id2(&trans, GET_BY_ID, ids_.FromNumber(2));
- Entry id3(&trans, GET_BY_ID, ids_.FromNumber(3));
- Entry id4(&trans, GET_BY_ID, ids_.FromNumber(4));
- ASSERT_TRUE(id1.good());
- ASSERT_TRUE(id2.good());
- ASSERT_TRUE(id3.good());
- ASSERT_TRUE(id4.good());
- EXPECT_TRUE(root_id_ == id1.Get(PARENT_ID));
- EXPECT_TRUE(root_id_ == id2.Get(PARENT_ID));
- EXPECT_TRUE(root_id_ == id3.Get(PARENT_ID));
- EXPECT_TRUE(root_id_ == id4.Get(PARENT_ID));
- PathString id1name = id1.Get(NAME);
- ASSERT_GE(id1name.length(), 4ul);
- EXPECT_TRUE(PSTR("in_root") == id1name.substr(0, 7));
- EXPECT_TRUE(PSTR(".htm") == id1name.substr(id1name.length() - 4));
- EXPECT_NE(PSTR("in_root.htm"), id1.Get(NAME));
- PathString id2name = id2.Get(NAME);
- ASSERT_GE(id2name.length(), 4ul);
- EXPECT_TRUE(PSTR("in_root") == id2name.substr(0, 7));
- EXPECT_TRUE(PSTR(".htm") == id2name.substr(id2name.length() - 4));
- EXPECT_NE(PSTR("in_root.htm"), id2.Get(NAME));
- PathString id3name = id3.Get(NAME);
- ASSERT_GE(id3name.length(), 4ul);
- EXPECT_TRUE(PSTR("in_root") == id3name.substr(0, 7));
- EXPECT_TRUE(PSTR(".htm") == id3name.substr(id3name.length() - 4));
- EXPECT_NE(PSTR("in_root.htm"), id3.Get(NAME));
- EXPECT_TRUE(PSTR("in_root.htm") == id4.Get(NAME));
- }
-}
-
/**
* In the event that we have a double changed entry, that is changed on both
* the client and the server, the conflict resolver should just drop one of
@@ -2107,12 +1722,13 @@ TEST_F(SyncerTest, DoublyChangedWithResolver) {
syncer_events_.clear();
}
-// We got this repro case when someone was editing entries while sync was
+// We got this repro case when someone was editing bookmarks while sync was
// occurring. The entry had changed out from underneath the user.
TEST_F(SyncerTest, CommitsUpdateDoesntAlterEntry) {
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
CHECK(dir.good());
int64 test_time = 123456;
+ int64 entry_metahandle;
{
WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
MutableEntry entry(&wtrans, syncable::CREATE, root_id_, PSTR("Pete"));
@@ -2121,6 +1737,7 @@ TEST_F(SyncerTest, CommitsUpdateDoesntAlterEntry) {
entry.Put(syncable::IS_DIR, true);
entry.Put(syncable::IS_UNSYNCED, true);
entry.Put(syncable::MTIME, test_time);
+ entry_metahandle = entry.Get(META_HANDLE);
}
syncer_->SyncShare();
syncable::Id id;
@@ -2128,8 +1745,7 @@ TEST_F(SyncerTest, CommitsUpdateDoesntAlterEntry) {
int64 server_position_in_parent;
{
ReadTransaction trans(dir, __FILE__, __LINE__);
- Entry entry(&trans, syncable::GET_BY_PARENTID_AND_NAME, trans.root_id(),
- PSTR("Pete"));
+ Entry entry(&trans, syncable::GET_BY_HANDLE, entry_metahandle);
ASSERT_TRUE(entry.good());
id = entry.Get(ID);
EXPECT_TRUE(id.ServerKnows());
@@ -2150,18 +1766,28 @@ TEST_F(SyncerTest, CommitsUpdateDoesntAlterEntry) {
TEST_F(SyncerTest, ParentAndChildBothMatch) {
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
CHECK(dir.good());
+ syncable::Id parent_id = ids_.NewServerId();
+ syncable::Id child_id = ids_.NewServerId();
+
{
WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
MutableEntry parent(&wtrans, CREATE, root_id_, PSTR("Folder"));
ASSERT_TRUE(parent.good());
parent.Put(IS_DIR, true);
parent.Put(IS_UNSYNCED, true);
+ parent.Put(ID, parent_id);
+ parent.Put(BASE_VERSION, 1);
+ parent.Put(IS_BOOKMARK_OBJECT, true);
+
MutableEntry child(&wtrans, CREATE, parent.Get(ID), PSTR("test.htm"));
ASSERT_TRUE(child.good());
+ child.Put(ID, child_id);
+ child.Put(BASE_VERSION, 1);
+ child.Put(IS_BOOKMARK_OBJECT, true);
WriteTestDataToEntry(&wtrans, &child);
}
- mock_server_->AddUpdateDirectory(parent_id_, root_id_, "Folder", 10, 10);
- mock_server_->AddUpdateBookmark(child_id_, parent_id_, "test.htm", 10, 10);
+ mock_server_->AddUpdateDirectory(parent_id, root_id_, "Folder", 10, 10);
+ mock_server_->AddUpdateBookmark(child_id, parent_id, "test.htm", 10, 10);
mock_server_->set_conflict_all_commits(true);
syncer_->SyncShare();
syncer_->SyncShare();
@@ -2171,7 +1797,7 @@ TEST_F(SyncerTest, ParentAndChildBothMatch) {
Directory::ChildHandles children;
dir->GetChildHandles(&trans, root_id_, &children);
EXPECT_TRUE(1 == children.size());
- dir->GetChildHandles(&trans, parent_id_, &children);
+ dir->GetChildHandles(&trans, parent_id, &children);
EXPECT_TRUE(1 == children.size());
Directory::UnappliedUpdateMetaHandles unapplied;
dir->GetUnappliedUpdateMetaHandles(&trans, &unapplied);
@@ -2241,12 +1867,15 @@ TEST_F(SyncerTest, DeletingEntryInFolder) {
// This test is a little fake.
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
CHECK(dir.good());
+
+ int64 existing_metahandle;
{
WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
MutableEntry entry(&trans, CREATE, trans.root_id(), PSTR("existing"));
ASSERT_TRUE(entry.good());
entry.Put(IS_DIR, true);
entry.Put(IS_UNSYNCED, true);
+ existing_metahandle = entry.Get(META_HANDLE);
}
syncer_->SyncShare(state_.get());
{
@@ -2256,7 +1885,7 @@ TEST_F(SyncerTest, DeletingEntryInFolder) {
newfolder.Put(IS_DIR, true);
newfolder.Put(IS_UNSYNCED, true);
- MutableEntry existing(&trans, GET_BY_PATH, PSTR("existing"));
+ MutableEntry existing(&trans, GET_BY_HANDLE, existing_metahandle);
ASSERT_TRUE(existing.good());
existing.Put(PARENT_ID, newfolder.Get(ID));
existing.Put(IS_UNSYNCED, true);
@@ -2270,10 +1899,11 @@ TEST_F(SyncerTest, DeletingEntryInFolder) {
EXPECT_TRUE(0 == status.conflicting_commits());
}
-// TODO(sync): Is this test useful anymore?
TEST_F(SyncerTest, DeletingEntryWithLocalEdits) {
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
CHECK(dir.good());
+ int64 newfolder_metahandle;
+
mock_server_->AddUpdateDirectory(1, 0, "bob", 1, 10);
syncer_->SyncShare();
{
@@ -2281,15 +1911,15 @@ TEST_F(SyncerTest, DeletingEntryWithLocalEdits) {
MutableEntry newfolder(&trans, CREATE, ids_.FromNumber(1), PSTR("local"));
ASSERT_TRUE(newfolder.good());
newfolder.Put(IS_UNSYNCED, true);
+ newfolder_metahandle = newfolder.Get(META_HANDLE);
}
mock_server_->AddUpdateDirectory(1, 0, "bob", 2, 20);
mock_server_->SetLastUpdateDeleted();
syncer_->SyncShare(SYNCER_BEGIN, APPLY_UPDATES);
{
ReadTransaction trans(dir, __FILE__, __LINE__);
- Entry entry_by_path(&trans, syncable::GET_BY_PATH,
- PathString(PSTR("bob")) + kPathSeparator + PSTR("local"));
- ASSERT_TRUE(entry_by_path.good());
+ Entry entry(&trans, syncable::GET_BY_HANDLE, newfolder_metahandle);
+ ASSERT_TRUE(entry.good());
}
}
@@ -2306,17 +1936,17 @@ TEST_F(SyncerTest, FolderSwapUpdate) {
ReadTransaction trans(dir, __FILE__, __LINE__);
Entry id1(&trans, GET_BY_ID, ids_.FromNumber(7801));
ASSERT_TRUE(id1.good());
- EXPECT_TRUE(PSTR("fred") == id1.Get(NAME));
+ EXPECT_TRUE(PSTR("fred") == id1.Get(NON_UNIQUE_NAME));
EXPECT_TRUE(root_id_ == id1.Get(PARENT_ID));
Entry id2(&trans, GET_BY_ID, ids_.FromNumber(1024));
ASSERT_TRUE(id2.good());
- EXPECT_TRUE(PSTR("bob") == id2.Get(NAME));
+ EXPECT_TRUE(PSTR("bob") == id2.Get(NON_UNIQUE_NAME));
EXPECT_TRUE(root_id_ == id2.Get(PARENT_ID));
}
syncer_events_.clear();
}
-TEST_F(SyncerTest, CorruptUpdateBadFolderSwapUpdate) {
+TEST_F(SyncerTest, NameCollidingFolderSwapWorksFine) {
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
CHECK(dir.good());
mock_server_->AddUpdateDirectory(7801, 0, "bob", 1, 10);
@@ -2327,15 +1957,15 @@ TEST_F(SyncerTest, CorruptUpdateBadFolderSwapUpdate) {
ReadTransaction trans(dir, __FILE__, __LINE__);
Entry id1(&trans, GET_BY_ID, ids_.FromNumber(7801));
ASSERT_TRUE(id1.good());
- EXPECT_TRUE(PSTR("bob") == id1.Get(NAME));
+ EXPECT_TRUE(PSTR("bob") == id1.Get(NON_UNIQUE_NAME));
EXPECT_TRUE(root_id_ == id1.Get(PARENT_ID));
Entry id2(&trans, GET_BY_ID, ids_.FromNumber(1024));
ASSERT_TRUE(id2.good());
- EXPECT_TRUE(PSTR("fred") == id2.Get(NAME));
+ EXPECT_TRUE(PSTR("fred") == id2.Get(NON_UNIQUE_NAME));
EXPECT_TRUE(root_id_ == id2.Get(PARENT_ID));
Entry id3(&trans, GET_BY_ID, ids_.FromNumber(4096));
ASSERT_TRUE(id3.good());
- EXPECT_TRUE(PSTR("alice") == id3.Get(NAME));
+ EXPECT_TRUE(PSTR("alice") == id3.Get(NON_UNIQUE_NAME));
EXPECT_TRUE(root_id_ == id3.Get(PARENT_ID));
}
mock_server_->AddUpdateDirectory(1024, 0, "bob", 2, 20);
@@ -2346,226 +1976,20 @@ TEST_F(SyncerTest, CorruptUpdateBadFolderSwapUpdate) {
ReadTransaction trans(dir, __FILE__, __LINE__);
Entry id1(&trans, GET_BY_ID, ids_.FromNumber(7801));
ASSERT_TRUE(id1.good());
- EXPECT_TRUE(PSTR("bob") == id1.Get(NAME));
+ EXPECT_TRUE(PSTR("fred") == id1.Get(NON_UNIQUE_NAME));
EXPECT_TRUE(root_id_ == id1.Get(PARENT_ID));
Entry id2(&trans, GET_BY_ID, ids_.FromNumber(1024));
ASSERT_TRUE(id2.good());
- EXPECT_TRUE(PSTR("fred") == id2.Get(NAME));
+ EXPECT_TRUE(PSTR("bob") == id2.Get(NON_UNIQUE_NAME));
EXPECT_TRUE(root_id_ == id2.Get(PARENT_ID));
Entry id3(&trans, GET_BY_ID, ids_.FromNumber(4096));
ASSERT_TRUE(id3.good());
- EXPECT_TRUE(PSTR("alice") == id3.Get(NAME));
+ EXPECT_TRUE(PSTR("bob") == id3.Get(NON_UNIQUE_NAME));
EXPECT_TRUE(root_id_ == id3.Get(PARENT_ID));
}
syncer_events_.clear();
}
-// TODO(chron): New set of folder swap commit tests that don't rely on
-// transactional commits.
-TEST_F(SyncerTest, DISABLED_FolderSwapCommit) {
- ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
- CHECK(dir.good());
- mock_server_->AddUpdateDirectory(7801, 0, "bob", 1, 10);
- mock_server_->AddUpdateDirectory(1024, 0, "fred", 1, 10);
- syncer_->SyncShare();
- {
- WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
- MutableEntry id1(&trans, GET_BY_ID, ids_.FromNumber(7801));
- MutableEntry id2(&trans, GET_BY_ID, ids_.FromNumber(1024));
- ASSERT_TRUE(id1.good());
- ASSERT_TRUE(id2.good());
- EXPECT_FALSE(id1.Put(NAME, PSTR("fred")));
- EXPECT_TRUE(id1.Put(NAME, PSTR("temp")));
- EXPECT_TRUE(id2.Put(NAME, PSTR("bob")));
- EXPECT_TRUE(id1.Put(NAME, PSTR("fred")));
- id1.Put(IS_UNSYNCED, true);
- id2.Put(IS_UNSYNCED, true);
- }
- mock_server_->set_conflict_all_commits(true);
- syncer_->SyncShare();
- ASSERT_TRUE(2 == mock_server_->commit_messages().size());
- {
- ReadTransaction trans(dir, __FILE__, __LINE__);
- Entry id1(&trans, GET_BY_ID, ids_.FromNumber(7801));
- ASSERT_TRUE(id1.good());
- EXPECT_TRUE(PSTR("fred") == id1.Get(NAME));
- EXPECT_TRUE(root_id_ == id1.Get(PARENT_ID));
- EXPECT_FALSE(id1.Get(IS_UNSYNCED));
- Entry id2(&trans, GET_BY_ID, ids_.FromNumber(1024));
- ASSERT_TRUE(id2.good());
- EXPECT_TRUE(PSTR("bob") == id2.Get(NAME));
- EXPECT_TRUE(root_id_ == id2.Get(PARENT_ID));
- EXPECT_FALSE(id2.Get(IS_UNSYNCED));
- }
- syncer_events_.clear();
-}
-
-// TODO(chron): New set of folder swap commit tests that don't rely on
-// transactional commits.
-TEST_F(SyncerTest, DISABLED_DualFolderSwapCommit) {
- ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
- CHECK(dir.good());
- mock_server_->AddUpdateDirectory(1, 0, "bob", 1, 10);
- mock_server_->AddUpdateDirectory(2, 0, "fred", 1, 10);
- mock_server_->AddUpdateDirectory(3, 0, "sue", 1, 10);
- mock_server_->AddUpdateDirectory(4, 0, "greg", 1, 10);
- syncer_->SyncShare();
- {
- WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
- MutableEntry id1(&trans, GET_BY_ID, ids_.FromNumber(1));
- MutableEntry id2(&trans, GET_BY_ID, ids_.FromNumber(2));
- MutableEntry id3(&trans, GET_BY_ID, ids_.FromNumber(3));
- MutableEntry id4(&trans, GET_BY_ID, ids_.FromNumber(4));
- ASSERT_TRUE(id1.good());
- ASSERT_TRUE(id2.good());
- ASSERT_TRUE(id3.good());
- ASSERT_TRUE(id4.good());
- EXPECT_FALSE(id1.Put(NAME, PSTR("fred")));
- EXPECT_TRUE(id1.Put(NAME, PSTR("temp")));
- EXPECT_TRUE(id2.Put(NAME, PSTR("bob")));
- EXPECT_TRUE(id1.Put(NAME, PSTR("fred")));
- EXPECT_FALSE(id3.Put(NAME, PSTR("greg")));
- EXPECT_TRUE(id3.Put(NAME, PSTR("temp")));
- EXPECT_TRUE(id4.Put(NAME, PSTR("sue")));
- EXPECT_TRUE(id3.Put(NAME, PSTR("greg")));
- id1.Put(IS_UNSYNCED, true);
- id2.Put(IS_UNSYNCED, true);
- id3.Put(IS_UNSYNCED, true);
- id4.Put(IS_UNSYNCED, true);
- }
- mock_server_->set_conflict_all_commits(true);
- syncer_->SyncShare();
- ASSERT_TRUE(4 == mock_server_->commit_messages().size());
- {
- ReadTransaction trans(dir, __FILE__, __LINE__);
- Entry id1(&trans, GET_BY_ID, ids_.FromNumber(1));
- ASSERT_TRUE(id1.good());
- EXPECT_TRUE(PSTR("fred") == id1.Get(NAME));
- EXPECT_TRUE(root_id_ == id1.Get(PARENT_ID));
- EXPECT_FALSE(id1.Get(IS_UNSYNCED));
- Entry id2(&trans, GET_BY_ID, ids_.FromNumber(2));
- ASSERT_TRUE(id2.good());
- EXPECT_TRUE(PSTR("bob") == id2.Get(NAME));
- EXPECT_TRUE(root_id_ == id2.Get(PARENT_ID));
- EXPECT_FALSE(id2.Get(IS_UNSYNCED));
- Entry id3(&trans, GET_BY_ID, ids_.FromNumber(3));
- ASSERT_TRUE(id3.good());
- EXPECT_TRUE(PSTR("greg") == id3.Get(NAME));
- EXPECT_TRUE(root_id_ == id3.Get(PARENT_ID));
- EXPECT_FALSE(id3.Get(IS_UNSYNCED));
- Entry id4(&trans, GET_BY_ID, ids_.FromNumber(4));
- ASSERT_TRUE(id4.good());
- EXPECT_TRUE(PSTR("sue") == id4.Get(NAME));
- EXPECT_TRUE(root_id_ == id4.Get(PARENT_ID));
- EXPECT_FALSE(id4.Get(IS_UNSYNCED));
- }
- syncer_events_.clear();
-}
-
-// TODO(chron): New set of folder swap commit tests that don't rely on
-// transactional commits.
-TEST_F(SyncerTest, DISABLED_TripleFolderRotateCommit) {
- ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
- CHECK(dir.good());
- mock_server_->AddUpdateDirectory(1, 0, "bob", 1, 10);
- mock_server_->AddUpdateDirectory(2, 0, "fred", 1, 10);
- mock_server_->AddUpdateDirectory(3, 0, "sue", 1, 10);
- syncer_->SyncShare();
- {
- WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
- MutableEntry id1(&trans, GET_BY_ID, ids_.FromNumber(1));
- MutableEntry id2(&trans, GET_BY_ID, ids_.FromNumber(2));
- MutableEntry id3(&trans, GET_BY_ID, ids_.FromNumber(3));
- ASSERT_TRUE(id1.good());
- ASSERT_TRUE(id2.good());
- ASSERT_TRUE(id3.good());
- EXPECT_FALSE(id1.Put(NAME, PSTR("sue")));
- EXPECT_TRUE(id1.Put(NAME, PSTR("temp")));
- EXPECT_TRUE(id2.Put(NAME, PSTR("bob")));
- EXPECT_TRUE(id3.Put(NAME, PSTR("fred")));
- EXPECT_TRUE(id1.Put(NAME, PSTR("sue")));
- id1.Put(IS_UNSYNCED, true);
- id2.Put(IS_UNSYNCED, true);
- id3.Put(IS_UNSYNCED, true);
- }
- mock_server_->set_conflict_all_commits(true);
- syncer_->SyncShare();
- ASSERT_TRUE(2 == mock_server_->commit_messages().size());
- {
- ReadTransaction trans(dir, __FILE__, __LINE__);
- Entry id1(&trans, GET_BY_ID, ids_.FromNumber(1));
- ASSERT_TRUE(id1.good());
- EXPECT_TRUE(PSTR("sue") == id1.Get(NAME));
- EXPECT_TRUE(root_id_ == id1.Get(PARENT_ID));
- EXPECT_FALSE(id1.Get(IS_UNSYNCED));
- Entry id2(&trans, GET_BY_ID, ids_.FromNumber(2));
- ASSERT_TRUE(id2.good());
- EXPECT_TRUE(PSTR("bob") == id2.Get(NAME));
- EXPECT_TRUE(root_id_ == id2.Get(PARENT_ID));
- EXPECT_FALSE(id2.Get(IS_UNSYNCED));
- Entry id3(&trans, GET_BY_ID, ids_.FromNumber(3));
- ASSERT_TRUE(id3.good());
- EXPECT_TRUE(PSTR("fred") == id3.Get(NAME));
- EXPECT_TRUE(root_id_ == id3.Get(PARENT_ID));
- EXPECT_FALSE(id3.Get(IS_UNSYNCED));
- }
- syncer_events_.clear();
-}
-
-// TODO(chron): New set of folder swap commit tests that don't rely on
-// transactional commits.
-TEST_F(SyncerTest, DISABLED_ServerAndClientSwap) {
- ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
- CHECK(dir.good());
- mock_server_->AddUpdateDirectory(1, 0, "bob", 1, 10);
- mock_server_->AddUpdateDirectory(2, 0, "fred", 1, 10);
- mock_server_->AddUpdateDirectory(3, 0, "sue", 1, 10);
- mock_server_->AddUpdateDirectory(4, 0, "greg", 1, 10);
- syncer_->SyncShare();
- {
- WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
- MutableEntry id1(&trans, GET_BY_ID, ids_.FromNumber(1));
- MutableEntry id2(&trans, GET_BY_ID, ids_.FromNumber(2));
- ASSERT_TRUE(id1.good());
- ASSERT_TRUE(id2.good());
- EXPECT_FALSE(id1.Put(NAME, PSTR("fred")));
- EXPECT_TRUE(id1.Put(NAME, PSTR("temp")));
- EXPECT_TRUE(id2.Put(NAME, PSTR("bob")));
- EXPECT_TRUE(id1.Put(NAME, PSTR("fred")));
- id1.Put(IS_UNSYNCED, true);
- id2.Put(IS_UNSYNCED, true);
- }
- mock_server_->set_conflict_all_commits(true);
- mock_server_->AddUpdateDirectory(3, 0, "greg", 2, 20);
- mock_server_->AddUpdateDirectory(4, 0, "sue", 2, 20);
- syncer_->SyncShare();
- ASSERT_TRUE(2 == mock_server_->commit_messages().size());
- {
- ReadTransaction trans(dir, __FILE__, __LINE__);
- Entry id1(&trans, GET_BY_ID, ids_.FromNumber(1));
- ASSERT_TRUE(id1.good());
- EXPECT_TRUE(PSTR("fred") == id1.Get(NAME));
- EXPECT_TRUE(root_id_ == id1.Get(PARENT_ID));
- EXPECT_FALSE(id1.Get(IS_UNSYNCED));
- Entry id2(&trans, GET_BY_ID, ids_.FromNumber(2));
- ASSERT_TRUE(id2.good());
- EXPECT_TRUE(PSTR("bob") == id2.Get(NAME));
- EXPECT_TRUE(root_id_ == id2.Get(PARENT_ID));
- EXPECT_FALSE(id2.Get(IS_UNSYNCED));
- Entry id3(&trans, GET_BY_ID, ids_.FromNumber(3));
- ASSERT_TRUE(id3.good());
- EXPECT_TRUE(PSTR("greg") == id3.Get(NAME));
- EXPECT_TRUE(root_id_ == id3.Get(PARENT_ID));
- EXPECT_FALSE(id3.Get(IS_UNSYNCED));
- Entry id4(&trans, GET_BY_ID, ids_.FromNumber(4));
- ASSERT_TRUE(id4.good());
- EXPECT_TRUE(PSTR("sue") == id4.Get(NAME));
- EXPECT_TRUE(root_id_ == id4.Get(PARENT_ID));
- EXPECT_FALSE(id4.Get(IS_UNSYNCED));
- }
- syncer_events_.clear();
-}
-
TEST_F(SyncerTest, CommitManyItemsInOneGo) {
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
uint32 max_batches = 3;
@@ -2591,44 +2015,57 @@ TEST_F(SyncerTest, CommitManyItemsInOneGo) {
TEST_F(SyncerTest, HugeConflict) {
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
- PathString name = PSTR("f");
- int item_count = 30; // We should be able to do 300 or 3000 w/o issue.
+ int item_count = 300; // We should be able to do 300 or 3000 w/o issue.
CHECK(dir.good());
+
+ syncable::Id parent_id = ids_.NewServerId();
+ syncable::Id last_id = parent_id;
+ vector<syncable::Id> tree_ids;
+
+ // Create a lot of updates for which the parent does not exist yet.
+ // Generate a huge deep tree which should all fail to apply at first.
{
WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
- syncable::Id last_id = trans.root_id();
- for (int i = 0; i < item_count ; i++) {
- MutableEntry e(&trans, CREATE, last_id, name);
- e.Put(IS_UNSYNCED, true);
- e.Put(IS_DIR, true);
- last_id = e.Get(ID);
+ for (int i = 0; i < item_count; i++) {
+ syncable::Id next_id = ids_.NewServerId();
+ tree_ids.push_back(next_id);
+ mock_server_->AddUpdateDirectory(next_id, last_id, "BOB", 2, 20);
+ last_id = next_id;
}
}
+
syncer_->SyncShare();
- CHECK(dir.good());
+
+ // Check they're in the expected conflict state.
{
- WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
- MutableEntry e(&trans, GET_BY_PARENTID_AND_NAME, root_id_, name);
- syncable::Id in_root = e.Get(ID);
- syncable::Id last_id = e.Get(ID);
- for (int i = 0; i < item_count - 1 ; i++) {
- MutableEntry e(&trans, GET_BY_PARENTID_AND_NAME, last_id, name);
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ for (int i = 0; i < item_count; i++) {
+ Entry e(&trans, GET_BY_ID, tree_ids[i]);
+ // They should all exist but none should be applied.
ASSERT_TRUE(e.good());
- mock_server_->AddUpdateDirectory(in_root, root_id_, "BOB", 2, 20);
- mock_server_->SetLastUpdateDeleted();
- if (0 == i)
- e.Put(IS_UNSYNCED, true);
- last_id = e.Get(ID);
+ EXPECT_TRUE(e.Get(IS_DEL));
+ EXPECT_TRUE(e.Get(IS_UNAPPLIED_UPDATE));
}
}
- mock_server_->set_conflict_all_commits(true);
- syncer_->SyncShare();
- syncer_->SyncShare();
+
+ // Add the missing parent directory.
+ mock_server_->AddUpdateDirectory(parent_id, TestIdFactory::root(),
+ "BOB", 2, 20);
syncer_->SyncShare();
- CHECK(dir.good());
+
+ // Now they should all be OK.
+ {
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ for (int i = 0; i < item_count; i++) {
+ Entry e(&trans, GET_BY_ID, tree_ids[i]);
+ ASSERT_TRUE(e.good());
+ EXPECT_FALSE(e.Get(IS_DEL));
+ EXPECT_FALSE(e.Get(IS_UNAPPLIED_UPDATE));
+ }
+ }
}
-TEST_F(SyncerTest, CaseChangeNameClashConflict) {
+TEST_F(SyncerTest, DontCrashOnCaseChange) {
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
CHECK(dir.good());
mock_server_->AddUpdateDirectory(1, 0, "bob", 1, 10);
@@ -2656,66 +2093,6 @@ TEST_F(SyncerTest, UnsyncedItemAndUpdate) {
syncer_events_.clear();
}
-TEST_F(SyncerTest, FolderMergeWithChildNameClash) {
- ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
- CHECK(dir.good());
- syncable::Id local_folder_id, root_id;
- mock_server_->AddUpdateDirectory(parent_id_, root_id_, "Folder2", 10, 10);
- mock_server_->AddUpdateBookmark(child_id_, parent_id_, "Bookmark", 10, 10);
- syncer_->SyncShare();
- int64 local_folder_handle;
- {
- WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
- MutableEntry parent(&wtrans, CREATE, root_id_, PSTR("Folder"));
- ASSERT_TRUE(parent.good());
- local_folder_id = parent.Get(ID);
- local_folder_handle = parent.Get(META_HANDLE);
- parent.Put(IS_DIR, true);
- parent.Put(IS_UNSYNCED, true);
- MutableEntry child(&wtrans, CREATE, parent.Get(ID), PSTR("Bookmark"));
- ASSERT_TRUE(child.good());
- WriteTestDataToEntry(&wtrans, &child);
- }
- mock_server_->AddUpdateDirectory(parent_id_, root_id_, "Folder", 20, 20);
- mock_server_->set_conflict_all_commits(true);
- LoopSyncShare(syncer_);
- LoopSyncShare(syncer_);
- {
- ReadTransaction trans(dir, __FILE__, __LINE__);
- Directory::ChildHandles children;
- dir->GetChildHandles(&trans, root_id_, &children);
- ASSERT_TRUE(2 == children.size());
- Entry parent(&trans, GET_BY_ID, parent_id_);
- ASSERT_TRUE(parent.good());
- EXPECT_TRUE(parent.Get(NAME) == PSTR("Folder"));
- if (local_folder_handle == children[0]) {
- EXPECT_TRUE(children[1] == parent.Get(META_HANDLE));
- } else {
- EXPECT_TRUE(children[0] == parent.Get(META_HANDLE));
- EXPECT_TRUE(children[1] == local_folder_handle);
- }
- dir->GetChildHandles(&trans, local_folder_id, &children);
- EXPECT_TRUE(1 == children.size());
- dir->GetChildHandles(&trans, parent_id_, &children);
- EXPECT_TRUE(1 == children.size());
- Directory::UnappliedUpdateMetaHandles unapplied;
- dir->GetUnappliedUpdateMetaHandles(&trans, &unapplied);
- EXPECT_TRUE(0 == unapplied.size());
- syncable::Directory::UnsyncedMetaHandles unsynced;
- dir->GetUnsyncedMetaHandles(&trans, &unsynced);
- EXPECT_TRUE(2 == unsynced.size());
- }
- mock_server_->set_conflict_all_commits(false);
- syncer_->SyncShare();
- {
- ReadTransaction trans(dir, __FILE__, __LINE__);
- syncable::Directory::UnsyncedMetaHandles unsynced;
- dir->GetUnsyncedMetaHandles(&trans, &unsynced);
- EXPECT_TRUE(0 == unsynced.size());
- }
- syncer_events_.clear();
-}
-
TEST_F(SyncerTest, NewEntryAndAlteredServerEntrySharePath) {
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
CHECK(dir.good());
@@ -2754,7 +2131,7 @@ TEST_F(SyncerTest, SiblingDirectoriesBecomeCircular) {
ASSERT_TRUE(A.good());
A.Put(IS_UNSYNCED, true);
ASSERT_TRUE(A.Put(PARENT_ID, ids_.FromNumber(2)));
- ASSERT_TRUE(A.Put(NAME, PSTR("B")));
+ ASSERT_TRUE(A.Put(NON_UNIQUE_NAME, PSTR("B")));
}
mock_server_->AddUpdateDirectory(2, 1, "A", 20, 20);
mock_server_->set_conflict_all_commits(true);
@@ -2766,8 +2143,8 @@ TEST_F(SyncerTest, SiblingDirectoriesBecomeCircular) {
ASSERT_TRUE(A.good());
MutableEntry B(&wtrans, GET_BY_ID, ids_.FromNumber(2));
ASSERT_TRUE(B.good());
- EXPECT_TRUE(A.Get(NAME) == PSTR("B"));
- EXPECT_TRUE(B.Get(NAME) == PSTR("B"));
+ EXPECT_TRUE(A.Get(NON_UNIQUE_NAME) == PSTR("B"));
+ EXPECT_TRUE(B.Get(NON_UNIQUE_NAME) == PSTR("B"));
}
}
@@ -2786,11 +2163,11 @@ TEST_F(SyncerTest, ConflictSetClassificationError) {
ASSERT_TRUE(A.good());
A.Put(IS_UNSYNCED, true);
A.Put(IS_UNAPPLIED_UPDATE, true);
- A.Put(SERVER_NAME, PSTR("B"));
+ A.Put(SERVER_NON_UNIQUE_NAME, PSTR("B"));
MutableEntry B(&wtrans, GET_BY_ID, ids_.FromNumber(2));
ASSERT_TRUE(B.good());
B.Put(IS_UNAPPLIED_UPDATE, true);
- B.Put(SERVER_NAME, PSTR("A"));
+ B.Put(SERVER_NON_UNIQUE_NAME, PSTR("A"));
}
syncer_->SyncShare();
syncer_events_.clear();
@@ -2812,9 +2189,9 @@ TEST_F(SyncerTest, SwapEntryNames) {
MutableEntry B(&wtrans, GET_BY_ID, ids_.FromNumber(2));
ASSERT_TRUE(B.good());
B.Put(IS_UNSYNCED, true);
- ASSERT_TRUE(A.Put(NAME, PSTR("C")));
- ASSERT_TRUE(B.Put(NAME, PSTR("A")));
- ASSERT_TRUE(A.Put(NAME, PSTR("B")));
+ ASSERT_TRUE(A.Put(NON_UNIQUE_NAME, PSTR("C")));
+ ASSERT_TRUE(B.Put(NON_UNIQUE_NAME, PSTR("A")));
+ ASSERT_TRUE(A.Put(NON_UNIQUE_NAME, PSTR("B")));
}
syncer_->SyncShare();
syncer_events_.clear();
@@ -2881,12 +2258,16 @@ TEST_F(SyncerTest, FixDirectoryLoopConflict) {
TEST_F(SyncerTest, ResolveWeWroteTheyDeleted) {
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
CHECK(dir.good());
+
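+  // Track bob by metahandle, since the entry ends up with a fresh local ID.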
+ int64 bob_metahandle;
+
mock_server_->AddUpdateBookmark(1, 0, "bob", 1, 10);
syncer_->SyncShare();
{
WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
MutableEntry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
ASSERT_TRUE(bob.good());
+ bob_metahandle = bob.Get(META_HANDLE);
WriteTestDataToEntry(&trans, &bob);
}
mock_server_->AddUpdateBookmark(1, 0, "bob", 2, 10);
@@ -2896,7 +2277,7 @@ TEST_F(SyncerTest, ResolveWeWroteTheyDeleted) {
syncer_->SyncShare();
{
ReadTransaction trans(dir, __FILE__, __LINE__);
- Entry bob(&trans, GET_BY_PARENTID_AND_NAME, trans.root_id(), PSTR("bob"));
+ Entry bob(&trans, GET_BY_HANDLE, bob_metahandle);
ASSERT_TRUE(bob.good());
EXPECT_TRUE(bob.Get(IS_UNSYNCED));
EXPECT_FALSE(bob.Get(ID).ServerKnows());
@@ -2906,71 +2287,60 @@ TEST_F(SyncerTest, ResolveWeWroteTheyDeleted) {
syncer_events_.clear();
}
-// This test is disabled because we actually enforce the opposite behavior in:
-// ConflictResolverMergesLocalDeleteAndServerUpdate for bookmarks.
-TEST_F(SyncerTest, DISABLED_ResolveWeDeletedTheyWrote) {
- ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
- CHECK(dir.good());
- mock_server_->AddUpdateBookmark(1, 0, "bob", 1, 10);
- syncer_->SyncShare();
- {
- WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
- MutableEntry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
- ASSERT_TRUE(bob.good());
- bob.Put(IS_UNSYNCED, true);
- bob.Put(IS_DEL, true);
- }
- mock_server_->AddUpdateBookmark(1, 0, "bob", 2, 10);
- mock_server_->set_conflict_all_commits(true);
- syncer_->SyncShare();
- syncer_->SyncShare();
- {
- ReadTransaction trans(dir, __FILE__, __LINE__);
- Entry bob(&trans, GET_BY_PARENTID_AND_NAME, trans.root_id(), PSTR("bob"));
- ASSERT_TRUE(bob.good());
- EXPECT_TRUE(bob.Get(ID) == ids_.FromNumber(1));
- EXPECT_FALSE(bob.Get(IS_UNSYNCED));
- EXPECT_FALSE(bob.Get(IS_UNAPPLIED_UPDATE));
- EXPECT_FALSE(bob.Get(IS_DEL));
- }
- syncer_events_.clear();
-}
-
TEST_F(SyncerTest, ServerDeletingFolderWeHaveMovedSomethingInto) {
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
CHECK(dir.good());
- mock_server_->AddUpdateDirectory(1, 0, "bob", 1, 10);
- mock_server_->AddUpdateDirectory(2, 0, "fred", 1, 10);
+
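+  // Use explicit server IDs so the original entries can be told apart from
+  // any reborn replacements.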
+ syncable::Id bob_id = ids_.NewServerId();
+ syncable::Id fred_id = ids_.NewServerId();
+
+ mock_server_->AddUpdateDirectory(bob_id, TestIdFactory::root(),
+ "bob", 1, 10);
+ mock_server_->AddUpdateDirectory(fred_id, TestIdFactory::root(),
+ "fred", 1, 10);
syncer_->SyncShare();
{
WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
- MutableEntry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+ MutableEntry bob(&trans, GET_BY_ID, bob_id);
ASSERT_TRUE(bob.good());
bob.Put(IS_UNSYNCED, true);
- bob.Put(PARENT_ID, ids_.FromNumber(2));
+ bob.Put(PARENT_ID, fred_id);
}
- mock_server_->AddUpdateDirectory(2, 0, "fred", 2, 20);
+ mock_server_->AddUpdateDirectory(fred_id, TestIdFactory::root(),
+ "fred", 2, 20);
mock_server_->SetLastUpdateDeleted();
mock_server_->set_conflict_all_commits(true);
syncer_->SyncShare();
syncer_->SyncShare();
{
ReadTransaction trans(dir, __FILE__, __LINE__);
- Entry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+
+ Entry bob(&trans, GET_BY_ID, bob_id);
ASSERT_TRUE(bob.good());
- Entry fred(&trans, GET_BY_PARENTID_AND_NAME, trans.root_id(), PSTR("fred"));
+ EXPECT_TRUE(bob.Get(IS_UNSYNCED));
+ EXPECT_FALSE(bob.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_TRUE(bob.Get(NON_UNIQUE_NAME) == PSTR("bob"));
+ EXPECT_NE(bob.Get(PARENT_ID), fred_id);
+
+ // Entry was deleted and reborn.
+ Entry dead_fred(&trans, GET_BY_ID, fred_id);
+ EXPECT_FALSE(dead_fred.good());
+
+ // Reborn fred
+ Entry fred(&trans, GET_BY_ID, bob.Get(PARENT_ID));
ASSERT_TRUE(fred.good());
+ EXPECT_TRUE(fred.Get(PARENT_ID) == trans.root_id());
+ EXPECT_EQ(PSTR("fred"), fred.Get(NON_UNIQUE_NAME));
EXPECT_TRUE(fred.Get(IS_UNSYNCED));
- EXPECT_TRUE(bob.Get(IS_UNSYNCED));
- EXPECT_TRUE(bob.Get(PARENT_ID) == fred.Get(ID));
EXPECT_FALSE(fred.Get(IS_UNAPPLIED_UPDATE));
- EXPECT_FALSE(bob.Get(IS_UNAPPLIED_UPDATE));
}
syncer_events_.clear();
}
// TODO(ncarter): This test is bogus, but it actually seems to hit an
// interesting case the 4th time SyncShare is called.
+// TODO(chron): The fourth time that SyncShare is called, it crashes.
+// This seems to be due to a bug in the conflict set building logic.
TEST_F(SyncerTest, DISABLED_ServerDeletingFolderWeHaveAnOpenEntryIn) {
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
CHECK(dir.good());
@@ -3011,7 +2381,9 @@ TEST_F(SyncerTest, DISABLED_ServerDeletingFolderWeHaveAnOpenEntryIn) {
ReadTransaction trans(dir, __FILE__, __LINE__);
Entry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
ASSERT_TRUE(bob.good());
- Entry fred(&trans, GET_BY_PARENTID_AND_NAME, trans.root_id(), PSTR("fred"));
+ Id fred_id =
+ GetOnlyEntryWithName(&trans, TestIdFactory::root(), PSTR("fred"));
+ Entry fred(&trans, GET_BY_ID, fred_id);
ASSERT_TRUE(fred.good());
EXPECT_FALSE(fred.Get(IS_UNSYNCED));
EXPECT_TRUE(fred.Get(IS_UNAPPLIED_UPDATE));
@@ -3021,32 +2393,52 @@ TEST_F(SyncerTest, DISABLED_ServerDeletingFolderWeHaveAnOpenEntryIn) {
syncer_events_.clear();
}
+
TEST_F(SyncerTest, WeMovedSomethingIntoAFolderServerHasDeleted) {
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
CHECK(dir.good());
- mock_server_->AddUpdateDirectory(1, 0, "bob", 1, 10);
- mock_server_->AddUpdateDirectory(2, 0, "fred", 1, 10);
+
+ syncable::Id bob_id = ids_.NewServerId();
+ syncable::Id fred_id = ids_.NewServerId();
+
+ mock_server_->AddUpdateDirectory(bob_id, TestIdFactory::root(),
+ "bob", 1, 10);
+ mock_server_->AddUpdateDirectory(fred_id, TestIdFactory::root(),
+ "fred", 1, 10);
syncer_->SyncShare();
{
WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
- MutableEntry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+ Entry fred(&trans, GET_BY_ID, fred_id);
+ ASSERT_TRUE(fred.good());
+
+ MutableEntry bob(&trans, GET_BY_ID, bob_id);
ASSERT_TRUE(bob.good());
bob.Put(IS_UNSYNCED, true);
- bob.Put(PARENT_ID, ids_.FromNumber(2));
+ bob.Put(PARENT_ID, fred_id);
}
- mock_server_->AddUpdateDirectory(2, 0, "fred", 2, 20);
+ mock_server_->AddUpdateDirectory(fred_id, TestIdFactory::root(),
+ "fred", 2, 20);
mock_server_->SetLastUpdateDeleted();
mock_server_->set_conflict_all_commits(true);
syncer_->SyncShare();
syncer_->SyncShare();
{
ReadTransaction trans(dir, __FILE__, __LINE__);
- Entry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+ Entry bob(&trans, GET_BY_ID, bob_id);
ASSERT_TRUE(bob.good());
- Entry fred(&trans, GET_BY_PATH, PSTR("fred"));
- ASSERT_TRUE(fred.good());
+
+    // The entry was deleted by the server; we recreate it under a new ID.
+ Entry dead_fred(&trans, GET_BY_ID, fred_id);
+ EXPECT_FALSE(dead_fred.good());
+
+ // Fred is reborn with a local ID.
+    Entry fred(&trans, GET_BY_ID, bob.Get(PARENT_ID));
+    ASSERT_TRUE(fred.good());
+ EXPECT_EQ(PSTR("fred"), fred.Get(NON_UNIQUE_NAME));
+ EXPECT_EQ(TestIdFactory::root(), fred.Get(PARENT_ID));
EXPECT_TRUE(fred.Get(IS_UNSYNCED));
EXPECT_FALSE(fred.Get(ID).ServerKnows());
+
+ // Bob needs to update his parent.
EXPECT_TRUE(bob.Get(IS_UNSYNCED));
EXPECT_TRUE(bob.Get(PARENT_ID) == fred.Get(ID));
EXPECT_TRUE(fred.Get(PARENT_ID) == root_id_);
@@ -3056,60 +2448,97 @@ TEST_F(SyncerTest, WeMovedSomethingIntoAFolderServerHasDeleted) {
syncer_events_.clear();
}
-namespace {
+class FolderMoveDeleteRenameTest : public SyncerTest {
+ public:
+ FolderMoveDeleteRenameTest() : move_bob_count_(0), done_(false) {}
-int move_bob_count;
+ static const int64 bob_id_number = 1;
+ static const int64 fred_id_number = 2;
-bool MoveBobIntoID2(Directory* dir) {
- if (--move_bob_count > 0)
- return false;
- if (move_bob_count == 0) {
- WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
- Entry alice(&trans, GET_BY_ID, TestIdFactory::FromNumber(2));
- CHECK(alice.good());
- CHECK(!alice.Get(IS_DEL));
- MutableEntry bob(&trans, GET_BY_ID, TestIdFactory::FromNumber(1));
- CHECK(bob.good());
- bob.Put(IS_UNSYNCED, true);
- bob.Put(PARENT_ID, alice.Get(ID));
- return true;
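+  // Mid-commit callback target; runs MoveBobIntoID2() until it succeeds.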
+ void MoveBobIntoID2Runner() {
+ if (!done_) {
+ done_ = MoveBobIntoID2();
+ }
}
- return false;
-}
-} // namespace
+ protected:
+ int move_bob_count_;
+ bool done_;
-TEST_F(SyncerTest,
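+  // On the call that drops move_bob_count_ to zero, reparents bob under the
+  // entry numbered fred_id_number and returns true.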
+ bool MoveBobIntoID2() {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ CHECK(dir.good());
+
+ if (--move_bob_count_ > 0) {
+ return false;
+ }
+
+ if (move_bob_count_ == 0) {
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ Entry alice(&trans, GET_BY_ID,
+ TestIdFactory::FromNumber(fred_id_number));
+ CHECK(alice.good());
+ CHECK(!alice.Get(IS_DEL));
+ MutableEntry bob(&trans, GET_BY_ID,
+ TestIdFactory::FromNumber(bob_id_number));
+ CHECK(bob.good());
+ bob.Put(IS_UNSYNCED, true);
+ bob.Put(PARENT_ID, alice.Get(ID));
+ return true;
+ }
+ return false;
+ }
+};
+
+TEST_F(FolderMoveDeleteRenameTest,
WeMovedSomethingIntoAFolderServerHasDeletedAndWeRenamed) {
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
CHECK(dir.good());
- mock_server_->AddUpdateDirectory(1, 0, "bob", 1, 10);
- mock_server_->AddUpdateDirectory(2, 0, "fred", 1, 10);
+
+ const syncable::Id bob_id = TestIdFactory::FromNumber(
+ FolderMoveDeleteRenameTest::bob_id_number);
+ const syncable::Id fred_id = TestIdFactory::FromNumber(
+ FolderMoveDeleteRenameTest::fred_id_number);
+
+ mock_server_->AddUpdateDirectory(bob_id, TestIdFactory::root(),
+ "bob", 1, 10);
+ mock_server_->AddUpdateDirectory(fred_id, TestIdFactory::root(),
+ "fred", 1, 10);
syncer_->SyncShare();
{
WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
- MutableEntry fred(&trans, GET_BY_ID, ids_.FromNumber(2));
+ MutableEntry fred(&trans, GET_BY_ID, fred_id);
ASSERT_TRUE(fred.good());
fred.Put(IS_UNSYNCED, true);
- fred.Put(NAME, PSTR("Alice"));
+ fred.Put(NON_UNIQUE_NAME, PSTR("Alice"));
}
- mock_server_->AddUpdateDirectory(2, 0, "fred", 2, 20);
+ mock_server_->AddUpdateDirectory(fred_id, TestIdFactory::root(),
+ "fred", 2, 20);
mock_server_->SetLastUpdateDeleted();
mock_server_->set_conflict_all_commits(true);
// This test is a little brittle. We want to move the item into the folder
// such that we think we're dealing with a simple conflict, but in reality
// it's actually a conflict set.
- move_bob_count = 2;
- mock_server_->SetMidCommitCallbackFunction(MoveBobIntoID2);
+ move_bob_count_ = 2;
+ mock_server_->SetMidCommitCallback(
+ NewCallback<FolderMoveDeleteRenameTest>(this,
+ &FolderMoveDeleteRenameTest::MoveBobIntoID2Runner));
syncer_->SyncShare();
syncer_->SyncShare();
syncer_->SyncShare();
{
ReadTransaction trans(dir, __FILE__, __LINE__);
- Entry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+ Entry bob(&trans, GET_BY_ID, bob_id);
ASSERT_TRUE(bob.good());
- Entry alice(&trans, GET_BY_PATH, PSTR("Alice"));
+
+ // Old entry is dead
+ Entry dead_fred(&trans, GET_BY_ID, fred_id);
+ EXPECT_FALSE(dead_fred.good());
+
+    // A new entry replaces the deleted parent folder, with the correct name.
+ Entry alice(&trans, GET_BY_ID, bob.Get(PARENT_ID));
ASSERT_TRUE(alice.good());
+ EXPECT_EQ(PSTR("Alice"), alice.Get(NON_UNIQUE_NAME));
EXPECT_TRUE(alice.Get(IS_UNSYNCED));
EXPECT_FALSE(alice.Get(ID).ServerKnows());
EXPECT_TRUE(bob.Get(IS_UNSYNCED));
@@ -3126,42 +2555,56 @@ TEST_F(SyncerTest,
WeMovedADirIntoAndCreatedAnEntryInAFolderServerHasDeleted) {
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
CHECK(dir.good());
- mock_server_->AddUpdateDirectory(1, 0, "bob", 1, 10);
- mock_server_->AddUpdateDirectory(2, 0, "fred", 1, 10);
+
+ syncable::Id bob_id = ids_.NewServerId();
+ syncable::Id fred_id = ids_.NewServerId();
+
+ mock_server_->AddUpdateDirectory(bob_id, TestIdFactory::root(),
+ "bob", 1, 10);
+ mock_server_->AddUpdateDirectory(fred_id, TestIdFactory::root(),
+ "fred", 1, 10);
syncer_->SyncShare();
syncable::Id new_item_id;
{
WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
- MutableEntry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+ MutableEntry bob(&trans, GET_BY_ID, bob_id);
ASSERT_TRUE(bob.good());
bob.Put(IS_UNSYNCED, true);
- bob.Put(PARENT_ID, ids_.FromNumber(2));
- MutableEntry new_item(&trans, CREATE, ids_.FromNumber(2), PSTR("new_item"));
+ bob.Put(PARENT_ID, fred_id);
+ MutableEntry new_item(&trans, CREATE, fred_id, PSTR("new_item"));
WriteTestDataToEntry(&trans, &new_item);
new_item_id = new_item.Get(ID);
}
- mock_server_->AddUpdateDirectory(2, 0, "fred", 2, 20);
+ mock_server_->AddUpdateDirectory(fred_id, TestIdFactory::root(),
+ "fred", 2, 20);
mock_server_->SetLastUpdateDeleted();
mock_server_->set_conflict_all_commits(true);
syncer_->SyncShare();
syncer_->SyncShare();
{
ReadTransaction trans(dir, __FILE__, __LINE__);
- Entry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+
+ Entry bob(&trans, GET_BY_ID, bob_id);
ASSERT_TRUE(bob.good());
- Entry fred(&trans, GET_BY_PATH, PSTR("fred"));
+ EXPECT_TRUE(bob.Get(IS_UNSYNCED));
+ EXPECT_FALSE(bob.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_NE(bob.Get(PARENT_ID), fred_id);
+
+ // Was recreated. Old one shouldn't exist.
+ Entry dead_fred(&trans, GET_BY_ID, fred_id);
+ EXPECT_FALSE(dead_fred.good());
+
+ Entry fred(&trans, GET_BY_ID, bob.Get(PARENT_ID));
ASSERT_TRUE(fred.good());
- PathChar path[] = {'f', 'r', 'e', 'd', *kPathSeparator,
- 'n', 'e', 'w', '_', 'i', 't', 'e', 'm', 0};
- Entry new_item(&trans, GET_BY_PATH, path);
- EXPECT_TRUE(new_item.good());
EXPECT_TRUE(fred.Get(IS_UNSYNCED));
EXPECT_FALSE(fred.Get(ID).ServerKnows());
- EXPECT_TRUE(bob.Get(IS_UNSYNCED));
- EXPECT_TRUE(bob.Get(PARENT_ID) == fred.Get(ID));
- EXPECT_TRUE(fred.Get(PARENT_ID) == root_id_);
+ EXPECT_EQ(PSTR("fred"), fred.Get(NON_UNIQUE_NAME));
EXPECT_FALSE(fred.Get(IS_UNAPPLIED_UPDATE));
- EXPECT_FALSE(bob.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_TRUE(fred.Get(PARENT_ID) == root_id_);
+
+ Entry new_item(&trans, GET_BY_ID, new_item_id);
+ ASSERT_TRUE(new_item.good());
+ EXPECT_EQ(new_item.Get(PARENT_ID), fred.Get(ID));
}
syncer_events_.clear();
}
@@ -3359,60 +2802,80 @@ TEST_F(SyncerTest, NewServerItemInAFolderHierarchyWeHaveDeleted2) {
syncer_events_.clear();
}
-namespace {
-int countown_till_delete = 0;
-
-void DeleteSusanInRoot(Directory* dir) {
- ASSERT_GT(countown_till_delete, 0);
- if (0 != --countown_till_delete)
- return;
- WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
- MutableEntry susan(&trans, GET_BY_PATH, PSTR("susan"));
- Directory::ChildHandles children;
- dir->GetChildHandles(&trans, susan.Get(ID), &children);
- ASSERT_TRUE(0 == children.size());
- susan.Put(IS_DEL, true);
- susan.Put(IS_UNSYNCED, true);
-}
+class SusanDeletingTest : public SyncerTest {
+ public:
+ SusanDeletingTest() : countdown_till_delete_(0) {}
-} // namespace
+ static const int64 susan_int_id_ = 4;
-TEST_F(SyncerTest, NewServerItemInAFolderHierarchyWeHaveDeleted3) {
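+  // Pre-conflict-resolution callback: once countdown_till_delete_ reaches
+  // zero, marks the (empty) susan folder in the root deleted and unsynced.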
+ void DeleteSusanInRoot() {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ ASSERT_TRUE(dir.good());
+
+ const syncable::Id susan_id = TestIdFactory::FromNumber(susan_int_id_);
+ ASSERT_GT(countdown_till_delete_, 0);
+ if (0 != --countdown_till_delete_)
+ return;
+ WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
+ MutableEntry susan(&trans, GET_BY_ID, susan_id);
+ Directory::ChildHandles children;
+ dir->GetChildHandles(&trans, susan.Get(ID), &children);
+ ASSERT_TRUE(0 == children.size());
+ susan.Put(IS_DEL, true);
+ susan.Put(IS_UNSYNCED, true);
+ }
+
+ protected:
+ int countdown_till_delete_;
+};
+
+TEST_F(SusanDeletingTest,
+ NewServerItemInAFolderHierarchyWeHaveDeleted3) {
  // Same as 2, except we deleted the folder the set is in, between set
  // building and conflict resolution.
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
CHECK(dir.good());
- mock_server_->AddUpdateDirectory(4, 0, "susan", 1, 10);
- mock_server_->AddUpdateDirectory(1, 4, "bob", 1, 10);
- mock_server_->AddUpdateDirectory(2, 1, "joe", 1, 10);
+
+ const syncable::Id bob_id = TestIdFactory::FromNumber(1);
+ const syncable::Id joe_id = TestIdFactory::FromNumber(2);
+ const syncable::Id fred_id = TestIdFactory::FromNumber(3);
+ const syncable::Id susan_id = TestIdFactory::FromNumber(susan_int_id_);
+
+ mock_server_->AddUpdateDirectory(susan_id, TestIdFactory::root(),
+ "susan", 1, 10);
+ mock_server_->AddUpdateDirectory(bob_id, susan_id, "bob", 1, 10);
+ mock_server_->AddUpdateDirectory(joe_id, bob_id, "joe", 1, 10);
LoopSyncShare(syncer_);
{
WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
- MutableEntry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+ MutableEntry bob(&trans, GET_BY_ID, bob_id);
ASSERT_TRUE(bob.good());
bob.Put(IS_UNSYNCED, true);
bob.Put(IS_DEL, true);
- MutableEntry joe(&trans, GET_BY_ID, ids_.FromNumber(2));
+
+ MutableEntry joe(&trans, GET_BY_ID, joe_id);
ASSERT_TRUE(joe.good());
joe.Put(IS_UNSYNCED, true);
joe.Put(IS_DEL, true);
}
- mock_server_->AddUpdateDirectory(3, 2, "fred", 2, 20);
+ mock_server_->AddUpdateDirectory(fred_id, joe_id, "fred", 2, 20);
mock_server_->set_conflict_all_commits(true);
- countown_till_delete = 2;
- syncer_->pre_conflict_resolution_function_ = DeleteSusanInRoot;
+ countdown_till_delete_ = 2;
+ syncer_->pre_conflict_resolution_closure_ =
+ NewCallback<SusanDeletingTest>(this,
+ &SusanDeletingTest::DeleteSusanInRoot);
syncer_->SyncShare();
syncer_->SyncShare();
{
ReadTransaction trans(dir, __FILE__, __LINE__);
- Entry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+ Entry bob(&trans, GET_BY_ID, bob_id);
ASSERT_TRUE(bob.good());
- Entry joe(&trans, GET_BY_ID, ids_.FromNumber(2));
+ Entry joe(&trans, GET_BY_ID, joe_id);
ASSERT_TRUE(joe.good());
- Entry fred(&trans, GET_BY_ID, ids_.FromNumber(3));
+ Entry fred(&trans, GET_BY_ID, fred_id);
ASSERT_TRUE(fred.good());
- Entry susan(&trans, GET_BY_ID, ids_.FromNumber(4));
+ Entry susan(&trans, GET_BY_ID, susan_id);
ASSERT_TRUE(susan.good());
EXPECT_FALSE(susan.Get(IS_UNAPPLIED_UPDATE));
EXPECT_TRUE(fred.Get(IS_UNAPPLIED_UPDATE));
@@ -3423,19 +2886,19 @@ TEST_F(SyncerTest, NewServerItemInAFolderHierarchyWeHaveDeleted3) {
EXPECT_TRUE(bob.Get(IS_UNSYNCED));
EXPECT_TRUE(joe.Get(IS_UNSYNCED));
}
- EXPECT_TRUE(0 == countown_till_delete);
- syncer_->pre_conflict_resolution_function_ = 0;
+ EXPECT_TRUE(0 == countdown_till_delete_);
+ syncer_->pre_conflict_resolution_closure_ = NULL;
LoopSyncShare(syncer_);
LoopSyncShare(syncer_);
{
ReadTransaction trans(dir, __FILE__, __LINE__);
- Entry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+ Entry bob(&trans, GET_BY_ID, bob_id);
ASSERT_TRUE(bob.good());
- Entry joe(&trans, GET_BY_ID, ids_.FromNumber(2));
+ Entry joe(&trans, GET_BY_ID, joe_id);
ASSERT_TRUE(joe.good());
- Entry fred(&trans, GET_BY_ID, ids_.FromNumber(3));
+ Entry fred(&trans, GET_BY_ID, fred_id);
ASSERT_TRUE(fred.good());
- Entry susan(&trans, GET_BY_ID, ids_.FromNumber(4));
+ Entry susan(&trans, GET_BY_ID, susan_id);
ASSERT_TRUE(susan.good());
EXPECT_TRUE(susan.Get(IS_UNSYNCED));
EXPECT_FALSE(fred.Get(IS_UNSYNCED));
@@ -3456,103 +2919,146 @@ TEST_F(SyncerTest, NewServerItemInAFolderHierarchyWeHaveDeleted3) {
TEST_F(SyncerTest, WeMovedSomethingIntoAFolderHierarchyServerHasDeleted) {
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
CHECK(dir.good());
- mock_server_->AddUpdateDirectory(1, 0, "bob", 1, 10);
- mock_server_->AddUpdateDirectory(2, 0, "fred", 1, 10);
- mock_server_->AddUpdateDirectory(3, 2, "alice", 1, 10);
+
+ const syncable::Id bob_id = ids_.NewServerId();
+ const syncable::Id fred_id = ids_.NewServerId();
+ const syncable::Id alice_id = ids_.NewServerId();
+
+ mock_server_->AddUpdateDirectory(bob_id, TestIdFactory::root(),
+ "bob", 1, 10);
+ mock_server_->AddUpdateDirectory(fred_id, TestIdFactory::root(),
+ "fred", 1, 10);
+ mock_server_->AddUpdateDirectory(alice_id, fred_id, "alice", 1, 10);
syncer_->SyncShare();
{
WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
- MutableEntry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+ MutableEntry bob(&trans, GET_BY_ID, bob_id);
ASSERT_TRUE(bob.good());
bob.Put(IS_UNSYNCED, true);
- bob.Put(PARENT_ID, ids_.FromNumber(3)); // Move into alice.
+ bob.Put(PARENT_ID, alice_id); // Move into alice.
}
- mock_server_->AddUpdateDirectory(2, 0, "fred", 2, 20);
+ mock_server_->AddUpdateDirectory(fred_id, TestIdFactory::root(),
+ "fred", 2, 20);
mock_server_->SetLastUpdateDeleted();
- mock_server_->AddUpdateDirectory(3, 0, "alice", 2, 20);
+ mock_server_->AddUpdateDirectory(alice_id, TestIdFactory::root(),
+ "alice", 2, 20);
mock_server_->SetLastUpdateDeleted();
mock_server_->set_conflict_all_commits(true);
syncer_->SyncShare();
syncer_->SyncShare();
{
+ // Bob is the entry at the bottom of the tree.
+ // The tree should be regenerated and old IDs removed.
ReadTransaction trans(dir, __FILE__, __LINE__);
- Entry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+ Entry bob(&trans, GET_BY_ID, bob_id);
ASSERT_TRUE(bob.good());
- Entry fred(&trans, GET_BY_PATH, PSTR("fred"));
- ASSERT_TRUE(fred.good());
- PathChar path[] = {'f', 'r', 'e', 'd', *kPathSeparator,
- 'a', 'l', 'i', 'c', 'e', 0};
- Entry alice(&trans, GET_BY_PATH, path);
+ EXPECT_FALSE(bob.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_TRUE(bob.Get(IS_UNSYNCED));
+
+ // Old one should be deleted, but new one should have been made.
+ Entry dead_alice(&trans, GET_BY_ID, alice_id);
+ EXPECT_FALSE(dead_alice.good());
+ EXPECT_NE(bob.Get(PARENT_ID), alice_id);
+
+ // Newly born alice
+ Entry alice(&trans, GET_BY_ID, bob.Get(PARENT_ID));
ASSERT_TRUE(alice.good());
- EXPECT_TRUE(fred.Get(IS_UNSYNCED));
+ EXPECT_FALSE(alice.Get(IS_UNAPPLIED_UPDATE));
EXPECT_TRUE(alice.Get(IS_UNSYNCED));
- EXPECT_TRUE(bob.Get(IS_UNSYNCED));
- EXPECT_FALSE(fred.Get(ID).ServerKnows());
EXPECT_FALSE(alice.Get(ID).ServerKnows());
- EXPECT_TRUE(alice.Get(PARENT_ID) == fred.Get(ID));
- EXPECT_TRUE(bob.Get(PARENT_ID) == alice.Get(ID));
- EXPECT_TRUE(fred.Get(PARENT_ID) == root_id_);
+ EXPECT_TRUE(alice.Get(NON_UNIQUE_NAME) == PSTR("alice"));
+
+ // Alice needs a parent as well. Old parent should have been erased.
+ Entry dead_fred(&trans, GET_BY_ID, fred_id);
+ EXPECT_FALSE(dead_fred.good());
+ EXPECT_NE(alice.Get(PARENT_ID), fred_id);
+
+ Entry fred(&trans, GET_BY_ID, alice.Get(PARENT_ID));
+ ASSERT_TRUE(fred.good());
+ EXPECT_EQ(fred.Get(PARENT_ID), TestIdFactory::root());
+ EXPECT_TRUE(fred.Get(IS_UNSYNCED));
+ EXPECT_FALSE(fred.Get(ID).ServerKnows());
EXPECT_FALSE(fred.Get(IS_UNAPPLIED_UPDATE));
- EXPECT_FALSE(bob.Get(IS_UNAPPLIED_UPDATE));
- EXPECT_FALSE(alice.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_TRUE(fred.Get(NON_UNIQUE_NAME) == PSTR("fred"));
}
syncer_events_.clear();
}
TEST_F(SyncerTest, WeMovedSomethingIntoAFolderHierarchyServerHasDeleted2) {
- // The difference here is that the hierarchy's not in the root. We have
+ // The difference here is that the hierarchy is not in the root. We have
// another entry that shouldn't be touched.
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
CHECK(dir.good());
- mock_server_->AddUpdateDirectory(1, 0, "bob", 1, 10);
- mock_server_->AddUpdateDirectory(4, 0, "susan", 1, 10);
- mock_server_->AddUpdateDirectory(2, 4, "fred", 1, 10);
- mock_server_->AddUpdateDirectory(3, 2, "alice", 1, 10);
+
+ const syncable::Id bob_id = ids_.NewServerId();
+ const syncable::Id fred_id = ids_.NewServerId();
+ const syncable::Id alice_id = ids_.NewServerId();
+ const syncable::Id susan_id = ids_.NewServerId();
+
+ mock_server_->AddUpdateDirectory(bob_id, TestIdFactory::root(),
+ "bob", 1, 10);
+ mock_server_->AddUpdateDirectory(susan_id, TestIdFactory::root(),
+ "susan", 1, 10);
+ mock_server_->AddUpdateDirectory(fred_id, susan_id, "fred", 1, 10);
+ mock_server_->AddUpdateDirectory(alice_id, fred_id, "alice", 1, 10);
syncer_->SyncShare();
{
WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
- MutableEntry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+ MutableEntry bob(&trans, GET_BY_ID, bob_id);
ASSERT_TRUE(bob.good());
bob.Put(IS_UNSYNCED, true);
- bob.Put(PARENT_ID, ids_.FromNumber(3)); // Move into alice.
+ bob.Put(PARENT_ID, alice_id); // Move into alice.
}
- mock_server_->AddUpdateDirectory(2, 0, "fred", 2, 20);
+ mock_server_->AddUpdateDirectory(fred_id, TestIdFactory::root(),
+ "fred", 2, 20);
mock_server_->SetLastUpdateDeleted();
- mock_server_->AddUpdateDirectory(3, 0, "alice", 2, 20);
+ mock_server_->AddUpdateDirectory(alice_id, TestIdFactory::root(),
+ "alice", 2, 20);
mock_server_->SetLastUpdateDeleted();
mock_server_->set_conflict_all_commits(true);
syncer_->SyncShare();
syncer_->SyncShare();
{
+ // Root
+ // |- Susan
+ // |- Fred
+ // |- Alice
+ // |- Bob
+
ReadTransaction trans(dir, __FILE__, __LINE__);
- Entry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
+ Entry bob(&trans, GET_BY_ID, bob_id);
ASSERT_TRUE(bob.good());
- PathChar path[] = {'s', 'u', 's', 'a', 'n', *kPathSeparator,
- 'f', 'r', 'e', 'd', 0};
- Entry fred(&trans, GET_BY_PATH, path);
- ASSERT_TRUE(fred.good());
- PathChar path2[] = {'s', 'u', 's', 'a', 'n', *kPathSeparator,
- 'f', 'r', 'e', 'd', *kPathSeparator,
- 'a', 'l', 'i', 'c', 'e', 0};
- Entry alice(&trans, GET_BY_PATH, path2);
- ASSERT_TRUE(alice.good());
- Entry susan(&trans, GET_BY_ID, ids_.FromNumber(4));
- ASSERT_TRUE(susan.good());
- Entry susan_by_path(&trans, GET_BY_PATH, PSTR("susan"));
- ASSERT_TRUE(susan.good());
- EXPECT_FALSE(susan.Get(IS_UNSYNCED));
- EXPECT_TRUE(fred.Get(IS_UNSYNCED));
+ EXPECT_FALSE(bob.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_TRUE(bob.Get(IS_UNSYNCED)); // Parent changed
+ EXPECT_NE(bob.Get(PARENT_ID), alice_id);
+
+    // A new one was born; this is the old one.
+ Entry dead_alice(&trans, GET_BY_ID, alice_id);
+ EXPECT_FALSE(dead_alice.good());
+
+ // Newly born
+    Entry alice(&trans, GET_BY_ID, bob.Get(PARENT_ID));
+    ASSERT_TRUE(alice.good());
EXPECT_TRUE(alice.Get(IS_UNSYNCED));
- EXPECT_TRUE(bob.Get(IS_UNSYNCED));
- EXPECT_FALSE(fred.Get(ID).ServerKnows());
+ EXPECT_FALSE(alice.Get(IS_UNAPPLIED_UPDATE));
EXPECT_FALSE(alice.Get(ID).ServerKnows());
- EXPECT_TRUE(alice.Get(PARENT_ID) == fred.Get(ID));
- EXPECT_TRUE(bob.Get(PARENT_ID) == alice.Get(ID));
- EXPECT_TRUE(fred.Get(PARENT_ID) == susan.Get(ID));
- EXPECT_TRUE(susan.Get(PARENT_ID) == root_id_);
+ EXPECT_NE(alice.Get(PARENT_ID), fred_id); // This fred was deleted
+
+    // A new one was born; this is the old one.
+ Entry dead_fred(&trans, GET_BY_ID, fred_id);
+ EXPECT_FALSE(dead_fred.good());
+
+ // Newly born
+    Entry fred(&trans, GET_BY_ID, alice.Get(PARENT_ID));
+    ASSERT_TRUE(fred.good());
+ EXPECT_TRUE(fred.Get(IS_UNSYNCED));
EXPECT_FALSE(fred.Get(IS_UNAPPLIED_UPDATE));
- EXPECT_FALSE(bob.Get(IS_UNAPPLIED_UPDATE));
- EXPECT_FALSE(alice.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_FALSE(fred.Get(ID).ServerKnows());
+ EXPECT_TRUE(fred.Get(PARENT_ID) == susan_id);
+
+ // Unchanged
+ Entry susan(&trans, GET_BY_ID, susan_id);
+ ASSERT_TRUE(susan.good());
+ EXPECT_FALSE(susan.Get(IS_UNSYNCED));
+ EXPECT_TRUE(susan.Get(PARENT_ID) == root_id_);
EXPECT_FALSE(susan.Get(IS_UNAPPLIED_UPDATE));
}
syncer_events_.clear();
@@ -3585,34 +3091,6 @@ TEST_F(SyncerTest, DuplicateIDReturn) {
syncer_events_.clear();
}
-// This test is not very useful anymore. It used to trigger a more interesting
-// condition.
-TEST_F(SyncerTest, SimpleConflictOnAnEntry) {
- ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
- CHECK(dir.good());
- {
- WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
- MutableEntry bob(&trans, CREATE, trans.root_id(), PSTR("bob"));
- ASSERT_TRUE(bob.good());
- bob.Put(IS_UNSYNCED, true);
- WriteTestDataToEntry(&trans, &bob);
- }
- syncer_->SyncShare();
- syncable::Id bobid;
- {
- WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
- MutableEntry bob(&trans, GET_BY_PATH, PSTR("bob"));
- ASSERT_TRUE(bob.good());
- EXPECT_FALSE(bob.Get(IS_UNSYNCED));
- bob.Put(IS_UNSYNCED, true);
- bobid = bob.Get(ID);
- }
- mock_server_->AddUpdateBookmark(1, 0, "jim", 2, 20);
- mock_server_->set_conflict_all_commits(true);
- SyncRepeatedlyToTriggerConflictResolution(state_.get());
- syncer_events_.clear();
-}
-
TEST_F(SyncerTest, DeletedEntryWithBadParentInLoopCalculation) {
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
ASSERT_TRUE(dir.good());
@@ -3654,7 +3132,7 @@ TEST_F(SyncerTest, ConflictResolverMergeOverwritesLocalEntry) {
MutableEntry update(&trans, CREATE_NEW_UPDATE_ITEM, ids_.FromNumber(3));
update.Put(BASE_VERSION, 1);
- update.Put(SERVER_NAME, PSTR("name"));
+ update.Put(SERVER_NON_UNIQUE_NAME, PSTR("name"));
update.Put(PARENT_ID, ids_.FromNumber(0));
update.Put(IS_UNAPPLIED_UPDATE, true);
@@ -3792,13 +3270,17 @@ TEST_F(SyncerTest, MergingExistingItems) {
// In this test a long changelog contains a child at the start of the changelog
// and a parent at the end. While these updates are in progress the client would
// appear stuck.
-TEST_F(SyncerTest, LongChangelistCreatesFakeOrphanedEntries) {
+TEST_F(SyncerTest, LongChangelistWithApplicationConflict) {
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
CHECK(dir.good());
const int DEPTH = 400;
+ syncable::Id folder_id = ids_.FromNumber(1);
+
  // First we add an item in a folder in the root. However the folder won't
  // come till much later.
- mock_server_->AddUpdateDirectory(99999, 1, "stuck", 1, 1);
+ syncable::Id stuck_entry_id = TestIdFactory::FromNumber(99999);
+ mock_server_->AddUpdateDirectory(stuck_entry_id,
+ folder_id, "stuck", 1, 1);
mock_server_->SetChangesRemaining(DEPTH - 1);
syncer_->SyncShare(state_.get());
@@ -3808,18 +3290,30 @@ TEST_F(SyncerTest, LongChangelistCreatesFakeOrphanedEntries) {
mock_server_->SetChangesRemaining(DEPTH - i);
syncer_->SyncShare(state_.get());
EXPECT_FALSE(SyncerStuck(state_.get()));
+
+ // Ensure our folder hasn't somehow applied.
+ ReadTransaction trans(dir, __FILE__, __LINE__);
+ Entry child(&trans, GET_BY_ID, stuck_entry_id);
+ EXPECT_TRUE(child.good());
+ EXPECT_TRUE(child.Get(IS_UNAPPLIED_UPDATE));
+ EXPECT_TRUE(child.Get(IS_DEL));
+ EXPECT_FALSE(child.Get(IS_UNSYNCED));
}
+
// And finally the folder.
- mock_server_->AddUpdateDirectory(1, 0, "folder", 1, 1);
+ mock_server_->AddUpdateDirectory(folder_id,
+ TestIdFactory::root(), "folder", 1, 1);
mock_server_->SetChangesRemaining(0);
LoopSyncShare(syncer_);
LoopSyncShare(syncer_);
- // Check that everything's as expected after the commit.
+ // Check that everything is as expected after the commit.
{
ReadTransaction trans(dir, __FILE__, __LINE__);
- Entry entry(&trans, GET_BY_PATH, PSTR("folder"));
+ Entry entry(&trans, GET_BY_ID, folder_id);
ASSERT_TRUE(entry.good());
- Entry child(&trans, GET_BY_PARENTID_AND_NAME, entry.Get(ID), PSTR("stuck"));
+ Entry child(&trans, GET_BY_ID, stuck_entry_id);
+ EXPECT_EQ(entry.Get(ID), child.Get(PARENT_ID));
+ EXPECT_EQ(PSTR("stuck"), child.Get(NON_UNIQUE_NAME));
EXPECT_TRUE(child.good());
}
}
@@ -3835,7 +3329,7 @@ TEST_F(SyncerTest, DontMergeTwoExistingItems) {
WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
MutableEntry entry(&trans, GET_BY_ID, ids_.FromNumber(2));
ASSERT_TRUE(entry.good());
- EXPECT_TRUE(entry.Put(NAME, PSTR("Copy of base")));
+ EXPECT_TRUE(entry.Put(NON_UNIQUE_NAME, PSTR("Copy of base")));
entry.Put(IS_UNSYNCED, true);
}
mock_server_->AddUpdateBookmark(1, 0, "Copy of base", 50, 50);
@@ -3850,7 +3344,7 @@ TEST_F(SyncerTest, DontMergeTwoExistingItems) {
EXPECT_FALSE(entry2.Get(IS_UNAPPLIED_UPDATE));
EXPECT_TRUE(entry2.Get(IS_UNSYNCED));
EXPECT_FALSE(entry2.Get(IS_DEL));
- EXPECT_NE(entry1.Get(NAME), entry2.Get(NAME));
+ EXPECT_EQ(entry1.Get(NON_UNIQUE_NAME), entry2.Get(NON_UNIQUE_NAME));
}
}
@@ -3906,31 +3400,6 @@ TEST_F(SyncerTest, TestMoveSanitizedNamedFolder) {
syncer_->SyncShare();
}
-TEST_F(SyncerTest, QuicklyMergeDualCreatedHierarchy) {
- ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
- EXPECT_TRUE(dir.good());
- mock_server_->set_conflict_all_commits(true);
- int depth = 10;
- {
- WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
- syncable::Id parent = root_id_;
- for (int i = 0 ; i < depth ; ++i) {
- MutableEntry entry(&trans, CREATE, parent, PSTR("folder"));
- entry.Put(IS_DIR, true);
- entry.Put(IS_UNSYNCED, true);
- parent = entry.Get(ID);
- }
- }
- for (int i = 0 ; i < depth ; ++i) {
- mock_server_->AddUpdateDirectory(i + 1, i, "folder", 1, 1);
- }
- syncer_->SyncShare(state_.get());
- syncer_->SyncShare(state_.get());
- SyncerStatus status(NULL, state_.get());
- EXPECT_LT(status.consecutive_problem_commits(), 5);
- EXPECT_TRUE(0 == dir->unsynced_entity_count());
-}
-
TEST(SortedCollectionsIntersect, SortedCollectionsIntersectTest) {
int negative[] = {-3, -2, -1};
int straddle[] = {-1, 0, 1};
@@ -3973,77 +3442,86 @@ const char kRootId[] = "0";
TEST_F(SyncerTest, DirectoryUpdateTest) {
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
CHECK(dir.good());
- mock_server_->AddUpdateDirectory("in_root_id", kRootId,
+
+ Id in_root_id = ids_.NewServerId();
+ Id in_in_root_id = ids_.NewServerId();
+
+ mock_server_->AddUpdateDirectory(in_root_id, TestIdFactory::root(),
"in_root_name", 2, 2);
- mock_server_->AddUpdateDirectory("in_in_root_id", "in_root_id",
+ mock_server_->AddUpdateDirectory(in_in_root_id, in_root_id,
"in_in_root_name", 3, 3);
syncer_->SyncShare();
{
ReadTransaction trans(dir, __FILE__, __LINE__);
- // Entry will have been dropped.
- Entry by_path(&trans, GET_BY_PATH, PSTR("in_root_name"));
- EXPECT_TRUE(by_path.good());
- Entry by_path2(&trans, GET_BY_PATH, PSTR("in_root_name") +
- PathString(kPathSeparator) +
- PSTR("in_in_root_name"));
- EXPECT_TRUE(by_path2.good());
+ Entry in_root(&trans, GET_BY_ID, in_root_id);
+ ASSERT_TRUE(in_root.good());
+ EXPECT_EQ(PSTR("in_root_name"), in_root.Get(NON_UNIQUE_NAME));
+ EXPECT_EQ(TestIdFactory::root(), in_root.Get(PARENT_ID));
+
+ Entry in_in_root(&trans, GET_BY_ID, in_in_root_id);
+ ASSERT_TRUE(in_in_root.good());
+ EXPECT_EQ(PSTR("in_in_root_name"), in_in_root.Get(NON_UNIQUE_NAME));
+ EXPECT_EQ(in_root_id, in_in_root.Get(PARENT_ID));
}
}
TEST_F(SyncerTest, DirectoryCommitTest) {
- syncable::Id in_root, in_dir;
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
CHECK(dir.good());
+
+ syncable::Id in_root_id, in_dir_id;
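+  // Commit will assign new IDs, so keep metahandles to find the entries later.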
+ int64 foo_metahandle;
+ int64 bar_metahandle;
+
{
WriteTransaction wtrans(dir, UNITTEST, __FILE__, __LINE__);
MutableEntry parent(&wtrans, syncable::CREATE, root_id_, PSTR("foo"));
ASSERT_TRUE(parent.good());
parent.Put(syncable::IS_UNSYNCED, true);
parent.Put(syncable::IS_DIR, true);
- in_root = parent.Get(syncable::ID);
+ in_root_id = parent.Get(syncable::ID);
+ foo_metahandle = parent.Get(META_HANDLE);
+
MutableEntry child(&wtrans, syncable::CREATE, parent.Get(ID), PSTR("bar"));
ASSERT_TRUE(child.good());
child.Put(syncable::IS_UNSYNCED, true);
child.Put(syncable::IS_DIR, true);
- in_dir = parent.Get(syncable::ID);
+ bar_metahandle = child.Get(META_HANDLE);
+ in_dir_id = parent.Get(syncable::ID);
}
syncer_->SyncShare();
{
ReadTransaction trans(dir, __FILE__, __LINE__);
- Entry by_path(&trans, GET_BY_PATH, PSTR("foo"));
- ASSERT_TRUE(by_path.good());
- EXPECT_NE(by_path.Get(syncable::ID), in_root);
- Entry by_path2(&trans, GET_BY_PATH, PSTR("foo") +
- PathString(kPathSeparator) +
- PSTR("bar"));
- ASSERT_TRUE(by_path2.good());
- EXPECT_NE(by_path2.Get(syncable::ID), in_dir);
- }
-}
+ Entry fail_by_old_id_entry(&trans, GET_BY_ID, in_root_id);
+ ASSERT_FALSE(fail_by_old_id_entry.good());
-namespace {
+ Entry foo_entry(&trans, GET_BY_HANDLE, foo_metahandle);
+ ASSERT_TRUE(foo_entry.good());
+ EXPECT_EQ(PSTR("foo"), foo_entry.Get(NON_UNIQUE_NAME));
+ EXPECT_NE(foo_entry.Get(syncable::ID), in_root_id);
-void CheckEntryVersion(syncable::DirectoryManager* dirmgr, PathString name) {
- ScopedDirLookup dir(dirmgr, name);
- ASSERT_TRUE(dir.good());
- ReadTransaction trans(dir, __FILE__, __LINE__);
- Entry entry(&trans, GET_BY_PATH, PSTR("foo"));
- ASSERT_TRUE(entry.good());
- EXPECT_TRUE(entry.Get(BASE_VERSION) == 1);
+ Entry bar_entry(&trans, GET_BY_HANDLE, bar_metahandle);
+ ASSERT_TRUE(bar_entry.good());
+ EXPECT_EQ(PSTR("bar"), bar_entry.Get(NON_UNIQUE_NAME));
+ EXPECT_NE(bar_entry.Get(syncable::ID), in_dir_id);
+ EXPECT_EQ(foo_entry.Get(syncable::ID), bar_entry.Get(PARENT_ID));
+ }
}
-} // namespace
-
TEST_F(SyncerTest, ConflictSetSizeReducedToOne) {
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
CHECK(dir.good());
- mock_server_->AddUpdateBookmark(2, 0, "in_root", 1, 1);
+
+ syncable::Id in_root_id = ids_.NewServerId();
+
+ mock_server_->AddUpdateBookmark(in_root_id, TestIdFactory::root(),
+ "in_root", 1, 1);
syncer_->SyncShare();
{
WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
- MutableEntry oentry(&trans, GET_BY_PATH, PSTR("in_root"));
+ MutableEntry oentry(&trans, GET_BY_ID, in_root_id);
ASSERT_TRUE(oentry.good());
- oentry.Put(NAME, PSTR("old_in_root"));
+ oentry.Put(NON_UNIQUE_NAME, PSTR("old_in_root"));
WriteTestDataToEntry(&trans, &oentry);
MutableEntry entry(&trans, CREATE, trans.root_id(), PSTR("in_root"));
ASSERT_TRUE(entry.good());
@@ -4086,15 +3564,21 @@ TEST_F(SyncerTest, TestClientCommand) {
TEST_F(SyncerTest, EnsureWeSendUpOldParent) {
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
CHECK(dir.good());
- mock_server_->AddUpdateDirectory(1, 0, "folder_one", 1, 1);
- mock_server_->AddUpdateDirectory(2, 0, "folder_two", 1, 1);
+
+ syncable::Id folder_one_id = ids_.FromNumber(1);
+ syncable::Id folder_two_id = ids_.FromNumber(2);
+
+ mock_server_->AddUpdateDirectory(folder_one_id, TestIdFactory::root(),
+ "folder_one", 1, 1);
+ mock_server_->AddUpdateDirectory(folder_two_id, TestIdFactory::root(),
+ "folder_two", 1, 1);
syncer_->SyncShare();
{
// A moved entry should send an old parent.
WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
- MutableEntry entry(&trans, GET_BY_PATH, PSTR("folder_one"));
+ MutableEntry entry(&trans, GET_BY_ID, folder_one_id);
ASSERT_TRUE(entry.good());
- entry.Put(PARENT_ID, ids_.FromNumber(2));
+ entry.Put(PARENT_ID, folder_two_id);
entry.Put(IS_UNSYNCED, true);
// A new entry should send no parent.
MutableEntry create(&trans, CREATE, trans.root_id(), PSTR("new_folder"));
@@ -4113,6 +3597,7 @@ TEST_F(SyncerTest, Test64BitVersionSupport) {
CHECK(dir.good());
int64 really_big_int = std::numeric_limits<int64>::max() - 12;
const PathString name(PSTR("ringo's dang orang ran rings around my o-ring"));
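+  // Remember the metahandle so the entry can be looked up again later.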
+ int64 item_metahandle;
// Try writing max int64 to the version fields of a meta entry.
{
@@ -4121,29 +3606,18 @@ TEST_F(SyncerTest, Test64BitVersionSupport) {
ASSERT_TRUE(entry.good());
entry.Put(syncable::BASE_VERSION, really_big_int);
entry.Put(syncable::SERVER_VERSION, really_big_int);
- entry.Put(syncable::ID, syncable::Id::CreateFromServerId("ID"));
+ entry.Put(syncable::ID, ids_.NewServerId());
+ item_metahandle = entry.Get(META_HANDLE);
}
// Now read it back out and make sure the value is max int64.
ReadTransaction rtrans(dir, __FILE__, __LINE__);
- Entry entry(&rtrans, syncable::GET_BY_PATH, name);
+ Entry entry(&rtrans, syncable::GET_BY_HANDLE, item_metahandle);
ASSERT_TRUE(entry.good());
EXPECT_TRUE(really_big_int == entry.Get(syncable::BASE_VERSION));
}
-TEST_F(SyncerTest, TestDSStoreDirectorySyncsNormally) {
- syncable::Id item_id = parent_id_;
- mock_server_->AddUpdateDirectory(item_id,
- root_id_, ".DS_Store", 1, 1);
- syncer_->SyncShare();
- ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
- CHECK(dir.good());
- ReadTransaction trans(dir, __FILE__, __LINE__);
- Entry ds_dir(&trans, GET_BY_PATH, PSTR(".DS_Store"));
- ASSERT_TRUE(ds_dir.good());
-}
-
TEST_F(SyncerTest, TestSimpleUndelete) {
- Id id = ids_.MakeServer("undeletion item"), root = ids_.root();
+ Id id = ids_.MakeServer("undeletion item"), root = TestIdFactory::root();
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
EXPECT_TRUE(dir.good());
mock_server_->set_conflict_all_commits(true);
@@ -4203,7 +3677,7 @@ TEST_F(SyncerTest, TestSimpleUndelete) {
}
TEST_F(SyncerTest, TestUndeleteWithMissingDeleteUpdate) {
- Id id = ids_.MakeServer("undeletion item"), root = ids_.root();
+ Id id = ids_.MakeServer("undeletion item"), root = TestIdFactory::root();
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
EXPECT_TRUE(dir.good());
  // Let there be an entry, from the server.
@@ -4251,7 +3725,7 @@ TEST_F(SyncerTest, TestUndeleteWithMissingDeleteUpdate) {
TEST_F(SyncerTest, TestUndeleteIgnoreCorrectlyUnappliedUpdate) {
Id id1 = ids_.MakeServer("first"), id2 = ids_.MakeServer("second");
- Id root = ids_.root();
+ Id root = TestIdFactory::root();
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
EXPECT_TRUE(dir.good());
// Duplicate! expect path clashing!
@@ -4296,7 +3770,7 @@ TEST_F(SyncerTest, SingletonTagUpdates) {
ASSERT_TRUE(hurdle.good());
ASSERT_TRUE(!hurdle.Get(IS_DEL));
ASSERT_TRUE(hurdle.Get(SINGLETON_TAG).empty());
- ASSERT_TRUE(hurdle.GetName().value() == PSTR("bob"));
+ ASSERT_TRUE(hurdle.Get(NON_UNIQUE_NAME) == PSTR("bob"));
// Try to lookup by the tagname. These should fail.
Entry tag_alpha(&trans, GET_BY_TAG, PSTR("alpha"));
@@ -4321,18 +3795,18 @@ TEST_F(SyncerTest, SingletonTagUpdates) {
ASSERT_TRUE(tag_alpha.good());
ASSERT_TRUE(!tag_alpha.Get(IS_DEL));
ASSERT_TRUE(tag_alpha.Get(SINGLETON_TAG) == PSTR("alpha"));
- ASSERT_TRUE(tag_alpha.GetName().value() == PSTR("update1"));
+ ASSERT_TRUE(tag_alpha.Get(NON_UNIQUE_NAME) == PSTR("update1"));
Entry tag_bob(&trans, GET_BY_TAG, PSTR("bob"));
ASSERT_TRUE(tag_bob.good());
ASSERT_TRUE(!tag_bob.Get(IS_DEL));
ASSERT_TRUE(tag_bob.Get(SINGLETON_TAG) == PSTR("bob"));
- ASSERT_TRUE(tag_bob.GetName().value() == PSTR("update2"));
+ ASSERT_TRUE(tag_bob.Get(NON_UNIQUE_NAME) == PSTR("update2"));
// The old item should be unchanged.
Entry hurdle(&trans, GET_BY_HANDLE, hurdle_handle);
ASSERT_TRUE(hurdle.good());
ASSERT_TRUE(!hurdle.Get(IS_DEL));
ASSERT_TRUE(hurdle.Get(SINGLETON_TAG).empty());
- ASSERT_TRUE(hurdle.GetName().value() == PSTR("bob"));
+ ASSERT_TRUE(hurdle.Get(NON_UNIQUE_NAME) == PSTR("bob"));
}
}
diff --git a/chrome/browser/sync/engine/syncer_util.cc b/chrome/browser/sync/engine/syncer_util.cc
index 6828297..130a351 100644..100755
--- a/chrome/browser/sync/engine/syncer_util.cc
+++ b/chrome/browser/sync/engine/syncer_util.cc
@@ -32,7 +32,6 @@ using syncable::Entry;
using syncable::ExtendedAttributeKey;
using syncable::GET_BY_HANDLE;
using syncable::GET_BY_ID;
-using syncable::GET_BY_PARENTID_AND_DBNAME;
using syncable::ID;
using syncable::IS_BOOKMARK_OBJECT;
using syncable::IS_DEL;
@@ -45,7 +44,7 @@ using syncable::MTIME;
using syncable::MutableEntry;
using syncable::MutableExtendedAttribute;
using syncable::NEXT_ID;
-using syncable::Name;
+using syncable::NON_UNIQUE_NAME;
using syncable::PARENT_ID;
using syncable::PREV_ID;
using syncable::ReadTransaction;
@@ -56,14 +55,12 @@ using syncable::SERVER_IS_BOOKMARK_OBJECT;
using syncable::SERVER_IS_DEL;
using syncable::SERVER_IS_DIR;
using syncable::SERVER_MTIME;
-using syncable::SERVER_NAME;
+using syncable::SERVER_NON_UNIQUE_NAME;
using syncable::SERVER_PARENT_ID;
using syncable::SERVER_POSITION_IN_PARENT;
using syncable::SERVER_VERSION;
using syncable::SINGLETON_TAG;
using syncable::SYNCER;
-using syncable::SyncName;
-using syncable::UNSANITIZED_NAME;
using syncable::WriteTransaction;
namespace browser_sync {
@@ -71,32 +68,6 @@ namespace browser_sync {
using std::string;
using std::vector;
-// TODO(ncarter): Remove unique-in-parent title support and name conflicts.
-// static
-syncable::Id SyncerUtil::GetNameConflictingItemId(
- syncable::BaseTransaction* trans,
- const syncable::Id& parent_id,
- const PathString& server_name) {
-
- Entry same_path(trans, GET_BY_PARENTID_AND_DBNAME, parent_id, server_name);
- if (same_path.good() && !same_path.GetName().HasBeenSanitized())
- return same_path.Get(ID);
- Name doctored_name(server_name);
- doctored_name.db_value().MakeOSLegal();
- if (!doctored_name.HasBeenSanitized())
- return syncable::kNullId;
- Directory::ChildHandles children;
- trans->directory()->GetChildHandles(trans, parent_id, &children);
- Directory::ChildHandles::iterator i = children.begin();
- while (i != children.end()) {
- Entry child_entry(trans, GET_BY_HANDLE, *i++);
- CHECK(child_entry.good());
- if (0 == ComparePathNames(child_entry.Get(UNSANITIZED_NAME), server_name))
- return child_entry.Get(ID);
- }
- return syncable::kNullId;
-}
-
// Returns the number of unsynced entries.
// static
int SyncerUtil::GetUnsyncedEntries(syncable::BaseTransaction* trans,
@@ -220,30 +191,6 @@ UpdateAttemptResponse SyncerUtil::AttemptToUpdateEntry(
syncable::MutableEntry* const entry,
ConflictResolver* resolver) {
- syncable::Id conflicting_id;
- UpdateAttemptResponse result =
- AttemptToUpdateEntryWithoutMerge(trans, entry, &conflicting_id);
- if (result != NAME_CONFLICT) {
- return result;
- }
- syncable::MutableEntry same_path(trans, syncable::GET_BY_ID, conflicting_id);
- CHECK(same_path.good());
-
- if (resolver &&
- resolver->AttemptItemMerge(trans, &same_path, entry)) {
- return SUCCESS;
- }
- LOG(INFO) << "Not updating item, path collision. Update:\n" << *entry
- << "\nSame Path:\n" << same_path;
- return CONFLICT;
-}
-
-// static
-UpdateAttemptResponse SyncerUtil::AttemptToUpdateEntryWithoutMerge(
- syncable::WriteTransaction* const trans,
- syncable::MutableEntry* const entry,
- syncable::Id* const conflicting_id) {
-
CHECK(entry->good());
if (!entry->Get(IS_UNAPPLIED_UPDATE))
return SUCCESS; // No work to do.
@@ -273,16 +220,6 @@ UpdateAttemptResponse SyncerUtil::AttemptToUpdateEntryWithoutMerge(
return CONFLICT;
}
}
- PathString server_name = entry->Get(SERVER_NAME);
- syncable::Id conflict_id =
- SyncerUtil::GetNameConflictingItemId(trans,
- entry->Get(SERVER_PARENT_ID),
- server_name);
- if (conflict_id != syncable::kNullId && conflict_id != id) {
- if (conflicting_id)
- *conflicting_id = conflict_id;
- return NAME_CONFLICT;
- }
} else if (entry->Get(IS_DIR)) {
Directory::ChildHandles handles;
trans->directory()->GetChildHandles(trans, id, &handles);
@@ -304,7 +241,7 @@ UpdateAttemptResponse SyncerUtil::AttemptToUpdateEntryWithoutMerge(
void SyncerUtil::UpdateServerFieldsFromUpdate(
MutableEntry* local_entry,
const SyncEntity& server_entry,
- const SyncName& name) {
+ const PathString& name) {
if (server_entry.deleted()) {
// The server returns very lightweight replies for deletions, so we don't
// clobber a bunch of fields on delete.
@@ -319,7 +256,7 @@ void SyncerUtil::UpdateServerFieldsFromUpdate(
CHECK(local_entry->Get(ID) == server_entry.id())
<< "ID Changing not supported here";
local_entry->Put(SERVER_PARENT_ID, server_entry.parent_id());
- local_entry->PutServerName(name);
+ local_entry->Put(SERVER_NON_UNIQUE_NAME, name);
local_entry->Put(SERVER_VERSION, server_entry.version());
local_entry->Put(SERVER_CTIME,
ServerTimeToClientTime(server_entry.ctime()));
@@ -421,7 +358,7 @@ bool SyncerUtil::ServerAndLocalEntriesMatch(syncable::Entry* entry) {
if (entry->Get(IS_DEL) && entry->Get(SERVER_IS_DEL))
return true;
// Name should exactly match here.
- if (!entry->SyncNameMatchesServerName()) {
+ if (!(entry->Get(NON_UNIQUE_NAME) == entry->Get(SERVER_NON_UNIQUE_NAME))) {
LOG(WARNING) << "Unsanitized name mismatch";
return false;
}
@@ -486,31 +423,12 @@ void SyncerUtil::UpdateLocalDataFromServerData(
entry->Put(IS_BOOKMARK_OBJECT, entry->Get(SERVER_IS_BOOKMARK_OBJECT));
// This strange dance around the IS_DEL flag avoids problems when setting
// the name.
+ // TODO(chron): Is this still an issue? Unit test this codepath.
if (entry->Get(SERVER_IS_DEL)) {
entry->Put(IS_DEL, true);
} else {
- Name name = Name::FromSyncName(entry->GetServerName());
- name.db_value().MakeOSLegal();
- bool was_doctored_before_made_noncolliding = name.HasBeenSanitized();
- name.db_value().MakeNoncollidingForEntry(trans,
- entry->Get(SERVER_PARENT_ID),
- entry);
- bool was_doctored = name.HasBeenSanitized();
- if (was_doctored) {
- // If we're changing the name of entry, either its name should be
- // illegal, or some other entry should have an unsanitized name.
- // There's should be a CHECK in every code path.
- Entry blocking_entry(trans, GET_BY_PARENTID_AND_DBNAME,
- entry->Get(SERVER_PARENT_ID),
- name.value());
- if (blocking_entry.good())
- CHECK(blocking_entry.GetName().HasBeenSanitized());
- else
- CHECK(was_doctored_before_made_noncolliding);
- }
- CHECK(entry->PutParentIdAndName(entry->Get(SERVER_PARENT_ID), name))
- << "Name Clash in UpdateLocalDataFromServerData: "
- << *entry;
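+    // Just copy the server's name and parent; the old sanitizing and
+    // collision-avoidance dance is no longer needed.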
+ entry->Put(NON_UNIQUE_NAME, entry->Get(SERVER_NON_UNIQUE_NAME));
+ entry->Put(PARENT_ID, entry->Get(SERVER_PARENT_ID));
CHECK(entry->Put(IS_DEL, false));
Id new_predecessor = ComputePrevIdFromServerPosition(trans, entry,
entry->Get(SERVER_PARENT_ID));
diff --git a/chrome/browser/sync/engine/syncer_util.h b/chrome/browser/sync/engine/syncer_util.h
index cf9d9c7..9f38873 100644..100755
--- a/chrome/browser/sync/engine/syncer_util.h
+++ b/chrome/browser/sync/engine/syncer_util.h
@@ -26,12 +26,6 @@ class SyncEntity;
class SyncerUtil {
public:
- // TODO(ncarter): Remove unique-in-parent title support and name conflicts.
- static syncable::Id GetNameConflictingItemId(
- syncable::BaseTransaction* trans,
- const syncable::Id& parent_id,
- const PathString& server_name);
-
static void ChangeEntryIDAndUpdateChildren(
syncable::WriteTransaction* trans,
syncable::MutableEntry* entry,
@@ -56,16 +50,12 @@ class SyncerUtil {
syncable::MutableEntry* const entry,
ConflictResolver* resolver);
- static UpdateAttemptResponse AttemptToUpdateEntryWithoutMerge(
- syncable::WriteTransaction* const trans,
- syncable::MutableEntry* const entry,
- syncable::Id* const conflicting_id);
// Pass in name to avoid redundant UTF8 conversion.
static void UpdateServerFieldsFromUpdate(
syncable::MutableEntry* local_entry,
const SyncEntity& server_entry,
- const syncable::SyncName& name);
+ const PathString& name);
static void ApplyExtendedAttributes(
syncable::MutableEntry* local_entry,
@@ -186,6 +176,7 @@ inline bool ClientAndServerTimeMatch(int64 client_time, int64 server_time) {
// The sync server uses Java Times (ms since 1970)
// and the client uses FILETIMEs (ns since 1601) so we need to convert
// between the timescales.
+// TODO(sync): Fix this. No need to use two timescales.
inline int64 ServerTimeToClientTime(int64 server_time) {
return server_time * GG_LONGLONG(10000) + GG_LONGLONG(116444736000000000);
}
diff --git a/chrome/browser/sync/engine/update_applicator.cc b/chrome/browser/sync/engine/update_applicator.cc
index d0ff392..d0ff392 100644..100755
--- a/chrome/browser/sync/engine/update_applicator.cc
+++ b/chrome/browser/sync/engine/update_applicator.cc
diff --git a/chrome/browser/sync/engine/update_applicator.h b/chrome/browser/sync/engine/update_applicator.h
index dc27e17..dc27e17 100644..100755
--- a/chrome/browser/sync/engine/update_applicator.h
+++ b/chrome/browser/sync/engine/update_applicator.h
diff --git a/chrome/browser/sync/engine/verify_updates_command.cc b/chrome/browser/sync/engine/verify_updates_command.cc
index 83d0257..216eff5 100644
--- a/chrome/browser/sync/engine/verify_updates_command.cc
+++ b/chrome/browser/sync/engine/verify_updates_command.cc
@@ -68,9 +68,8 @@ VerifyResult VerifyUpdatesCommand::VerifyUpdate(
return VERIFY_FAIL;
}
{
- SyncName name = SyncerProtoUtil::NameFromSyncEntity(entry);
- if ((name.value().empty() || name.non_unique_value().empty()) &&
- !deleted) {
+ const std::string name = SyncerProtoUtil::NameFromSyncEntity(entry);
+ if (name.empty() && !deleted) {
LOG(ERROR) << "Zero length name in non-deleted update";
return VERIFY_FAIL;
}
diff --git a/chrome/browser/sync/syncable/directory_backing_store.cc b/chrome/browser/sync/syncable/directory_backing_store.cc
index 173dc0b..45b72cb 100644..100755
--- a/chrome/browser/sync/syncable/directory_backing_store.cc
+++ b/chrome/browser/sync/syncable/directory_backing_store.cc
@@ -37,38 +37,7 @@ namespace syncable {
static const string::size_type kUpdateStatementBufferSize = 2048;
// Increment this version whenever updating DB tables.
-static const int32 kCurrentDBVersion = 67;
-
-#if OS_WIN
-// TODO(sync): remove
-static void PathNameMatch16(sqlite3_context* context, int argc,
- sqlite3_value** argv) {
- const PathString pathspec(reinterpret_cast<const PathChar*>
- (sqlite3_value_text16(argv[0])), sqlite3_value_bytes16(argv[0]) / 2);
-
- const void* name_text = sqlite3_value_text16(argv[1]);
- int name_bytes = sqlite3_value_bytes16(argv[1]);
- // If the text is null, we need to avoid the PathString constructor.
- if (name_text != NULL) {
- // Have to copy to append a terminating 0 anyway.
- const PathString name(reinterpret_cast<const PathChar*>
- (sqlite3_value_text16(argv[1])),
- sqlite3_value_bytes16(argv[1]) / 2);
- sqlite3_result_int(context, PathNameMatch(name, pathspec));
- } else {
- sqlite3_result_int(context, PathNameMatch(PathString(), pathspec));
- }
-}
-
-// Sqlite allows setting of the escape character in an ESCAPE clause and
-// this character is passed in as a third character to the like function.
-// See: http://www.sqlite.org/lang_expr.html
-static void PathNameMatch16WithEscape(sqlite3_context* context,
- int argc, sqlite3_value** argv) {
- // Never seen this called, but just in case.
- LOG(FATAL) << "PathNameMatch16WithEscape() not implemented";
-}
-#endif
+static const int32 kCurrentDBVersion = 68;
static void RegisterPathNameCollate(sqlite3* dbhandle) {
const int collate = SQLITE_UTF8;
@@ -76,23 +45,6 @@ static void RegisterPathNameCollate(sqlite3* dbhandle) {
NULL, &ComparePathNames16));
}
-// Replace the LIKE operator with our own implementation that does file spec
-// matching like "*.txt".
-static void RegisterPathNameMatch(sqlite3* dbhandle) {
- // We only register this on Windows. We use the normal sqlite
- // matching function on mac/linux.
- // note that the function PathNameMatch() does a simple ==
- // comparison on mac, so that would have to be fixed if
- // we really wanted to use PathNameMatch on mac/linux w/ the
- // same pattern strings as we do on windows.
-#if defined(OS_WIN)
- CHECK(SQLITE_OK == sqlite3_create_function(dbhandle, "like",
- 2, SQLITE_ANY, NULL, &PathNameMatch16, NULL, NULL));
- CHECK(SQLITE_OK == sqlite3_create_function(dbhandle, "like",
- 3, SQLITE_ANY, NULL, &PathNameMatch16WithEscape, NULL, NULL));
-#endif // OS_WIN
-}
-
static inline bool IsSqliteErrorOurFault(int result) {
switch (result) {
case SQLITE_MISMATCH:
@@ -265,7 +217,7 @@ bool DirectoryBackingStore::OpenAndConfigureHandleHelper(
if (SQLITE_OK == SqliteOpen(backing_filepath_, handle)) {
sqlite3_busy_timeout(*handle, kDirectoryBackingStoreBusyTimeoutMs);
RegisterPathNameCollate(*handle);
- RegisterPathNameMatch(*handle);
+
return true;
}
return false;
diff --git a/chrome/browser/sync/syncable/syncable.cc b/chrome/browser/sync/syncable/syncable.cc
index af2cf28..97dd525 100644..100755
--- a/chrome/browser/sync/syncable/syncable.cc
+++ b/chrome/browser/sync/syncable/syncable.cc
@@ -22,6 +22,7 @@
#include <functional>
#include <iomanip>
#include <iterator>
+#include <limits>
#include <set>
#include <string>
@@ -88,6 +89,7 @@ int64 Now() {
// Compare functions and hashes for the indices.
// Callback for sqlite3
+// TODO(chron): This should be somewhere else
int ComparePathNames16(void*, int a_bytes, const void* a, int b_bytes,
const void* b) {
int result = base::strncasecmp(reinterpret_cast<const char *>(a),
@@ -118,35 +120,32 @@ class HashField {
base::hash_set<int64> hasher_;
};
-// TODO(ncarter): Rename!
+// TODO(chron): Remove this function.
int ComparePathNames(const PathString& a, const PathString& b) {
const size_t val_size = sizeof(PathString::value_type);
return ComparePathNames16(NULL, a.size() * val_size, a.data(),
b.size() * val_size, b.data());
}
-class LessParentIdAndNames {
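+// Comparator for the parent-id/child index: groups siblings by PARENT_ID and
+// breaks ties by META_HANDLE, which never changes for a given entry.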
+class LessParentIdAndHandle {
public:
bool operator() (const syncable::EntryKernel* a,
const syncable::EntryKernel* b) const {
- if (a->ref(PARENT_ID) != b->ref(PARENT_ID))
+ if (a->ref(PARENT_ID) != b->ref(PARENT_ID)) {
return a->ref(PARENT_ID) < b->ref(PARENT_ID);
- return ComparePathNames(a->ref(NAME), b->ref(NAME)) < 0;
+ }
+
+ // Meta handles are immutable per entry so this is ideal.
+ return a->ref(META_HANDLE) < b->ref(META_HANDLE);
}
};
+// TODO(chron): Remove this function.
bool LessPathNames::operator() (const PathString& a,
const PathString& b) const {
return ComparePathNames(a, b) < 0;
}
-// static
-Name Name::FromEntryKernel(EntryKernel* kernel) {
- PathString& sync_name_ref = kernel->ref(UNSANITIZED_NAME).empty() ?
- kernel->ref(NAME) : kernel->ref(UNSANITIZED_NAME);
- return Name(kernel->ref(NAME), sync_name_ref, kernel->ref(NON_UNIQUE_NAME));
-}
-
///////////////////////////////////////////////////////////////////////////
// Directory
@@ -161,7 +160,7 @@ Directory::Kernel::Kernel(const FilePath& db_path,
name_(name),
metahandles_index(new Directory::MetahandlesIndex),
ids_index(new Directory::IdsIndex),
- parent_id_and_names_index(new Directory::ParentIdAndNamesIndex),
+ parent_id_child_index(new Directory::ParentIdChildIndex),
extended_attributes(new ExtendedAttributes),
unapplied_update_metahandles(new MetahandleSet),
unsynced_metahandles(new MetahandleSet),
@@ -196,7 +195,7 @@ Directory::Kernel::~Kernel() {
delete unsynced_metahandles;
delete unapplied_update_metahandles;
delete extended_attributes;
- delete parent_id_and_names_index;
+ delete parent_id_child_index;
delete ids_index;
for_each(metahandles_index->begin(), metahandles_index->end(), DeleteEntry);
delete metahandles_index;
@@ -209,58 +208,6 @@ Directory::~Directory() {
Close();
}
-BOOL PathNameMatch(const PathString& pathname, const PathString& pathspec) {
-#if defined(OS_WIN)
- // Note that if we go Vista only this is easier:
- // http://msdn2.microsoft.com/en-us/library/ms628611.aspx
-
- // PathMatchSpec strips spaces from the start of pathspec, so we compare those
- // ourselves.
- const PathChar* pathname_ptr = pathname.c_str();
- const PathChar* pathspec_ptr = pathspec.c_str();
-
- while (*pathname_ptr == ' ' && *pathspec_ptr == ' ')
- ++pathname_ptr, ++pathspec_ptr;
-
- // If we have more inital spaces in the pathspec than in the pathname then the
- // result from PathMatchSpec will be erroneous.
- if (*pathspec_ptr == ' ')
- return FALSE;
-
- // PathMatchSpec also gets "confused" when there are ';' characters in name or
- // in spec. So, if we match (f.i.) ";" with ";" PathMatchSpec will return
- // FALSE (which is wrong). Luckily for us, we can easily fix this by
- // substituting ';' with ':' which is illegal character in file name and
- // we're not going to see it there. With ':' in path name and spec
- // PathMatchSpec works fine.
- if ((NULL == strchr(pathname_ptr, ';')) &&
- (NULL == strchr(pathspec_ptr, ';'))) {
- // No ';' in file name and in spec. Just pass it as it is.
- return ::PathMatchSpecA(pathname_ptr, pathspec_ptr);
- }
-
- // We need to subst ';' with ':' in both, name and spec.
- PathString name_subst(pathname_ptr);
- PathString spec_subst(pathspec_ptr);
-
- PathString::size_type index = name_subst.find(L';');
- while (PathString::npos != index) {
- name_subst[index] = ':';
- index = name_subst.find(';', index + 1);
- }
-
- index = spec_subst.find(L';');
- while (PathString::npos != index) {
- spec_subst[index] = ':';
- index = spec_subst.find(';', index + 1);
- }
-
- return ::PathMatchSpecA(name_subst.c_str(), spec_subst.c_str());
-#else
- return 0 == ComparePathNames(pathname, pathspec);
-#endif
-}
-
DirOpenResult Directory::Open(const FilePath& file_path,
const PathString& name) {
const DirOpenResult result = OpenImpl(file_path, name);
@@ -274,7 +221,7 @@ void Directory::InitializeIndices() {
for (; it != kernel_->metahandles_index->end(); ++it) {
EntryKernel* entry = *it;
if (!entry->ref(IS_DEL))
- kernel_->parent_id_and_names_index->insert(entry);
+ kernel_->parent_id_child_index->insert(entry);
kernel_->ids_index->insert(entry);
if (entry->ref(IS_UNSYNCED))
kernel_->unsynced_metahandles->insert(entry->ref(META_HANDLE));
@@ -377,61 +324,14 @@ EntryKernel* Directory::GetEntryByHandle(const int64 metahandle,
return NULL;
}
-EntryKernel* Directory::GetChildWithName(const Id& parent_id,
- const PathString& name) {
- ScopedKernelLock lock(this);
- return GetChildWithName(parent_id, name, &lock);
-}
-
-// Will return child entry if the folder is opened,
-// otherwise it will return NULL.
-EntryKernel* Directory::GetChildWithName(const Id& parent_id,
- const PathString& name,
- ScopedKernelLock* const lock) {
- PathString dbname = name;
- EntryKernel* parent = GetEntryById(parent_id, lock);
- if (parent == NULL)
- return NULL;
- return GetChildWithNameImpl(parent_id, dbname, lock);
-}
-
-// Will return child entry even when the folder is not opened. This is used by
-// syncer to apply update when folder is closed.
-EntryKernel* Directory::GetChildWithDBName(const Id& parent_id,
- const PathString& name) {
- ScopedKernelLock lock(this);
- return GetChildWithNameImpl(parent_id, name, &lock);
-}
-
-EntryKernel* Directory::GetChildWithNameImpl(const Id& parent_id,
- const PathString& name,
- ScopedKernelLock* const lock) {
- // First look up in memory:
- kernel_->needle.ref(NAME) = name;
- kernel_->needle.ref(PARENT_ID) = parent_id;
- ParentIdAndNamesIndex::iterator found =
- kernel_->parent_id_and_names_index->find(&kernel_->needle);
- if (found != kernel_->parent_id_and_names_index->end()) {
- // Found it in memory. Easy.
- return *found;
- }
- return NULL;
-}
-
// An interface to specify the details of which children
// GetChildHandles() is looking for.
+// TODO(chron): Clean this up into one function to get child handles
struct PathMatcher {
explicit PathMatcher(const Id& parent_id) : parent_id_(parent_id) { }
virtual ~PathMatcher() { }
- enum MatchType {
- NO_MATCH,
- MATCH,
- // Means we found the only entry we're looking for in
- // memory so we don't need to check the DB.
- EXACT_MATCH
- };
- virtual MatchType PathMatches(const PathString& path) = 0;
- typedef Directory::ParentIdAndNamesIndex Index;
+
+ typedef Directory::ParentIdChildIndex Index;
virtual Index::iterator lower_bound(Index* index) = 0;
virtual Index::iterator upper_bound(Index* index) = 0;
const Id parent_id_;
@@ -439,48 +339,22 @@ struct PathMatcher {
};
// Matches all children.
+// TODO(chron): Unit test this by itself
struct AllPathsMatcher : public PathMatcher {
explicit AllPathsMatcher(const Id& parent_id) : PathMatcher(parent_id) {
}
- virtual MatchType PathMatches(const PathString& path) {
- return MATCH;
- }
- virtual Index::iterator lower_bound(Index* index) {
- needle_.ref(PARENT_ID) = parent_id_;
- needle_.ref(NAME).clear();
- return index->lower_bound(&needle_);
- }
- virtual Index::iterator upper_bound(Index* index) {
- needle_.ref(PARENT_ID) = parent_id_;
- needle_.ref(NAME).clear();
- Index::iterator i = index->upper_bound(&needle_),
- end = index->end();
- while (i != end && (*i)->ref(PARENT_ID) == parent_id_)
- ++i;
- return i;
- }
-};
-
-// Matches an exact filename only; no wildcards.
-struct ExactPathMatcher : public PathMatcher {
- ExactPathMatcher(const PathString& pathspec, const Id& parent_id)
- : PathMatcher(parent_id), pathspec_(pathspec) {
- }
- virtual MatchType PathMatches(const PathString& path) {
- return 0 == ComparePathNames(path, pathspec_) ? EXACT_MATCH : NO_MATCH;
- }
virtual Index::iterator lower_bound(Index* index) {
needle_.ref(PARENT_ID) = parent_id_;
- needle_.ref(NAME) = pathspec_;
+ needle_.ref(META_HANDLE) = std::numeric_limits<int64>::min();
return index->lower_bound(&needle_);
}
+
virtual Index::iterator upper_bound(Index* index) {
needle_.ref(PARENT_ID) = parent_id_;
- needle_.ref(NAME) = pathspec_;
+ needle_.ref(META_HANDLE) = std::numeric_limits<int64>::max();
return index->upper_bound(&needle_);
}
- const PathString pathspec_;
};
void Directory::GetChildHandles(BaseTransaction* trans, const Id& parent_id,
@@ -496,21 +370,17 @@ void Directory::GetChildHandlesImpl(BaseTransaction* trans, const Id& parent_id,
result->clear();
{
ScopedKernelLock lock(this);
- ParentIdAndNamesIndex* const index =
- kernel_->parent_id_and_names_index;
- typedef ParentIdAndNamesIndex::iterator iterator;
+
+ // This index is sorted by parent id and metahandle.
+ ParentIdChildIndex* const index = kernel_->parent_id_child_index;
+ typedef ParentIdChildIndex::iterator iterator;
for (iterator i = matcher->lower_bound(index),
end = matcher->upper_bound(index); i != end; ++i) {
// root's parent_id is NULL in the db but 0 in memory, so
      // have to avoid listing the root as its own child.
if ((*i)->ref(ID) == (*i)->ref(PARENT_ID))
continue;
- PathMatcher::MatchType match = matcher->PathMatches((*i)->ref(NAME));
- if (PathMatcher::NO_MATCH == match)
- continue;
result->push_back((*i)->ref(META_HANDLE));
- if (PathMatcher::EXACT_MATCH == match)
- return;
}
}
}
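
The sentinel metahandles above work because no real handle sorts before std::numeric_limits<int64>::min() or after max(), so lower_bound/upper_bound bracket exactly one parent's children. A self-contained sketch of the same pattern over plain std::pair keys (toy data, not the EntryKernel needle):

#include <iostream>
#include <limits>
#include <set>
#include <utility>

typedef long long int64;
typedef std::pair<int64, int64> ParentAndHandle;  // (parent_id, meta_handle)
typedef std::set<ParentAndHandle> ChildIndex;

int main() {
  ChildIndex index;
  index.insert(ParentAndHandle(1, 10));
  index.insert(ParentAndHandle(1, 12));
  index.insert(ParentAndHandle(2, 11));

  const int64 parent = 1;
  // Sentinel handles bracket every possible child of |parent|.
  ChildIndex::iterator begin = index.lower_bound(
      ParentAndHandle(parent, std::numeric_limits<int64>::min()));
  ChildIndex::iterator end = index.upper_bound(
      ParentAndHandle(parent, std::numeric_limits<int64>::max()));

  for (ChildIndex::iterator it = begin; it != end; ++it)
    std::cout << "child handle " << it->second << std::endl;  // 10, then 12
  return 0;
}
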
@@ -519,17 +389,6 @@ EntryKernel* Directory::GetRootEntry() {
return GetEntryById(Id());
}
-EntryKernel* Directory::GetEntryByPath(const PathString& path) {
- CHECK(kernel_);
- EntryKernel* result = GetRootEntry();
- CHECK(result) << "There should always be a root node.";
- for (PathSegmentIterator<PathString> i(path), end;
- i != end && NULL != result; ++i) {
- result = GetChildWithName(result->ref(ID), *i);
- }
- return result;
-}
-
void ZeroFields(EntryKernel* entry, int first_field) {
int i = first_field;
// Note that bitset<> constructor sets all bits to zero, and strings
@@ -554,29 +413,27 @@ void Directory::InsertEntry(EntryKernel* entry, ScopedKernelLock* lock) {
CHECK(NULL != entry);
static const char error[] = "Entry already in memory index.";
CHECK(kernel_->metahandles_index->insert(entry).second) << error;
- if (!entry->ref(IS_DEL))
- CHECK(kernel_->parent_id_and_names_index->insert(entry).second) << error;
+
+ if (!entry->ref(IS_DEL)) {
+ CHECK(kernel_->parent_id_child_index->insert(entry).second) << error;
+ }
CHECK(kernel_->ids_index->insert(entry).second) << error;
}
-bool Directory::Undelete(EntryKernel* const entry) {
+void Directory::Undelete(EntryKernel* const entry) {
DCHECK(entry->ref(IS_DEL));
ScopedKernelLock lock(this);
- if (NULL != GetChildWithName(entry->ref(PARENT_ID), entry->ref(NAME), &lock))
- return false; // Would have duplicated existing entry.
entry->ref(IS_DEL) = false;
entry->dirty[IS_DEL] = true;
- CHECK(kernel_->parent_id_and_names_index->insert(entry).second);
- return true;
+ CHECK(kernel_->parent_id_child_index->insert(entry).second);
}
-bool Directory::Delete(EntryKernel* const entry) {
+void Directory::Delete(EntryKernel* const entry) {
DCHECK(!entry->ref(IS_DEL));
entry->ref(IS_DEL) = true;
entry->dirty[IS_DEL] = true;
ScopedKernelLock lock(this);
- CHECK(1 == kernel_->parent_id_and_names_index->erase(entry));
- return true;
+ CHECK(1 == kernel_->parent_id_child_index->erase(entry));
}
bool Directory::ReindexId(EntryKernel* const entry, const Id& new_id) {
@@ -589,30 +446,22 @@ bool Directory::ReindexId(EntryKernel* const entry, const Id& new_id) {
return true;
}
-bool Directory::ReindexParentIdAndName(EntryKernel* const entry,
- const Id& new_parent_id,
- const PathString& new_name) {
+void Directory::ReindexParentId(EntryKernel* const entry,
+ const Id& new_parent_id) {
+
ScopedKernelLock lock(this);
- PathString new_indexed_name = new_name;
if (entry->ref(IS_DEL)) {
entry->ref(PARENT_ID) = new_parent_id;
- entry->ref(NAME) = new_indexed_name;
- return true;
+ return;
}
- // check for a case changing rename
- if (entry->ref(PARENT_ID) == new_parent_id &&
- 0 == ComparePathNames(entry->ref(NAME), new_indexed_name)) {
- entry->ref(NAME) = new_indexed_name;
- } else {
- if (NULL != GetChildWithName(new_parent_id, new_indexed_name, &lock))
- return false;
- CHECK(1 == kernel_->parent_id_and_names_index->erase(entry));
- entry->ref(PARENT_ID) = new_parent_id;
- entry->ref(NAME) = new_indexed_name;
- CHECK(kernel_->parent_id_and_names_index->insert(entry).second);
+ if (entry->ref(PARENT_ID) == new_parent_id) {
+ return;
}
- return true;
+
+ CHECK(1 == kernel_->parent_id_child_index->erase(entry));
+ entry->ref(PARENT_ID) = new_parent_id;
+ CHECK(kernel_->parent_id_child_index->insert(entry).second);
}
// static
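
ReindexParentId obeys the standard rule for std::set members: a field that feeds the comparator must not be mutated while the element is still in the set, so the entry is erased, updated, and reinserted (deleted entries are skipped because they are not indexed at all). The erase-update-reinsert pattern in isolation, using the same kind of toy types as the earlier sketch:

#include <cstdlib>
#include <set>

typedef long long int64;

struct ToyEntry {
  int64 parent_id;
  int64 meta_handle;
};

struct LessParentIdAndHandleSketch {
  bool operator()(const ToyEntry* a, const ToyEntry* b) const {
    if (a->parent_id != b->parent_id)
      return a->parent_id < b->parent_id;
    return a->meta_handle < b->meta_handle;
  }
};

typedef std::set<ToyEntry*, LessParentIdAndHandleSketch> ChildIndex;

// Moves |entry| under |new_parent_id| while keeping |index| consistent.
// Mutating parent_id in place would silently corrupt the set's ordering.
void ReindexParentIdSketch(ChildIndex* index, ToyEntry* entry,
                           int64 new_parent_id) {
  if (entry->parent_id == new_parent_id)
    return;
  if (index->erase(entry) != 1)   // mirrors the CHECKs in the real code
    abort();
  entry->parent_id = new_parent_id;
  if (!index->insert(entry).second)
    abort();
}
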
@@ -971,7 +820,7 @@ void Directory::CheckTreeInvariants(syncable::BaseTransaction* trans,
}
if (!e.Get(IS_DEL)) {
CHECK(id != parentid) << e;
- CHECK(!e.Get(NAME).empty()) << e;
+ CHECK(!e.Get(NON_UNIQUE_NAME).empty()) << e;
int safety_count = handles.size() + 1;
while (!parentid.IsRoot()) {
if (!idfilter.ShouldConsider(parentid))
@@ -1148,24 +997,6 @@ Entry::Entry(BaseTransaction* trans, GetByHandle, int64 metahandle)
kernel_ = trans->directory()->GetEntryByHandle(metahandle);
}
-Entry::Entry(BaseTransaction* trans, GetByPath, const PathString& path)
- : basetrans_(trans) {
- kernel_ = trans->directory()->GetEntryByPath(path);
-}
-
-Entry::Entry(BaseTransaction* trans, GetByParentIdAndName, const Id& parentid,
- const PathString& name)
- : basetrans_(trans) {
- kernel_ = trans->directory()->GetChildWithName(parentid, name);
-}
-
-Entry::Entry(BaseTransaction* trans, GetByParentIdAndDBName, const Id& parentid,
- const PathString& name)
- : basetrans_(trans) {
- kernel_ = trans->directory()->GetChildWithDBName(parentid, name);
-}
-
-
Directory* Entry::dir() const {
return basetrans_->directory();
}
@@ -1194,11 +1025,8 @@ void Entry::DeleteAllExtendedAttributes(WriteTransaction *trans) {
MutableEntry::MutableEntry(WriteTransaction* trans, Create,
const Id& parent_id, const PathString& name)
- : Entry(trans), write_transaction_(trans) {
- if (NULL != trans->directory()->GetChildWithName(parent_id, name)) {
- kernel_ = NULL; // would have duplicated an existing entry.
- return;
- }
+ : Entry(trans),
+ write_transaction_(trans) {
Init(trans, parent_id, name);
}
@@ -1213,8 +1041,6 @@ void MutableEntry::Init(WriteTransaction* trans, const Id& parent_id,
kernel_->dirty[META_HANDLE] = true;
kernel_->ref(PARENT_ID) = parent_id;
kernel_->dirty[PARENT_ID] = true;
- kernel_->ref(NAME) = name;
- kernel_->dirty[NAME] = true;
kernel_->ref(NON_UNIQUE_NAME) = name;
kernel_->dirty[NON_UNIQUE_NAME] = true;
kernel_->ref(IS_NEW) = true;
@@ -1266,38 +1092,17 @@ MutableEntry::MutableEntry(WriteTransaction* trans, GetByHandle,
trans->SaveOriginal(kernel_);
}
-MutableEntry::MutableEntry(WriteTransaction* trans, GetByPath,
- const PathString& path)
- : Entry(trans, GET_BY_PATH, path), write_transaction_(trans) {
- trans->SaveOriginal(kernel_);
-}
-
-MutableEntry::MutableEntry(WriteTransaction* trans, GetByParentIdAndName,
- const Id& parentid, const PathString& name)
- : Entry(trans, GET_BY_PARENTID_AND_NAME, parentid, name),
- write_transaction_(trans) {
- trans->SaveOriginal(kernel_);
-}
-
-MutableEntry::MutableEntry(WriteTransaction* trans, GetByParentIdAndDBName,
- const Id& parentid, const PathString& name)
- : Entry(trans, GET_BY_PARENTID_AND_DBNAME, parentid, name),
- write_transaction_(trans) {
- trans->SaveOriginal(kernel_);
-}
-
bool MutableEntry::PutIsDel(bool is_del) {
DCHECK(kernel_);
- if (is_del == kernel_->ref(IS_DEL))
+ if (is_del == kernel_->ref(IS_DEL)) {
return true;
+ }
if (is_del) {
UnlinkFromOrder();
- if (!dir()->Delete(kernel_))
- return false;
+ dir()->Delete(kernel_);
return true;
} else {
- if (!dir()->Undelete(kernel_))
- return false;
+ dir()->Undelete(kernel_);
PutPredecessor(Id()); // Restores position to the 0th index.
return true;
}
@@ -1319,8 +1124,8 @@ bool MutableEntry::Put(IdField field, const Id& value) {
if (!dir()->ReindexId(kernel_, value))
return false;
} else if (PARENT_ID == field) {
- if (!dir()->ReindexParentIdAndName(kernel_, value, kernel_->ref(NAME)))
- return false;
+ dir()->ReindexParentId(kernel_, value);
+ PutPredecessor(Id());
} else {
kernel_->ref(field) = value;
}
@@ -1345,13 +1150,7 @@ bool MutableEntry::Put(StringField field, const PathString& value) {
bool MutableEntry::PutImpl(StringField field, const PathString& value) {
DCHECK(kernel_);
if (kernel_->ref(field) != value) {
- if (NAME == field) {
- if (!dir()->ReindexParentIdAndName(kernel_, kernel_->ref(PARENT_ID),
- value))
- return false;
- } else {
- kernel_->ref(field) = value;
- }
+ kernel_->ref(field) = value;
kernel_->dirty[static_cast<int>(field)] = true;
}
return true;
@@ -1377,29 +1176,6 @@ bool MutableEntry::Put(IndexedBitField field, bool value) {
return true;
}
-// Avoids temporary collision in index when renaming a bookmark to another
-// folder.
-bool MutableEntry::PutParentIdAndName(const Id& parent_id,
- const Name& name) {
- DCHECK(kernel_);
- const bool parent_id_changes = parent_id != kernel_->ref(PARENT_ID);
- bool db_name_changes = name.db_value() != kernel_->ref(NAME);
- if (parent_id_changes || db_name_changes) {
- if (!dir()->ReindexParentIdAndName(kernel_, parent_id,
- name.db_value()))
- return false;
- }
- Put(UNSANITIZED_NAME, name.GetUnsanitizedName());
- Put(NON_UNIQUE_NAME, name.non_unique_value());
- if (db_name_changes)
- kernel_->dirty[NAME] = true;
- if (parent_id_changes) {
- kernel_->dirty[PARENT_ID] = true;
- PutPredecessor(Id()); // Put in the 0th position.
- }
- return true;
-}
-
void MutableEntry::UnlinkFromOrder() {
Id old_previous = Get(PREV_ID);
Id old_next = Get(NEXT_ID);
@@ -1572,61 +1348,6 @@ bool IsLegalNewParent(BaseTransaction* trans, const Id& entry_id,
return true;
}
-// returns -1 if s contains any non [0-9] characters
-static int PathStringToInteger(PathString s) {
- PathString::const_iterator i = s.begin();
- for (; i != s.end(); ++i) {
- if (PathString::npos == PathString(PSTR("0123456789")).find(*i))
- return -1;
- }
- return atoi(s.c_str());
-}
-
-// appends ~1 to the end of 's' unless there is already ~#, in which case
-// it just increments the number
-static PathString FixBasenameInCollision(const PathString s) {
- PathString::size_type last_tilde = s.find_last_of(PSTR('~'));
- if (PathString::npos == last_tilde) return s + PSTR("~1");
- if (s.size() == (last_tilde + 1)) return s + PSTR("1");
- // we have ~, but not necessarily ~# (for some number >= 0). check for that
- int n;
- if ((n = PathStringToInteger(s.substr(last_tilde + 1))) != -1) {
- n++;
- PathString pre_number = s.substr(0, last_tilde + 1);
- return pre_number + IntToString(n);
- } else {
- // we have a ~, but not a number following it, so we'll add another
- // ~ and this time, a number
- return s + PSTR("~1");
- }
-}
-
-void DBName::MakeNoncollidingForEntry(BaseTransaction* trans,
- const Id& parent_id,
- Entry* e) {
- const PathString& desired_name = *this;
- CHECK(!desired_name.empty());
- PathString::size_type first_dot = desired_name.find_first_of(PSTR('.'));
- if (PathString::npos == first_dot)
- first_dot = desired_name.size();
- PathString basename = desired_name.substr(0, first_dot);
- PathString dotextension = desired_name.substr(first_dot);
- CHECK(basename + dotextension == desired_name);
- for (;;) {
- // Check for collision.
- PathString testname = basename + dotextension;
- Entry same_path_entry(trans, GET_BY_PARENTID_AND_DBNAME,
- parent_id, testname);
- if (!same_path_entry.good() || (e && same_path_entry.Get(ID) == e->Get(ID)))
- break;
- // There was a collision, so fix the name.
- basename = FixBasenameInCollision(basename);
- }
- // Set our value to the new value. This invalidates desired_name.
- PathString new_value = basename + dotextension;
- swap(new_value);
-}
-
const Blob* GetExtendedAttributeValue(const Entry& e,
const PathString& attribute_name) {
ExtendedAttributeKey key(e.Get(META_HANDLE), attribute_name);
diff --git a/chrome/browser/sync/syncable/syncable.h b/chrome/browser/sync/syncable/syncable.h
index c92ea7b..1b541ed 100644..100755
--- a/chrome/browser/sync/syncable/syncable.h
+++ b/chrome/browser/sync/syncable/syncable.h
@@ -131,20 +131,9 @@ enum {
};
enum StringField {
- // The name, transformed so as to be suitable for use as a path-element. It
- // is unique, and legal for this client.
- NAME = STRING_FIELDS_BEGIN,
- // The local name, pre-sanitization. It is not necessarily unique. If this
- // is empty, it means |NAME| did not require sanitization.
- UNSANITIZED_NAME,
- // If NAME/UNSANITIZED_NAME are "Foo (2)", then NON_UNIQUE_NAME may be "Foo".
- NON_UNIQUE_NAME,
- // The server version of |NAME|. It is uniquified, but not necessarily
- // OS-legal.
- SERVER_NAME,
- // The server version of |NON_UNIQUE_NAME|. Again, if SERVER_NAME is
- // like "Foo (2)" due to a commit-time name aside, SERVER_NON_UNIQUE_NAME
- // may hold the value "Foo".
+  // Name, which may be truncated by the server. Duplicates are allowed
+  // within a folder.
+ NON_UNIQUE_NAME = STRING_FIELDS_BEGIN,
+ // The server version of |NON_UNIQUE_NAME|.
SERVER_NON_UNIQUE_NAME,
// For bookmark entries, the URL of the bookmark.
BOOKMARK_URL,
@@ -186,9 +175,6 @@ enum {
enum BitTemp {
SYNCING = BIT_TEMPS_BEGIN,
IS_NEW, // Means use INSERT instead of UPDATE to save to db.
- DEPRECATED_DELETE_ON_CLOSE, // Set by redirector, IS_OPEN must also be set.
- DEPRECATED_CHANGED_SINCE_LAST_OPEN, // Have we been written to since we've
- // been opened.
BIT_TEMPS_END,
};
@@ -223,19 +209,6 @@ enum GetByHandle {
GET_BY_HANDLE
};
-enum GetByPath {
- GET_BY_PATH
-};
-
-enum GetByParentIdAndName {
- GET_BY_PARENTID_AND_NAME
-};
-
-// DBName is the name stored in the database.
-enum GetByParentIdAndDBName {
- GET_BY_PARENTID_AND_DBNAME
-};
-
enum Create {
CREATE
};
@@ -246,182 +219,6 @@ enum CreateNewUpdateItem {
typedef std::set<PathString> AttributeKeySet;
-// DBName is a PathString with additional transformation methods that are
-// useful when trying to derive a unique and legal database name from an
-// unsanitized sync name.
-class DBName : public PathString {
- public:
- explicit DBName(const PathString& database_name)
- : PathString(database_name) { }
-
- // TODO(ncarter): Remove these codepaths to maintain alternate titles which
- // are OS legal filenames, Chrome doesn't depend on this like some other
- // browsers do.
- void MakeOSLegal() {
- PathString new_value = MakePathComponentOSLegal(*this);
- if (!new_value.empty())
- swap(new_value);
- }
-
- // Modify the value of this DBName so that it is not in use by any entry
- // inside |parent_id|, except maybe |e|. |e| may be NULL if you are trying
- // to compute a name for an entry which has yet to be created.
- void MakeNoncollidingForEntry(BaseTransaction* trans,
- const Id& parent_id,
- Entry* e);
-};
-
-// SyncName encapsulates a canonical server name. In general, when we need to
-// muck around with a name that the server sends us (e.g. to make it OS legal),
-// we try to preserve the original value in a SyncName,
-// and distill the new local value into a DBName.
-// At other times, we need to apply transforms in the
-// other direction -- that is, to create a server-appropriate SyncName from a
-// user-updated DBName (which is an OS legal name, but not necessarily in the
-// format that the server wants it to be). For that sort of thing, you should
-// initialize a SyncName from the DB name value, and use the methods of
-// SyncName to canonicalize it. At other times, you have a pair of canonical
-// server values -- one (the "value") which is unique in the parent, and another
-// (the "non unique value") which is not unique in the parent -- and you
-// simply want to create a SyncName to hold them as a pair.
-class SyncName {
- public:
- // Create a SyncName with the initially specified value.
- explicit SyncName(const PathString& sync_name)
- : value_(sync_name), non_unique_value_(sync_name) { }
-
- // Create a SyncName by specifying a value and a non-unique value. If
- // you use this constructor, the values you provide should already be
- // acceptable server names. Don't use the mutation/sanitization methods
- // on the resulting instance -- mutation won't work if you have distinct
- // values for the unique and non-unique fields.
- SyncName(const PathString& unique_sync_name,
- const PathString& non_unique_sync_name)
- : value_(unique_sync_name), non_unique_value_(non_unique_sync_name) { }
-
- // Transform |value_| so that it's a legal server name.
- void MakeServerLegal() {
- DCHECK_EQ(value_, non_unique_value_)
- << "Deriving value_ will overwrite non_unique_value_.";
- // Append a trailing space if the value is one of the server's three
- // forbidden special cases.
- if (value_.empty() ||
- value_ == PSTR(".") ||
- value_ == PSTR("..")) {
- value_.append(PSTR(" "));
- non_unique_value_ = value_;
- }
- // TODO(ncarter): Handle server's other requirement: truncation to 256
- // bytes in Unicode NFC.
- }
-
- const PathString& value() const { return value_; }
- PathString& value() { return value_; }
- const PathString& non_unique_value() const { return non_unique_value_; }
- PathString& non_unique_value() { return non_unique_value_; }
- void set_non_unique_value(const PathString& value) {
- non_unique_value_ = value;
- }
-
- inline bool operator==(const SyncName& right_hand_side) const {
- return value_ == right_hand_side.value_ &&
- non_unique_value_ == right_hand_side.non_unique_value_;
- }
- inline bool operator!=(const SyncName& right_hand_side) const {
- return !(*this == right_hand_side);
- }
- private:
- PathString value_;
- PathString non_unique_value_;
-};
-
-// Name is a SyncName which has an additional DBName that provides a way to
-// interpolate the "unsanitized name" according to the syncable convention.
-//
-// A method might accept a Name as an parameter when the sync and database
-// names need to be set simultaneously:
-//
-// void PutName(const Name& new_name) {
-// Put(NAME, new_name.db_value());
-// Put(UNSANITIZED_NAME, new_name.GetUnsanitizedName());
-// }
-//
-// A code point that is trying to convert between local database names and
-// server sync names can use Name to help with the conversion:
-//
-// SyncName server_name = entry->GetServerName();
-// Name name = Name::FromSyncName(server_name); // Initially, name.value()
-// // and name.db_value() are
-// // equal to
-// // server_name.value().
-// name.db_value().MakeOSLegal(); // Updates name.db_value in-place,
-// // leaving name.value() unchanged.
-// foo->PutName(name);
-//
-class Name : public SyncName {
- public:
- // Create a Name with an initially specified db_value and value.
- Name(const PathString& db_name, const PathString& sync_name)
- : SyncName(sync_name), db_value_(db_name) { }
-
- // Create a Name by specifying the db name, sync name, and non-unique
- // sync name values.
- Name(const PathString& db_name, const PathString& sync_name,
- const PathString& non_unique_sync_name)
- : SyncName(sync_name, non_unique_sync_name), db_value_(db_name) { }
-
- // Create a Name with all name values initially equal to the the single
- // specified argument.
- explicit Name(const PathString& sync_and_db_name)
- : SyncName(sync_and_db_name), db_value_(sync_and_db_name) { }
-
- // Create a Name using the local (non-SERVER) fields of an EntryKernel.
- static Name FromEntryKernel(struct EntryKernel*);
-
- // Create a Name from a SyncName. db_value is initially sync_name.value().
- // non_unique_value() and value() are copied from |sync_name|.
- static Name FromSyncName(const SyncName& sync_name) {
- return Name(sync_name.value(), sync_name.value(),
- sync_name.non_unique_value());
- }
-
- static Name FromDBNameAndSyncName(const PathString& db_name,
- const SyncName& sync_name) {
- return Name(db_name, sync_name.value(), sync_name.non_unique_value());
- }
-
- // Get the database name. The non-const version is useful for in-place
- // mutation.
- const DBName& db_value() const { return db_value_; }
- DBName& db_value() { return db_value_; }
-
- // Do the sync names and database names differ? This indicates that
- // the sync name has been sanitized, and that GetUnsanitizedName() will
- // be non-empty.
- bool HasBeenSanitized() const { return db_value_ != value(); }
-
- // Compute the value of the unsanitized name from the current sync and db
- // name values. The unsanitized name is the sync name value, unless the sync
- // name is the same as the db name value, in which case the unsanitized name
- // is empty.
- PathString GetUnsanitizedName() const {
- return HasBeenSanitized() ? value() : PathString();
- }
-
- inline bool operator==(const Name& right_hand_side) const {
- return this->SyncName::operator==(right_hand_side) &&
- db_value_ == right_hand_side.db_value_;
- }
- inline bool operator!=(const Name& right_hand_side) const {
- return !(*this == right_hand_side);
- }
-
- private:
- // The database name, which is maintained to be a legal and unique-in-parent
- // name.
- DBName db_value_;
-};
-
// Why the singular enums? So the code compile-time dispatches instead of
// runtime dispatches as it would with a single enum and an if() statement.
@@ -515,11 +312,6 @@ class Entry {
Entry(BaseTransaction* trans, GetByHandle, int64 handle);
Entry(BaseTransaction* trans, GetById, const Id& id);
Entry(BaseTransaction* trans, GetByTag, const PathString& tag);
- Entry(BaseTransaction* trans, GetByPath, const PathString& path);
- Entry(BaseTransaction* trans, GetByParentIdAndName, const Id& id,
- const PathString& name);
- Entry(BaseTransaction* trans, GetByParentIdAndDBName, const Id& id,
- const PathString& name);
bool good() const { return 0 != kernel_; }
@@ -563,31 +355,12 @@ class Entry {
DCHECK(kernel_);
return kernel_->ref(field);
}
- inline Name GetName() const {
- DCHECK(kernel_);
- return Name::FromEntryKernel(kernel_);
- }
- inline SyncName GetServerName() const {
- DCHECK(kernel_);
- return SyncName(kernel_->ref(SERVER_NAME),
- kernel_->ref(SERVER_NON_UNIQUE_NAME));
- }
- inline bool SyncNameMatchesServerName() const {
- DCHECK(kernel_);
- SyncName sync_name(GetName());
- return sync_name == GetServerName();
- }
- inline PathString GetSyncNameValue() const {
- DCHECK(kernel_);
- // This should always be equal to GetName().sync_name().value(), but may be
- // faster.
- return kernel_->ref(UNSANITIZED_NAME).empty() ? kernel_->ref(NAME) :
- kernel_->ref(UNSANITIZED_NAME);
- }
- inline bool ExistsOnClientBecauseDatabaseNameIsNonEmpty() const {
+
+ inline bool ExistsOnClientBecauseNameIsNonEmpty() const {
DCHECK(kernel_);
- return !kernel_->ref(NAME).empty();
+ return !kernel_->ref(NON_UNIQUE_NAME).empty();
}
+
inline bool IsRoot() const {
DCHECK(kernel_);
return kernel_->ref(ID).IsRoot();
@@ -635,11 +408,6 @@ class MutableEntry : public Entry {
MutableEntry(WriteTransaction* trans, CreateNewUpdateItem, const Id& id);
MutableEntry(WriteTransaction* trans, GetByHandle, int64);
MutableEntry(WriteTransaction* trans, GetById, const Id&);
- MutableEntry(WriteTransaction* trans, GetByPath, const PathString& path);
- MutableEntry(WriteTransaction* trans, GetByParentIdAndName, const Id&,
- const PathString& name);
- MutableEntry(WriteTransaction* trans, GetByParentIdAndDBName,
- const Id& parentid, const PathString& name);
inline WriteTransaction* write_transaction() const {
return write_transaction_;
@@ -648,19 +416,12 @@ class MutableEntry : public Entry {
// Field Accessors. Some of them trigger the re-indexing of the entry.
// Return true on success, return false on failure, which means
// that putting the value would have caused a duplicate in the index.
+  // TODO(chron): Remove some of these unnecessary return values.
bool Put(Int64Field field, const int64& value);
bool Put(IdField field, const Id& value);
bool Put(StringField field, const PathString& value);
bool Put(BaseVersion field, int64 value);
- inline bool PutName(const Name& name) {
- return (Put(NAME, name.db_value()) &&
- Put(UNSANITIZED_NAME, name.GetUnsanitizedName()) &&
- Put(NON_UNIQUE_NAME, name.non_unique_value()));
- }
- inline bool PutServerName(const SyncName& server_name) {
- return (Put(SERVER_NAME, server_name.value()) &&
- Put(SERVER_NON_UNIQUE_NAME, server_name.non_unique_value()));
- }
+
inline bool Put(BlobField field, const Blob& value) {
return PutField(field, value);
}
@@ -672,10 +433,6 @@ class MutableEntry : public Entry {
}
bool Put(IndexedBitField field, bool value);
- // Avoids temporary collision in index when renaming a bookmark into another
- // folder.
- bool PutParentIdAndName(const Id& parent_id, const Name& name);
-
// Sets the position of this item, and updates the entry kernels of the
// adjacent siblings so that list invariants are maintained. Returns false
// and fails if |predecessor_id| does not identify a sibling. Pass the root
@@ -732,7 +489,7 @@ template <Int64Field field_index>
class SameField;
template <Int64Field field_index>
class HashField;
-class LessParentIdAndNames;
+class LessParentIdAndHandle;
class LessMultiIncusionTargetAndMetahandle;
template <typename FieldType, FieldType field_index>
class LessField;
@@ -922,21 +679,18 @@ class Directory {
std::string cache_guid() const;
protected: // for friends, mainly used by Entry constructors
- EntryKernel* GetChildWithName(const Id& parent_id, const PathString& name);
- EntryKernel* GetChildWithDBName(const Id& parent_id, const PathString& name);
EntryKernel* GetEntryByHandle(const int64 handle);
EntryKernel* GetEntryByHandle(const int64 metahandle, ScopedKernelLock* lock);
EntryKernel* GetEntryById(const Id& id);
EntryKernel* GetEntryByTag(const PathString& tag);
EntryKernel* GetRootEntry();
- EntryKernel* GetEntryByPath(const PathString& path);
bool ReindexId(EntryKernel* const entry, const Id& new_id);
- bool ReindexParentIdAndName(EntryKernel* const entry, const Id& new_parent_id,
- const PathString& new_name);
- // These don't do the semantic checking that the redirector needs.
+ void ReindexParentId(EntryKernel* const entry, const Id& new_parent_id);
+
+ // These don't do semantic checking.
// The semantic checking is implemented higher up.
- bool Undelete(EntryKernel* const entry);
- bool Delete(EntryKernel* const entry);
+ void Undelete(EntryKernel* const entry);
+ void Delete(EntryKernel* const entry);
// Overridden by tests.
virtual DirectoryBackingStore* CreateBackingStore(
@@ -947,12 +701,6 @@ class Directory {
// These private versions expect the kernel lock to already be held
// before calling.
EntryKernel* GetEntryById(const Id& id, ScopedKernelLock* const lock);
- EntryKernel* GetChildWithName(const Id& parent_id,
- const PathString& name,
- ScopedKernelLock* const lock);
- EntryKernel* GetChildWithNameImpl(const Id& parent_id,
- const PathString& name,
- ScopedKernelLock* const lock);
DirOpenResult OpenImpl(const FilePath& file_path, const PathString& name);
@@ -1081,9 +829,10 @@ class Directory {
typedef std::set<EntryKernel*, LessField<IdField, ID> > IdsIndex;
// All entries in memory must be in both the MetahandlesIndex and
  // the IdsIndex, but only non-deleted entries will be in the
- // ParentIdAndNamesIndex, because there can be multiple deleted
- // entries with the same parent id and name.
- typedef std::set<EntryKernel*, LessParentIdAndNames> ParentIdAndNamesIndex;
+ // ParentIdChildIndex.
+ // This index contains EntryKernels ordered by parent ID and metahandle.
+ // It allows efficient lookup of the children of a given parent.
+ typedef std::set<EntryKernel*, LessParentIdAndHandle> ParentIdChildIndex;
typedef std::vector<int64> MetahandlesToPurge;
private:
@@ -1116,7 +865,7 @@ class Directory {
Lock mutex;
MetahandlesIndex* metahandles_index; // Entries indexed by metahandle
IdsIndex* ids_index; // Entries indexed by id
- ParentIdAndNamesIndex* parent_id_and_names_index;
+ ParentIdChildIndex* parent_id_child_index;
// So we don't have to create an EntryKernel every time we want to
  // look something up in an index. Needle in haystack metaphor.
EntryKernel needle;
@@ -1179,7 +928,7 @@ class ScopedKernelLock {
DISALLOW_COPY_AND_ASSIGN(ScopedKernelLock);
};
-// Transactions are now processed FIFO (+overlapping reads).
+// Transactions are now processed FIFO with a straight lock
class BaseTransaction {
friend class Entry;
public:
@@ -1254,9 +1003,6 @@ int ComparePathNames16(void*, int a_bytes, const void* a, int b_bytes,
int64 Now();
-// Does wildcard processing.
-BOOL PathNameMatch(const PathString& pathname, const PathString& pathspec);
-
class ExtendedAttribute {
public:
ExtendedAttribute(BaseTransaction* trans, GetByHandle,
diff --git a/chrome/browser/sync/syncable/syncable_columns.h b/chrome/browser/sync/syncable/syncable_columns.h
index cd6f281..92caf64 100644..100755
--- a/chrome/browser/sync/syncable/syncable_columns.h
+++ b/chrome/browser/sync/syncable/syncable_columns.h
@@ -50,11 +50,8 @@ static const ColumnSpec g_metas_columns[] = {
{"server_is_bookmark_object", "bit default 0"},
//////////////////////////////////////
// Strings
- {"name", "varchar(255) COLLATE PATHNAME"},
- {"unsanitized_name", "varchar(255) COLLATE PATHNAME"},
{"non_unique_name", "varchar"},
- {"server_name", "varchar(255) COLLATE PATHNAME"},
- {"server_non_unique_name", "varchar"},
+ {"server_non_unique_name", "varchar(255) COLLATE PATHNAME"},
{"bookmark_url", "varchar"},
{"server_bookmark_url", "varchar"},
{"singleton_tag", "varchar"},
diff --git a/chrome/browser/sync/syncable/syncable_unittest.cc b/chrome/browser/sync/syncable/syncable_unittest.cc
index d5bcd8c..2911a46 100644..100755
--- a/chrome/browser/sync/syncable/syncable_unittest.cc
+++ b/chrome/browser/sync/syncable/syncable_unittest.cc
@@ -42,6 +42,7 @@
#include "chrome/browser/sync/util/path_helpers.h"
#include "chrome/browser/sync/util/query_helpers.h"
#include "chrome/test/sync/engine/test_id_factory.h"
+#include "chrome/test/sync/engine/test_syncable_utils.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "third_party/sqlite/preprocessed/sqlite3.h"
@@ -52,6 +53,7 @@ using std::string;
namespace syncable {
+namespace {
// A lot of these tests were written expecting to be able to read and write
// object data on entries. However, the design has changed.
void PutDataAsExtendedAttribute(WriteTransaction* wtrans,
@@ -68,106 +70,96 @@ void ExpectDataFromExtendedAttributeEquals(BaseTransaction* trans,
Entry* e,
const char* bytes,
size_t bytes_length) {
+ ASSERT_TRUE(e->good());
Blob expected_value(bytes, bytes + bytes_length);
ExtendedAttributeKey key(e->Get(META_HANDLE), PSTR("DATA"));
ExtendedAttribute attr(trans, GET_BY_HANDLE, key);
EXPECT_FALSE(attr.is_deleted());
EXPECT_EQ(expected_value, attr.value());
}
-
+} // namespace
TEST(Syncable, General) {
remove("SimpleTest.sqlite3");
Directory dir;
FilePath test_db(FILE_PATH_LITERAL("SimpleTest.sqlite3"));
dir.Open(test_db, PSTR("SimpleTest"));
- bool entry_exists = false;
- int64 metahandle;
+
+ int64 written_metahandle;
const Id id = TestIdFactory::FromNumber(99);
- // Test simple read operations.
+ PathString name = PSTR("Jeff");
+ // Test simple read operations on an empty DB.
{
ReadTransaction rtrans(&dir, __FILE__, __LINE__);
Entry e(&rtrans, GET_BY_ID, id);
- if (e.good()) {
- entry_exists = true;
- metahandle = e.Get(META_HANDLE);
- }
+ ASSERT_FALSE(e.good()); // Hasn't been written yet.
+
Directory::ChildHandles child_handles;
dir.GetChildHandles(&rtrans, rtrans.root_id(), &child_handles);
- for (Directory::ChildHandles::iterator i = child_handles.begin();
- i != child_handles.end(); ++i)
- cout << *i << endl;
-
- Entry e2(&rtrans, GET_BY_PATH, PSTR("/Hello\\World/"));
+ EXPECT_TRUE(child_handles.empty());
}
// Test creating a new meta entry.
{
WriteTransaction wtrans(&dir, UNITTEST, __FILE__, __LINE__);
- MutableEntry me(&wtrans, CREATE, wtrans.root_id(), PSTR("Jeff"));
- ASSERT_TRUE(entry_exists ? !me.good() : me.good());
- if (me.good()) {
- me.Put(ID, id);
- me.Put(BASE_VERSION, 1);
- metahandle = me.Get(META_HANDLE);
+ MutableEntry me(&wtrans, CREATE, wtrans.root_id(), name);
+ ASSERT_TRUE(me.good());
+ me.Put(ID, id);
+ me.Put(BASE_VERSION, 1);
+ written_metahandle = me.Get(META_HANDLE);
+ }
+
+  // Test GetChildHandles once something is in the DB.
+ // Also check that GET_BY_ID works.
+ {
+ ReadTransaction rtrans(&dir, __FILE__, __LINE__);
+ Entry e(&rtrans, GET_BY_ID, id);
+ ASSERT_TRUE(e.good());
+
+ Directory::ChildHandles child_handles;
+ dir.GetChildHandles(&rtrans, rtrans.root_id(), &child_handles);
+ EXPECT_EQ(1u, child_handles.size());
+
+ for (Directory::ChildHandles::iterator i = child_handles.begin();
+ i != child_handles.end(); ++i) {
+ EXPECT_EQ(*i, written_metahandle);
}
}
- // Test writing data to an entity.
+ // Test writing data to an entity. Also check that GET_BY_HANDLE works.
static const char s[] = "Hello World.";
{
WriteTransaction trans(&dir, UNITTEST, __FILE__, __LINE__);
- MutableEntry e(&trans, GET_BY_PATH,
- PathString(kPathSeparator) + PSTR("Jeff"));
+ MutableEntry e(&trans, GET_BY_HANDLE, written_metahandle);
ASSERT_TRUE(e.good());
PutDataAsExtendedAttribute(&trans, &e, s, sizeof(s));
}
- // Test reading back the name contents that we just wrote.
+ // Test reading back the contents that we just wrote.
{
WriteTransaction trans(&dir, UNITTEST, __FILE__, __LINE__);
- MutableEntry e(&trans, GET_BY_PATH,
- PathString(kPathSeparator) + PSTR("Jeff"));
+ MutableEntry e(&trans, GET_BY_HANDLE, written_metahandle);
ASSERT_TRUE(e.good());
ExpectDataFromExtendedAttributeEquals(&trans, &e, s, sizeof(s));
}
+ // Verify it exists in the folder.
+ {
+ ReadTransaction rtrans(&dir, __FILE__, __LINE__);
+ EXPECT_EQ(1, CountEntriesWithName(&rtrans, rtrans.root_id(), name));
+ }
+
// Now delete it.
{
WriteTransaction trans(&dir, UNITTEST, __FILE__, __LINE__);
- MutableEntry e(&trans, CREATE, trans.root_id(), PSTR("New File"));
+ MutableEntry e(&trans, GET_BY_HANDLE, written_metahandle);
e.Put(IS_DEL, true);
+
+ EXPECT_EQ(0, CountEntriesWithName(&trans, trans.root_id(), name));
}
dir.SaveChanges();
-}
-
-TEST(Syncable, NameClassTest) {
- const PathString foo(PSTR("foo"));
- const PathString bar(PSTR("bar"));
-
- Name name1(foo);
- EXPECT_EQ(name1.value(), foo);
- EXPECT_EQ(name1.db_value(), foo);
- EXPECT_FALSE(name1.HasBeenSanitized());
- EXPECT_TRUE(name1.GetUnsanitizedName().empty());
-
- Name name2(foo, foo);
- EXPECT_EQ(name2.value(), foo);
- EXPECT_EQ(name2.db_value(), foo);
- EXPECT_FALSE(name2.HasBeenSanitized());
- EXPECT_TRUE(name2.GetUnsanitizedName().empty());
-
- Name name3(foo, bar);
- EXPECT_EQ(name3.value(), bar);
- EXPECT_EQ(name3.db_value(), foo);
- EXPECT_TRUE(name3.HasBeenSanitized());
- EXPECT_EQ(name3.GetUnsanitizedName(), bar);
-
- EXPECT_TRUE(name1 == name2);
- EXPECT_FALSE(name1 != name2);
- EXPECT_FALSE(name2 == name3);
- EXPECT_TRUE(name2 != name3);
+ remove("SimpleTest.sqlite3");
}
namespace {
@@ -260,16 +252,6 @@ TEST_F(SyncableDirectoryTest, TestBasicLookupValidID) {
ASSERT_TRUE(e.good());
}
-TEST_F(SyncableDirectoryTest, TestBasicCaseSensitivity) {
- PathString name = PSTR("RYAN");
- PathString conflicting_name = PSTR("ryan");
- CreateEntry(name);
-
- WriteTransaction wtrans(dir_.get(), UNITTEST, __FILE__, __LINE__);
- MutableEntry me(&wtrans, CREATE, wtrans.root_id(), conflicting_name);
- ASSERT_FALSE(me.good());
-}
-
TEST_F(SyncableDirectoryTest, TestDelete) {
PathString name = PSTR("peanut butter jelly time");
WriteTransaction trans(dir_.get(), UNITTEST, __FILE__, __LINE__);
@@ -283,15 +265,13 @@ TEST_F(SyncableDirectoryTest, TestDelete) {
ASSERT_TRUE(e3.good());
ASSERT_TRUE(e3.Put(IS_DEL, true));
+ ASSERT_TRUE(e1.Put(IS_DEL, false));
+ ASSERT_TRUE(e2.Put(IS_DEL, false));
ASSERT_TRUE(e3.Put(IS_DEL, false));
- ASSERT_FALSE(e1.Put(IS_DEL, false));
- ASSERT_FALSE(e2.Put(IS_DEL, false));
- ASSERT_TRUE(e3.Put(IS_DEL, true));
- ASSERT_TRUE(e1.Put(IS_DEL, false));
- ASSERT_FALSE(e2.Put(IS_DEL, false));
- ASSERT_FALSE(e3.Put(IS_DEL, false));
ASSERT_TRUE(e1.Put(IS_DEL, true));
+ ASSERT_TRUE(e2.Put(IS_DEL, true));
+ ASSERT_TRUE(e3.Put(IS_DEL, true));
}
TEST_F(SyncableDirectoryTest, TestGetUnsynced) {
@@ -462,7 +442,7 @@ TEST_F(SyncableDirectoryTest, DeleteBug_531383) {
ASSERT_TRUE(twin.good());
ASSERT_TRUE(twin.Put(IS_DEL, true));
ASSERT_TRUE(grandchild.Put(IS_DEL, false));
- ASSERT_FALSE(twin.Put(IS_DEL, false));
+
grandchild_handle = grandchild.Get(META_HANDLE);
twin_handle = twin.Get(META_HANDLE);
}
@@ -531,86 +511,98 @@ TEST_F(SyncableDirectoryTest, TestIsLegalNewParent) {
ASSERT_FALSE(IsLegalNewParent(parent, grandchild));
}
-TEST_F(SyncableDirectoryTest, TestFindEntryInFolder) {
+TEST_F(SyncableDirectoryTest, TestEntryIsInFolder) {
// Create a subdir and an entry.
int64 entry_handle;
+ syncable::Id folder_id;
+ syncable::Id entry_id;
+ PathString entry_name = PSTR("entry");
+
{
WriteTransaction trans(dir_.get(), UNITTEST, __FILE__, __LINE__);
MutableEntry folder(&trans, CREATE, trans.root_id(), PSTR("folder"));
ASSERT_TRUE(folder.good());
EXPECT_TRUE(folder.Put(IS_DIR, true));
EXPECT_TRUE(folder.Put(IS_UNSYNCED, true));
- MutableEntry entry(&trans, CREATE, folder.Get(ID), PSTR("entry"));
+ folder_id = folder.Get(ID);
+
+ MutableEntry entry(&trans, CREATE, folder.Get(ID), entry_name);
ASSERT_TRUE(entry.good());
entry_handle = entry.Get(META_HANDLE);
entry.Put(IS_UNSYNCED, true);
+ entry_id = entry.Get(ID);
}
// Make sure we can find the entry in the folder.
{
ReadTransaction trans(dir_.get(), __FILE__, __LINE__);
- Entry entry(&trans, GET_BY_PATH, PathString(kPathSeparator) +
- PSTR("folder") +
- kPathSeparator + PSTR("entry"));
- ASSERT_TRUE(entry.good());
- ASSERT_TRUE(entry.Get(META_HANDLE) == entry_handle);
- }
-}
+ EXPECT_EQ(0, CountEntriesWithName(&trans, trans.root_id(), entry_name));
+ EXPECT_EQ(1, CountEntriesWithName(&trans, folder_id, entry_name));
-TEST_F(SyncableDirectoryTest, TestGetByParentIdAndName) {
- PathString name = PSTR("Bob");
- Id id = TestIdFactory::MakeServer("ID for Bob");
- {
- WriteTransaction wtrans(dir_.get(), UNITTEST, __FILE__, __LINE__);
- MutableEntry entry(&wtrans, CREATE, wtrans.root_id() /*entry id*/, name);
+ Entry entry(&trans, GET_BY_ID, entry_id);
ASSERT_TRUE(entry.good());
- entry.Put(IS_DIR, true);
- entry.Put(ID, id);
- entry.Put(BASE_VERSION, 1);
- entry.Put(IS_UNSYNCED, true);
- }
- {
- WriteTransaction wtrans(dir_.get(), UNITTEST, __FILE__, __LINE__);
- MutableEntry entry(&wtrans, GET_BY_PARENTID_AND_NAME, wtrans.root_id(),
- name);
- ASSERT_TRUE(entry.good());
- ASSERT_TRUE(id == entry.Get(ID));
- }
- {
- ReadTransaction trans(dir_.get(), __FILE__, __LINE__);
- Entry entry(&trans, GET_BY_PARENTID_AND_NAME, trans.root_id(), name);
- ASSERT_TRUE(entry.good());
- ASSERT_TRUE(id == entry.Get(ID));
+ EXPECT_EQ(entry_handle, entry.Get(META_HANDLE));
+ EXPECT_TRUE(entry.Get(NON_UNIQUE_NAME) == entry_name);
+ EXPECT_TRUE(entry.Get(PARENT_ID) == folder_id);
}
}
-TEST_F(SyncableDirectoryTest, TestParentIDIndexUpdate) {
+TEST_F(SyncableDirectoryTest, TestParentIdIndexUpdate) {
+ PathString child_name = PSTR("child");
+
WriteTransaction wt(dir_.get(), UNITTEST, __FILE__, __LINE__);
- MutableEntry folder(&wt, CREATE, wt.root_id(), PSTR("oldname"));
- folder.Put(NAME, PSTR("newname"));
- folder.Put(IS_UNSYNCED, true);
- Entry entry(&wt, GET_BY_PATH, PSTR("newname"));
- ASSERT_TRUE(entry.good());
+ MutableEntry parent_folder(&wt, CREATE, wt.root_id(), PSTR("folder1"));
+ parent_folder.Put(IS_UNSYNCED, true);
+ EXPECT_TRUE(parent_folder.Put(IS_DIR, true));
+
+ MutableEntry parent_folder2(&wt, CREATE, wt.root_id(), PSTR("folder2"));
+ parent_folder2.Put(IS_UNSYNCED, true);
+ EXPECT_TRUE(parent_folder2.Put(IS_DIR, true));
+
+ MutableEntry child(&wt, CREATE, parent_folder.Get(ID), child_name);
+ EXPECT_TRUE(child.Put(IS_DIR, true));
+ child.Put(IS_UNSYNCED, true);
+
+ ASSERT_TRUE(child.good());
+
+ EXPECT_EQ(0, CountEntriesWithName(&wt, wt.root_id(), child_name));
+ EXPECT_EQ(parent_folder.Get(ID), child.Get(PARENT_ID));
+ EXPECT_EQ(1, CountEntriesWithName(&wt, parent_folder.Get(ID), child_name));
+ EXPECT_EQ(0, CountEntriesWithName(&wt, parent_folder2.Get(ID), child_name));
+ child.Put(PARENT_ID, parent_folder2.Get(ID));
+ EXPECT_EQ(parent_folder2.Get(ID), child.Get(PARENT_ID));
+ EXPECT_EQ(0, CountEntriesWithName(&wt, parent_folder.Get(ID), child_name));
+ EXPECT_EQ(1, CountEntriesWithName(&wt, parent_folder2.Get(ID), child_name));
+
}
TEST_F(SyncableDirectoryTest, TestNoReindexDeletedItems) {
+ PathString folder_name = PSTR("folder");
+ PathString new_name = PSTR("new_name");
+
WriteTransaction trans(dir_.get(), UNITTEST, __FILE__, __LINE__);
- MutableEntry folder(&trans, CREATE, trans.root_id(), PSTR("folder"));
+ MutableEntry folder(&trans, CREATE, trans.root_id(), folder_name);
ASSERT_TRUE(folder.good());
ASSERT_TRUE(folder.Put(IS_DIR, true));
ASSERT_TRUE(folder.Put(IS_DEL, true));
- Entry gone(&trans, GET_BY_PARENTID_AND_NAME, trans.root_id(), PSTR("folder"));
- ASSERT_FALSE(gone.good());
- ASSERT_TRUE(folder.PutParentIdAndName(trans.root_id(),
- Name(PSTR("new_name"))));
+
+ EXPECT_EQ(0, CountEntriesWithName(&trans, trans.root_id(), folder_name));
+
+ MutableEntry deleted(&trans, GET_BY_ID, folder.Get(ID));
+ ASSERT_TRUE(deleted.good());
+ ASSERT_TRUE(deleted.Put(PARENT_ID, trans.root_id()));
+ ASSERT_TRUE(deleted.Put(NON_UNIQUE_NAME, new_name));
+
+ EXPECT_EQ(0, CountEntriesWithName(&trans, trans.root_id(), folder_name));
+ EXPECT_EQ(0, CountEntriesWithName(&trans, trans.root_id(), new_name));
}
TEST_F(SyncableDirectoryTest, TestCaseChangeRename) {
WriteTransaction trans(dir_.get(), UNITTEST, __FILE__, __LINE__);
MutableEntry folder(&trans, CREATE, trans.root_id(), PSTR("CaseChange"));
ASSERT_TRUE(folder.good());
- EXPECT_TRUE(folder.PutParentIdAndName(trans.root_id(),
- Name(PSTR("CASECHANGE"))));
+ EXPECT_TRUE(folder.Put(PARENT_ID, trans.root_id()));
+ EXPECT_TRUE(folder.Put(NON_UNIQUE_NAME, PSTR("CASECHANGE")));
EXPECT_TRUE(folder.Put(IS_DEL, true));
}
@@ -633,24 +625,29 @@ TEST_F(SyncableDirectoryTest, TestShareInfo) {
}
TEST_F(SyncableDirectoryTest, TestSimpleFieldsPreservedDuringSaveChanges) {
- Id id = TestIdFactory::FromNumber(1);
+ Id update_id = TestIdFactory::FromNumber(1);
+ Id create_id;
EntryKernel create_pre_save, update_pre_save;
EntryKernel create_post_save, update_post_save;
+ PathString create_name = PSTR("Create");
+
{
WriteTransaction trans(dir_.get(), UNITTEST, __FILE__, __LINE__);
- MutableEntry create(&trans, CREATE, trans.root_id(), PSTR("Create"));
- MutableEntry update(&trans, CREATE_NEW_UPDATE_ITEM, id);
+ MutableEntry create(&trans, CREATE, trans.root_id(), create_name);
+ MutableEntry update(&trans, CREATE_NEW_UPDATE_ITEM, update_id);
create.Put(IS_UNSYNCED, true);
update.Put(IS_UNAPPLIED_UPDATE, true);
create_pre_save = create.GetKernelCopy();
update_pre_save = update.GetKernelCopy();
+ create_id = create.Get(ID);
}
dir_->SaveChanges();
+
{
ReadTransaction trans(dir_.get(), __FILE__, __LINE__);
- Entry create(&trans, GET_BY_PARENTID_AND_NAME, trans.root_id(),
- PSTR("Create"));
- Entry update(&trans, GET_BY_ID, id);
+ Entry create(&trans, GET_BY_ID, create_id);
+ EXPECT_EQ(1, CountEntriesWithName(&trans, trans.root_id(), create_name));
+ Entry update(&trans, GET_BY_ID, update_id);
create_post_save = create.GetKernelCopy();
update_post_save = update.GetKernelCopy();
}
@@ -713,8 +710,8 @@ TEST_F(SyncableDirectoryTest, TestSaveChangesFailure) {
MutableEntry aguilera(&trans, GET_BY_HANDLE, handle1);
ASSERT_TRUE(aguilera.good());
- aguilera.Put(NAME, PSTR("christina"));
- ASSERT_TRUE(aguilera.GetKernelCopy().dirty[NAME]);
+ aguilera.Put(NON_UNIQUE_NAME, PSTR("christina"));
+ ASSERT_TRUE(aguilera.GetKernelCopy().dirty[NON_UNIQUE_NAME]);
MutableEntry kids_on_block(&trans, CREATE, trans.root_id(), PSTR("kids"));
ASSERT_TRUE(kids_on_block.good());
@@ -738,7 +735,7 @@ TEST_F(SyncableDirectoryTest, TestSaveChangesFailure) {
Entry kids_on_block(&trans, GET_BY_HANDLE, handle2);
ASSERT_TRUE(kids_on_block.good());
- EXPECT_TRUE(aguilera.dirty[NAME]);
+ EXPECT_TRUE(aguilera.dirty[NON_UNIQUE_NAME]);
EXPECT_TRUE(kids_on_block.Get(IS_NEW));
}
}
@@ -749,8 +746,9 @@ void SyncableDirectoryTest::ValidateEntry(BaseTransaction* trans, int64 id,
bool is_del) {
Entry e(trans, GET_BY_ID, TestIdFactory::FromNumber(id));
ASSERT_TRUE(e.good());
- if (check_name)
- ASSERT_TRUE(name == e.Get(NAME));
+ if (check_name) {
+ ASSERT_TRUE(name == e.Get(NON_UNIQUE_NAME));
+ }
ASSERT_TRUE(base_version == e.Get(BASE_VERSION));
ASSERT_TRUE(server_version == e.Get(SERVER_VERSION));
ASSERT_TRUE(is_del == e.Get(IS_DEL));
@@ -891,6 +889,7 @@ class DirectoryKernelStalenessBugDelegate : public ThreadBugDelegate {
const char test_bytes[] = "test data";
const PathString dirname = PSTR("DirectoryKernelStalenessBug");
AutoLock scoped_lock(step_->mutex);
+ const Id jeff_id = TestIdFactory::FromNumber(100);
while (step_->number < 4) {
while (step_->number % 2 != role_) {
@@ -909,7 +908,7 @@ class DirectoryKernelStalenessBugDelegate : public ThreadBugDelegate {
WriteTransaction trans(dir, UNITTEST, __FILE__, __LINE__);
MutableEntry me(&trans, CREATE, trans.root_id(), PSTR("Jeff"));
me.Put(BASE_VERSION, 1);
- me.Put(ID, TestIdFactory::FromNumber(100));
+ me.Put(ID, jeff_id);
PutDataAsExtendedAttribute(&trans, &me, test_bytes,
sizeof(test_bytes));
}
@@ -938,7 +937,7 @@ class DirectoryKernelStalenessBugDelegate : public ThreadBugDelegate {
ScopedDirLookup dir(directory_manager_, dirname);
CHECK(dir.good());
ReadTransaction trans(dir, __FILE__, __LINE__);
- Entry e(&trans, GET_BY_PATH, PSTR("Jeff"));
+ Entry e(&trans, GET_BY_ID, jeff_id);
ExpectDataFromExtendedAttributeEquals(&trans, &e, test_bytes,
sizeof(test_bytes));
}
@@ -994,9 +993,8 @@ class StressTransactionsDelegate : public PlatformThread::Delegate {
const int rand_action = rand() % 10;
if (rand_action < 4 && !path_name.empty()) {
ReadTransaction trans(dir, __FILE__, __LINE__);
- Entry e(&trans, GET_BY_PARENTID_AND_NAME, trans.root_id(), path_name);
+ CHECK(1 == CountEntriesWithName(&trans, trans.root_id(), path_name));
PlatformThread::Sleep(rand() % 10);
- CHECK(e.good());
} else {
string unique_name = StringPrintf("%d.%d", thread_number_,
entry_count++);
@@ -1071,31 +1069,6 @@ TEST(Syncable, ComparePathNames) {
}
}
-#if defined(OS_WIN)
-TEST(Syncable, PathNameMatch) {
- // basic stuff, not too many otherwise we're testing the os.
- EXPECT_TRUE(PathNameMatch(PSTR("bob"), PSTR("bob")));
- EXPECT_FALSE(PathNameMatch(PSTR("bob"), PSTR("fred")));
- // Test our ; extension.
- EXPECT_TRUE(PathNameMatch(PSTR("bo;b"), PSTR("bo;b")));
- EXPECT_TRUE(PathNameMatch(PSTR("bo;b"), PSTR("bo*")));
- EXPECT_FALSE(PathNameMatch(PSTR("bo;b"), PSTR("co;b")));
- EXPECT_FALSE(PathNameMatch(PSTR("bo;b"), PSTR("co*")));
- // Test our fixes for prepended spaces.
- EXPECT_TRUE(PathNameMatch(PSTR(" bob"), PSTR(" bo*")));
- EXPECT_TRUE(PathNameMatch(PSTR(" bob"), PSTR(" bob")));
- EXPECT_FALSE(PathNameMatch(PSTR("bob"), PSTR(" bob")));
- EXPECT_FALSE(PathNameMatch(PSTR(" bob"), PSTR("bob")));
- // Combo test.
- EXPECT_TRUE(PathNameMatch(PSTR(" b;ob"), PSTR(" b;o*")));
- EXPECT_TRUE(PathNameMatch(PSTR(" b;ob"), PSTR(" b;ob")));
- EXPECT_FALSE(PathNameMatch(PSTR("b;ob"), PSTR(" b;ob")));
- EXPECT_FALSE(PathNameMatch(PSTR(" b;ob"), PSTR("b;ob")));
- // other whitespace should give no matches.
- EXPECT_FALSE(PathNameMatch(PSTR("bob"), PSTR("\tbob")));
-}
-#endif // defined(OS_WIN)
-
void FakeSync(MutableEntry* e, const char* fake_id) {
e->Put(IS_UNSYNCED, false);
e->Put(BASE_VERSION, 2);
@@ -1104,11 +1077,11 @@ void FakeSync(MutableEntry* e, const char* fake_id) {
TEST_F(SyncableDirectoryTest, Bug1509232) {
const PathString a = PSTR("alpha");
-
- CreateEntry(a, dir_.get()->NextId());
+ const Id entry_id = dir_.get()->NextId();
+ CreateEntry(a, entry_id);
{
WriteTransaction trans(dir_.get(), UNITTEST, __FILE__, __LINE__);
- MutableEntry e(&trans, GET_BY_PATH, a);
+ MutableEntry e(&trans, GET_BY_ID, entry_id);
ASSERT_TRUE(e.good());
ExtendedAttributeKey key(e.Get(META_HANDLE), PSTR("resourcefork"));
MutableExtendedAttribute ext(&trans, CREATE, key);
diff --git a/chrome/chrome.gyp b/chrome/chrome.gyp
index b72ec0a..15cc7ac 100755
--- a/chrome/chrome.gyp
+++ b/chrome/chrome.gyp
@@ -5509,6 +5509,8 @@
'test/sync/engine/test_directory_setter_upper.cc',
'test/sync/engine/test_directory_setter_upper.h',
'test/sync/engine/test_id_factory.h',
+ 'test/sync/engine/test_syncable_utils.cc',
+ 'test/sync/engine/test_syncable_utils.h',
],
'include_dirs': [
'..',
diff --git a/chrome/test/sync/engine/mock_server_connection.cc b/chrome/test/sync/engine/mock_server_connection.cc
index 28525b2..fdfb334 100644..100755
--- a/chrome/test/sync/engine/mock_server_connection.cc
+++ b/chrome/test/sync/engine/mock_server_connection.cc
@@ -41,7 +41,7 @@ MockConnectionManager::MockConnectionManager(DirectoryManager* dirmgr,
fail_next_postbuffer_(false),
directory_manager_(dirmgr),
directory_name_(name),
- mid_commit_callback_function_(NULL),
+ mid_commit_callback_(NULL),
mid_commit_observer_(NULL),
throttling_(false),
fail_non_periodic_get_updates_(false),
@@ -59,9 +59,8 @@ void MockConnectionManager::SetCommitTimeRename(string prepend) {
commit_time_rename_prepended_string_ = prepend;
}
-void MockConnectionManager::SetMidCommitCallbackFunction(
- MockConnectionManager::TestCallbackFunction callback) {
- mid_commit_callback_function_ = callback;
+void MockConnectionManager::SetMidCommitCallback(Closure* callback) {
+ mid_commit_callback_ = callback;
}
void MockConnectionManager::SetMidCommitObserver(
@@ -131,9 +130,8 @@ bool MockConnectionManager::PostBufferToPath(const PostBufferParams* params,
}
response.SerializeToString(params->buffer_out);
- if (mid_commit_callback_function_) {
- if (mid_commit_callback_function_(directory))
- mid_commit_callback_function_ = 0;
+ if (mid_commit_callback_) {
+ mid_commit_callback_->Run();
}
if (mid_commit_observer_) {
mid_commit_observer_->Observe();
diff --git a/chrome/test/sync/engine/mock_server_connection.h b/chrome/test/sync/engine/mock_server_connection.h
index f4a110c..e8fa974 100644..100755
--- a/chrome/test/sync/engine/mock_server_connection.h
+++ b/chrome/test/sync/engine/mock_server_connection.h
@@ -13,6 +13,7 @@
#include "chrome/browser/sync/engine/net/server_connection_manager.h"
#include "chrome/browser/sync/protocol/sync.pb.h"
#include "chrome/browser/sync/syncable/directory_manager.h"
+#include "chrome/browser/sync/util/closure.h"
using std::string;
using std::vector;
@@ -27,10 +28,6 @@ struct HttpResponse;
class MockConnectionManager : public browser_sync::ServerConnectionManager {
public:
- // A callback function type. These can be set to be called when server
- // activity would normally take place. This aids simulation of race
- // conditions.
- typedef bool (*TestCallbackFunction)(syncable::Directory* dir);
class MidCommitObserver {
public:
virtual void Observe() = 0;
@@ -49,7 +46,7 @@ class MockConnectionManager : public browser_sync::ServerConnectionManager {
virtual bool IsUserAuthenticated();
// Control of commit response.
- void SetMidCommitCallbackFunction(TestCallbackFunction callback);
+ void SetMidCommitCallback(Closure* callback);
void SetMidCommitObserver(MidCommitObserver* observer);
// Set this if you want commit to perform commit time rename. Will request
@@ -209,7 +206,7 @@ class MockConnectionManager : public browser_sync::ServerConnectionManager {
// The updates we'll return to the next request.
sync_pb::GetUpdatesResponse updates_;
- TestCallbackFunction mid_commit_callback_function_;
+ Closure* mid_commit_callback_;
MidCommitObserver* mid_commit_observer_;
// The AUTHENTICATE response we'll return for auth requests.
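With the mid-commit hook now taken as a Closure* instead of a bare function pointer, a test fixture can bind one of its own member functions as the hook. A minimal sketch only, assuming the Closure typedef in chrome/browser/sync/util/closure.h is compatible with NewCallback() from base/callback.h; the fixture and method names below are hypothetical, not part of this change.

  class RaceSimulationTest : public testing::Test {
   public:
    // Hypothetical hook body: mutate local state to simulate a
    // client/server race while a commit is in flight.
    void WriteDuringCommit() {}

    void ConfigureMock(MockConnectionManager* mock) {
      // The mock stores the Closure* and calls Run() on it when it
      // fabricates a commit response in PostBufferToPath().
      mock->SetMidCommitCallback(
          NewCallback(this, &RaceSimulationTest::WriteDuringCommit));
    }
  };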
diff --git a/chrome/test/sync/engine/test_syncable_utils.cc b/chrome/test/sync/engine/test_syncable_utils.cc
new file mode 100755
index 0000000..e01d3f0
--- /dev/null
+++ b/chrome/test/sync/engine/test_syncable_utils.cc
@@ -0,0 +1,60 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Utilities to verify the state of items in unit tests.
+
+#include "chrome/test/sync/engine/test_syncable_utils.h"
+
+#include "chrome/browser/sync/syncable/syncable.h"
+
+namespace syncable {
+
+int CountEntriesWithName(BaseTransaction* rtrans,
+ const syncable::Id& parent_id,
+ const PathString& name) {
+ Directory::ChildHandles child_handles;
+ rtrans->directory()->GetChildHandles(rtrans, parent_id, &child_handles);
+ if (child_handles.empty()) {
+ return 0;
+ }
+
+ int number_of_entries_with_name = 0;
+ for (Directory::ChildHandles::iterator i = child_handles.begin();
+ i != child_handles.end(); ++i) {
+ Entry e(rtrans, GET_BY_HANDLE, *i);
+ CHECK(e.good());
+ if (e.Get(NON_UNIQUE_NAME) == name) {
+ ++number_of_entries_with_name;
+ }
+ }
+ return number_of_entries_with_name;
+}
+
+Id GetFirstEntryWithName(BaseTransaction* rtrans,
+ const syncable::Id& parent_id,
+ const PathString& name) {
+ Directory::ChildHandles child_handles;
+ rtrans->directory()->GetChildHandles(rtrans, parent_id, &child_handles);
+
+ for (Directory::ChildHandles::iterator i = child_handles.begin();
+ i != child_handles.end(); ++i) {
+ Entry e(rtrans, GET_BY_HANDLE, *i);
+ CHECK(e.good());
+ if (e.Get(NON_UNIQUE_NAME) == name) {
+ return e.Get(ID);
+ }
+ }
+
+ CHECK(false);
+ return Id();
+}
+
+Id GetOnlyEntryWithName(BaseTransaction* rtrans,
+ const syncable::Id& parent_id,
+ const PathString& name) {
+ CHECK(1 == CountEntriesWithName(rtrans, parent_id, name));
+ return GetFirstEntryWithName(rtrans, parent_id, name);
+}
+
+} // namespace syncable
diff --git a/chrome/test/sync/engine/test_syncable_utils.h b/chrome/test/sync/engine/test_syncable_utils.h
new file mode 100755
index 0000000..4d6ce9c
--- /dev/null
+++ b/chrome/test/sync/engine/test_syncable_utils.h
@@ -0,0 +1,38 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Utilities that are useful in verifying the state of items in a
+// syncable database.
+
+#ifndef CHROME_TEST_SYNC_ENGINE_TEST_SYNCABLE_UTILS_H_
+#define CHROME_TEST_SYNC_ENGINE_TEST_SYNCABLE_UTILS_H_
+
+#include "chrome/browser/sync/syncable/syncable.h"
+
+namespace syncable {
+
+class BaseTransaction;
+class Id;
+
+// Count the number of entries with a given name inside a parent.
+// Useful for checking folder structure and for porting older tests
+// that relied on name uniqueness within folders.
+int CountEntriesWithName(BaseTransaction* rtrans,
+ const syncable::Id& parent_id,
+ const PathString& name);
+
+// Get the first entry ID with name in a parent. The entry *must* exist.
+Id GetFirstEntryWithName(BaseTransaction* rtrans,
+ const syncable::Id& parent_id,
+ const PathString& name);
+
+// Assert that there's only one entry by this name in this parent.
+// Return the Id.
+Id GetOnlyEntryWithName(BaseTransaction* rtrans,
+ const syncable::Id& parent_id,
+ const PathString& name);
+
+} // namespace syncable
+
+#endif // CHROME_TEST_SYNC_ENGINE_TEST_SYNCABLE_UTILS_H_
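A minimal usage sketch for the helpers declared above, as they might appear in a test body. The ReadTransaction constructor, Entry/GET_BY_ID lookup, and the PSTR macro are assumed from the existing syncable test code; dir and parent_id are hypothetical values owned by the test.

  ReadTransaction trans(dir, __FILE__, __LINE__);
  // Names are no longer unique, so several children of parent_id could
  // be called "Pete"; here the test expects exactly one.
  EXPECT_EQ(1, CountEntriesWithName(&trans, parent_id, PSTR("Pete")));
  // When exactly one match is expected, fetch it directly and check it.
  Id pete_id = GetOnlyEntryWithName(&trans, parent_id, PSTR("Pete"));
  Entry pete(&trans, GET_BY_ID, pete_id);
  EXPECT_TRUE(pete.good());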