author    rlarocque@chromium.org <rlarocque@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2011-12-19 23:24:23 +0000
committer rlarocque@chromium.org <rlarocque@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2011-12-19 23:24:23 +0000
commit    2282bd59af40ed835d48df67557aa1e9a80f22e3 (patch)
tree      3544eeb9a990fb294b595d48495ae44b58f6ecab
parent    565758b5f9723fca2b71bb2dc411199d866042a6 (diff)
Remove code related to counting conflicts
There used to be a bug which caused us to count conflicts incorrectly. We would count each conflict or conflict set once (by adding two to the count), then decrement it by one. The problem is that the increment happened only when the conflict resolution function was called for the worker to which the conflict belonged, while the decrement was carried out by each worker. This meant that the count never got very high.

Fixing issue 97832 brought this code back from the dead: the number of workers involved in a sync cycle was greatly reduced, so the counts actually could exceed 2 and the extra-special, tough conflict resolution code could get triggered. This side-effect was completely unintentional.

We've decided that we would like to go back to the old, bug-induced behaviour. This commit removes the buggy counting code and the special-case conflict processing functions that were called only when the count was high. This should restore the old behaviour that we've grown attached to.

This change also obsoletes issue 46621; the code exercised by that unit test has been deleted.

BUG=107816, 97832, 46621
TEST=

Review URL: http://codereview.chromium.org/8976008

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@115037 0039d316-1c4b-4281-b951-d872f2087c98
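(Editor's note: the counting dynamic described above can be illustrated with a small standalone sketch. This is not Chromium code; simulate_cycles, kConflictId, and the plain std::map are hypothetical stand-ins, and only the "+2 by the owning worker, -1 by every worker" arithmetic and the old threshold of 3 used by ProcessConflictSet are taken from the commit message and the diff below. With several workers per cycle the count collapses back to zero each cycle; with a single worker it grows by one per cycle and soon exceeds 2.)

// Standalone C++ sketch of the counting bug described above (hypothetical,
// not Chromium code).
#include <iostream>
#include <map>
#include <string>

int main() {
  std::map<std::string, int> conflict_count;
  const std::string kConflictId = "conflict-set-A";

  auto simulate_cycles = [&](int num_workers, int num_cycles) {
    conflict_count.clear();
    for (int cycle = 0; cycle < num_cycles; ++cycle) {
      // Only the worker owning the conflict bumps the count (by two)...
      conflict_count[kConflictId] += 2;
      // ...but every worker in the cycle decrements it (by one), erasing
      // the entry once it reaches zero.
      for (int w = 0; w < num_workers; ++w) {
        std::map<std::string, int>::iterator it =
            conflict_count.find(kConflictId);
        if (it == conflict_count.end())
          break;
        if (--(it->second) == 0)
          conflict_count.erase(it);
      }
    }
    std::map<std::string, int>::iterator it = conflict_count.find(kConflictId);
    return it == conflict_count.end() ? 0 : it->second;
  };

  // Many workers per cycle: the count collapses every cycle and never
  // reaches the threshold of 3 required by ProcessConflictSet.
  std::cout << "4 workers, 5 cycles: " << simulate_cycles(4, 5) << "\n";
  // One worker per cycle (post issue 97832): the count grows by one per
  // cycle and soon exceeds 2, accidentally enabling the aggressive path.
  std::cout << "1 worker,  5 cycles: " << simulate_cycles(1, 5) << "\n";
  return 0;
}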
-rw-r--r--  chrome/browser/sync/engine/conflict_resolver.cc    334
-rw-r--r--  chrome/browser/sync/engine/conflict_resolver.h      26
-rw-r--r--  chrome/browser/sync/engine/syncer.cc                10
-rw-r--r--  chrome/browser/sync/engine/syncer.h                  7
-rw-r--r--  chrome/browser/sync/engine/syncer_unittest.cc       802
5 files changed, 16 insertions, 1163 deletions
diff --git a/chrome/browser/sync/engine/conflict_resolver.cc b/chrome/browser/sync/engine/conflict_resolver.cc
index e06d02d..ff0127a6 100644
--- a/chrome/browser/sync/engine/conflict_resolver.cc
+++ b/chrome/browser/sync/engine/conflict_resolver.cc
@@ -192,269 +192,6 @@ ConflictResolver::ProcessSimpleConflict(WriteTransaction* trans,
}
}
-ConflictResolver::ConflictSetCountMapKey ConflictResolver::GetSetKey(
- ConflictSet* set) {
- // TODO(sync): Come up with a better scheme for set hashing. This scheme
- // will make debugging easy.
- // If this call to sort is removed, we need to add one before we use
- // binary_search in ProcessConflictSet.
- sort(set->begin(), set->end());
- std::stringstream rv;
- for (ConflictSet::iterator i = set->begin() ; i != set->end() ; ++i )
- rv << *i << ".";
- return rv.str();
-}
-
-namespace {
-
-bool AttemptToFixCircularConflict(WriteTransaction* trans,
- ConflictSet* conflict_set) {
- UMA_HISTOGRAM_COUNTS("Sync.ConflictFixCircularity", 1);
- ConflictSet::const_iterator i;
- for (i = conflict_set->begin() ; i != conflict_set->end() ; ++i) {
- MutableEntry entryi(trans, syncable::GET_BY_ID, *i);
- if (entryi.Get(syncable::PARENT_ID) ==
- entryi.Get(syncable::SERVER_PARENT_ID) ||
- !entryi.Get(syncable::IS_UNAPPLIED_UPDATE) ||
- !entryi.Get(syncable::IS_DIR)) {
- continue;
- }
- Id parentid = entryi.Get(syncable::SERVER_PARENT_ID);
- // Create the entry here as it's the only place we could ever get a parentid
- // that doesn't correspond to a real entry.
- Entry parent(trans, syncable::GET_BY_ID, parentid);
- if (!parent.good()) // server parent update not received yet
- continue;
- // This loop walks upwards from the server parent. If we hit the root (0)
- // all is well. If we hit the entry we're examining it means applying the
- // parent id would cause a loop. We don't need more general loop detection
- // because we know our local tree is valid.
- while (!parentid.IsRoot()) {
- Entry parent(trans, syncable::GET_BY_ID, parentid);
- CHECK(parent.good());
- if (parentid == *i)
- break; // It's a loop.
- parentid = parent.Get(syncable::PARENT_ID);
- }
- if (parentid.IsRoot())
- continue;
- DVLOG(1) << "Overwriting server changes to avoid loop: " << entryi;
- entryi.Put(syncable::BASE_VERSION, entryi.Get(syncable::SERVER_VERSION));
- entryi.Put(syncable::IS_UNSYNCED, true);
- entryi.Put(syncable::IS_UNAPPLIED_UPDATE, false);
- // METRIC conflict resolved by breaking dir loop.
- return true;
- }
- return false;
-}
-
-bool AttemptToFixUnsyncedEntryInDeletedServerTree(WriteTransaction* trans,
- ConflictSet* conflict_set,
- const Entry& entry) {
- if (!entry.Get(syncable::IS_UNSYNCED) || entry.Get(syncable::IS_DEL))
- return false;
- Id parentid = entry.Get(syncable::PARENT_ID);
- MutableEntry parent(trans, syncable::GET_BY_ID, parentid);
- if (!parent.good() || !parent.Get(syncable::IS_UNAPPLIED_UPDATE) ||
- !parent.Get(syncable::SERVER_IS_DEL) ||
- !binary_search(conflict_set->begin(), conflict_set->end(), parentid))
- return false;
- // We're trying to commit into a directory tree that's been deleted. To
- // solve this we recreate the directory tree.
- //
- // We do this in two parts, first we ensure the tree is unaltered since the
- // conflict was detected.
- Id id = parentid;
- while (!id.IsRoot()) {
- if (!binary_search(conflict_set->begin(), conflict_set->end(), id))
- break;
- Entry parent(trans, syncable::GET_BY_ID, id);
- if (!parent.good() || !parent.Get(syncable::IS_UNAPPLIED_UPDATE) ||
- !parent.Get(syncable::SERVER_IS_DEL))
- return false;
- id = parent.Get(syncable::PARENT_ID);
- }
- // Now we fix up the entries.
- id = parentid;
- while (!id.IsRoot()) {
- MutableEntry parent(trans, syncable::GET_BY_ID, id);
- if (!binary_search(conflict_set->begin(), conflict_set->end(), id))
- break;
- DVLOG(1) << "Giving directory a new id so we can undelete it " << parent;
- ClearServerData(&parent);
- SyncerUtil::ChangeEntryIDAndUpdateChildren(trans, &parent,
- trans->directory()->NextId());
- parent.Put(syncable::BASE_VERSION, 0);
- parent.Put(syncable::IS_UNSYNCED, true);
- id = parent.Get(syncable::PARENT_ID);
- // METRIC conflict resolved by recreating dir tree.
- }
- return true;
-}
-
-// TODO(chron): needs unit test badly
-bool AttemptToFixUpdateEntryInDeletedLocalTree(WriteTransaction* trans,
- ConflictSet* conflict_set,
- const Entry& entry) {
- if (!entry.Get(syncable::IS_UNAPPLIED_UPDATE) ||
- entry.Get(syncable::SERVER_IS_DEL))
- return false;
- Id parent_id = entry.Get(syncable::SERVER_PARENT_ID);
- MutableEntry parent(trans, syncable::GET_BY_ID, parent_id);
- if (!parent.good() || !parent.Get(syncable::IS_DEL) ||
- !binary_search(conflict_set->begin(), conflict_set->end(), parent_id)) {
- return false;
- }
- // We've deleted a directory tree that's got contents on the server. We
- // recreate the directory to solve the problem.
- //
- // We do this in two parts, first we ensure the tree is unaltered since
- // the conflict was detected.
- Id id = parent_id;
- // As we will be crawling the path of deleted entries there's a chance we'll
- // end up having to reparent an item as there will be an invalid parent.
- Id reroot_id = syncable::GetNullId();
- // Similarly crawling deleted items means we risk loops.
- int loop_detection = conflict_set->size();
- while (!id.IsRoot() && --loop_detection >= 0) {
- Entry parent(trans, syncable::GET_BY_ID, id);
- // If we get a bad parent, or a parent that's deleted on client and server
- // we recreate the hierarchy in the root.
- if (!parent.good()) {
- reroot_id = id;
- break;
- }
- CHECK(parent.Get(syncable::IS_DIR));
- if (!binary_search(conflict_set->begin(), conflict_set->end(), id)) {
- // We've got to an entry that's not in the set. If it has been deleted
- // between set building and this point in time we return false. If it had
- // been deleted earlier it would have been in the set.
- // TODO(sync): Revisit syncer code organization to see if conflict
- // resolution can be done in the same transaction as set building.
- if (parent.Get(syncable::IS_DEL))
- return false;
- break;
- }
- if (!parent.Get(syncable::IS_DEL) ||
- parent.Get(syncable::SERVER_IS_DEL) ||
- !parent.Get(syncable::IS_UNSYNCED)) {
- return false;
- }
- id = parent.Get(syncable::PARENT_ID);
- }
- // If we find we've been looping we re-root the hierarchy.
- if (loop_detection < 0) {
- if (id == entry.Get(syncable::ID))
- reroot_id = entry.Get(syncable::PARENT_ID);
- else
- reroot_id = id;
- }
- // Now we fix things up by undeleting all the folders in the item's path.
- id = parent_id;
- while (!id.IsRoot() && id != reroot_id) {
- if (!binary_search(conflict_set->begin(), conflict_set->end(), id)) {
- break;
- }
- MutableEntry entry(trans, syncable::GET_BY_ID, id);
-
- DVLOG(1) << "Undoing our deletion of " << entry
- << ", will have name " << entry.Get(syncable::NON_UNIQUE_NAME);
-
- Id parent_id = entry.Get(syncable::PARENT_ID);
- if (parent_id == reroot_id) {
- parent_id = trans->root_id();
- }
- entry.Put(syncable::PARENT_ID, parent_id);
- entry.Put(syncable::IS_DEL, false);
- id = entry.Get(syncable::PARENT_ID);
- // METRIC conflict resolved by recreating dir tree.
- }
- return true;
-}
-
-bool AttemptToFixRemovedDirectoriesWithContent(WriteTransaction* trans,
- ConflictSet* conflict_set) {
- UMA_HISTOGRAM_COUNTS("Sync.ConflictFixRemovedDirectoriesWithContent", 1);
- ConflictSet::const_iterator i;
- for (i = conflict_set->begin() ; i != conflict_set->end() ; ++i) {
- Entry entry(trans, syncable::GET_BY_ID, *i);
- if (AttemptToFixUnsyncedEntryInDeletedServerTree(trans,
- conflict_set, entry)) {
- return true;
- }
- if (AttemptToFixUpdateEntryInDeletedLocalTree(trans, conflict_set, entry))
- return true;
- }
- return false;
-}
-
-} // namespace
-
-// TODO(sync): Eliminate conflict sets. They're not necessary.
-bool ConflictResolver::ProcessConflictSet(WriteTransaction* trans,
- ConflictSet* conflict_set,
- int conflict_count) {
- int set_size = conflict_set->size();
- if (set_size < 2) {
- LOG(WARNING) << "Skipping conflict set because it has size " << set_size;
- // We can end up with sets of size one if we have a new item in a set that
- // we tried to commit transactionally. This should not be a persistent
- // situation.
- return false;
- }
- if (conflict_count < 3) {
- // Avoid resolving sets that could be the result of transient conflicts.
- // Transient conflicts can occur because the client or server can be
- // slightly out of date.
- return false;
- }
-
- DVLOG(1) << "Fixing a set containing " << set_size << " items";
-
- // Fix circular conflicts.
- if (AttemptToFixCircularConflict(trans, conflict_set))
- return true;
- // Check for problems involving contents of removed folders.
- if (AttemptToFixRemovedDirectoriesWithContent(trans, conflict_set))
- return true;
- return false;
-}
-
-template <typename InputIt>
-bool ConflictResolver::LogAndSignalIfConflictStuck(
- BaseTransaction* trans,
- int attempt_count,
- InputIt begin,
- InputIt end,
- StatusController* status) {
- if (attempt_count < SYNC_CYCLES_BEFORE_ADMITTING_DEFEAT) {
- return false;
- }
-
- // Don't signal stuck if we're not up to date.
- if (status->num_server_changes_remaining() > 0) {
- return false;
- }
-
- LOG(ERROR) << "[BUG] Conflict set cannot be resolved, has "
- << end - begin << " items:";
- for (InputIt i = begin ; i != end ; ++i) {
- Entry e(trans, syncable::GET_BY_ID, *i);
- if (e.good())
- LOG(ERROR) << " " << e;
- else
- LOG(ERROR) << " Bad ID:" << *i;
- }
-
- status->set_syncer_stuck(true);
- UMA_HISTOGRAM_COUNTS("Sync.SyncerConflictStuck", 1);
-
- return true;
- // TODO(sync): If we're stuck for a while we need to alert the user, clear
- // cache or reset syncing. At the very least we should stop trying something
- // that's obviously not working.
-}
-
bool ConflictResolver::ResolveSimpleConflicts(
const ScopedDirLookup& dir,
const ConflictProgress& progress,
@@ -474,75 +211,34 @@ bool ConflictResolver::ResolveSimpleConflicts(
// We have a simple conflict.
switch (ProcessSimpleConflict(&trans, id, status)) {
case NO_SYNC_PROGRESS:
- {
- int conflict_count = (simple_conflict_count_map_[id] += 2);
- LogAndSignalIfConflictStuck(&trans, conflict_count,
- &id, &id + 1, status);
- break;
- }
+ break;
case SYNC_PROGRESS:
forward_progress = true;
break;
}
}
}
- // Reduce the simple_conflict_count for each item currently tracked.
- SimpleConflictCountMap::iterator i = simple_conflict_count_map_.begin();
- while (i != simple_conflict_count_map_.end()) {
- if (0 == --(i->second))
- simple_conflict_count_map_.erase(i++);
- else
- ++i;
- }
return forward_progress;
}
bool ConflictResolver::ResolveConflicts(const ScopedDirLookup& dir,
const ConflictProgress& progress,
sessions::StatusController* status) {
- bool rv = false;
- if (ResolveSimpleConflicts(dir, progress, status))
- rv = true;
- WriteTransaction trans(FROM_HERE, syncable::SYNCER, dir);
- set<ConflictSet*>::const_iterator set_it;
- for (set_it = progress.ConflictSetsBegin();
- set_it != progress.ConflictSetsEnd();
- set_it++) {
- ConflictSet* conflict_set = *set_it;
- ConflictSetCountMapKey key = GetSetKey(conflict_set);
- conflict_set_count_map_[key] += 2;
- int conflict_count = conflict_set_count_map_[key];
- // Keep a metric for new sets.
- if (2 == conflict_count) {
- // METRIC conflict sets seen ++
- }
- // See if we should process this set.
- if (ProcessConflictSet(&trans, conflict_set, conflict_count)) {
- rv = true;
- }
- LogAndSignalIfConflictStuck(&trans, conflict_count,
- conflict_set->begin(),
- conflict_set->end(), status);
- }
- if (rv) {
- // This code means we don't signal that syncing is stuck when any conflict
- // resolution has occured.
- // TODO(sync): As this will also reduce our sensitivity to problem
- // conditions and increase the time for cascading resolutions we may have to
- // revisit this code later, doing something more intelligent.
- conflict_set_count_map_.clear();
- simple_conflict_count_map_.clear();
- }
- ConflictSetCountMap::iterator i = conflict_set_count_map_.begin();
- while (i != conflict_set_count_map_.end()) {
- if (0 == --i->second) {
- conflict_set_count_map_.erase(i++);
- // METRIC self resolved conflict sets ++.
- } else {
- ++i;
- }
+ // TODO(rlarocque): A good amount of code related to the resolution of
+ // conflict sets has been deleted here. This was done because the code had
+ // not been used in years. An unrelated bug fix accidentally re-enabled the
+ // code, forcing us to make a decision about what we should do with the code.
+ // We decided to do the safe thing and delete it for now. This restores the
+ // behaviour we've relied on for quite some time. We should think about what
+ // that code was trying to do and consider re-enabling parts of it.
+
+ if (progress.ConflictSetsSize() > 0) {
+ DVLOG(1) << "Detected " << progress.IdToConflictSetSize()
+ << " non-simple conflicting entries in " << progress.ConflictSetsSize()
+ << " unprocessed conflict sets.";
}
- return rv;
+
+ return ResolveSimpleConflicts(dir, progress, status);
}
} // namespace browser_sync
diff --git a/chrome/browser/sync/engine/conflict_resolver.h b/chrome/browser/sync/engine/conflict_resolver.h
index c622794..38e418e 100644
--- a/chrome/browser/sync/engine/conflict_resolver.h
+++ b/chrome/browser/sync/engine/conflict_resolver.h
@@ -46,23 +46,11 @@ class ConflictResolver {
sessions::StatusController* status);
private:
- // We keep a map to record how often we've seen each conflict set. We use this
- // to screen out false positives caused by transient server or client states,
- // and to allow us to try to make smaller changes to fix situations before
- // moving onto more drastic solutions.
- typedef std::string ConflictSetCountMapKey;
- typedef std::map<ConflictSetCountMapKey, int> ConflictSetCountMap;
- typedef std::map<syncable::Id, int> SimpleConflictCountMap;
-
enum ProcessSimpleConflictResult {
NO_SYNC_PROGRESS, // No changes to advance syncing made.
SYNC_PROGRESS, // Progress made.
};
- // Get a key for the given set. NOTE: May reorder set contents. The key is
- // currently not very efficient, but will ease debugging.
- ConflictSetCountMapKey GetSetKey(ConflictSet* conflict_set);
-
void IgnoreLocalChanges(syncable::MutableEntry* entry);
void OverwriteServerChanges(syncable::WriteTransaction* trans,
syncable::MutableEntry* entry);
@@ -76,20 +64,6 @@ class ConflictResolver {
const sessions::ConflictProgress& progress,
sessions::StatusController* status);
- bool ProcessConflictSet(syncable::WriteTransaction* trans,
- ConflictSet* conflict_set,
- int conflict_count);
-
- // Returns true if we're stuck.
- template <typename InputIt>
- bool LogAndSignalIfConflictStuck(syncable::BaseTransaction* trans,
- int attempt_count,
- InputIt start, InputIt end,
- sessions::StatusController* status);
-
- ConflictSetCountMap conflict_set_count_map_;
- SimpleConflictCountMap simple_conflict_count_map_;
-
DISALLOW_COPY_AND_ASSIGN(ConflictResolver);
};
diff --git a/chrome/browser/sync/engine/syncer.cc b/chrome/browser/sync/engine/syncer.cc
index 516c15f..ae97c5d 100644
--- a/chrome/browser/sync/engine/syncer.cc
+++ b/chrome/browser/sync/engine/syncer.cc
@@ -94,8 +94,7 @@ Syncer::ScopedSyncStartStopTracker::~ScopedSyncStartStopTracker() {
}
Syncer::Syncer()
- : early_exit_requested_(false),
- pre_conflict_resolution_closure_(NULL) {
+ : early_exit_requested_(false) {
}
Syncer::~Syncer() {}
@@ -255,13 +254,6 @@ void Syncer::SyncShare(sessions::SyncSession* session,
break;
}
case RESOLVE_CONFLICTS: {
-
- // Trigger the pre_conflict_resolution_closure_, which is a testing
- // hook for the unit tests, if it is non-NULL.
- if (pre_conflict_resolution_closure_) {
- pre_conflict_resolution_closure_->Run();
- }
-
StatusController* status = session->mutable_status_controller();
status->reset_conflicts_resolved();
ResolveConflictsCommand resolve_conflicts_command;
diff --git a/chrome/browser/sync/engine/syncer.h b/chrome/browser/sync/engine/syncer.h
index 8ce431a..b8b1010 100644
--- a/chrome/browser/sync/engine/syncer.h
+++ b/chrome/browser/sync/engine/syncer.h
@@ -10,7 +10,6 @@
#include <vector>
#include "base/basictypes.h"
-#include "base/callback.h"
#include "base/gtest_prod_util.h"
#include "base/memory/scoped_ptr.h"
#include "base/synchronization/lock.h"
@@ -94,15 +93,9 @@ class Syncer {
ConflictResolver resolver_;
- // A callback hook used in unittests to simulate changes between conflict set
- // building and conflict resolution.
- Callback0::Type* pre_conflict_resolution_closure_;
-
friend class SyncerTest;
FRIEND_TEST_ALL_PREFIXES(SyncerTest, NameClashWithResolver);
FRIEND_TEST_ALL_PREFIXES(SyncerTest, IllegalAndLegalUpdates);
- FRIEND_TEST_ALL_PREFIXES(SusanDeletingTest,
- NewServerItemInAFolderHierarchyWeHaveDeleted3);
FRIEND_TEST_ALL_PREFIXES(SyncerTest, TestCommitListOrderingAndNewParent);
FRIEND_TEST_ALL_PREFIXES(SyncerTest,
TestCommitListOrderingAndNewParentAndChild);
diff --git a/chrome/browser/sync/engine/syncer_unittest.cc b/chrome/browser/sync/engine/syncer_unittest.cc
index 405bf45..20fe2d0 100644
--- a/chrome/browser/sync/engine/syncer_unittest.cc
+++ b/chrome/browser/sync/engine/syncer_unittest.cc
@@ -21,7 +21,6 @@
#include "base/stringprintf.h"
#include "base/time.h"
#include "build/build_config.h"
-#include "chrome/browser/sync/engine/conflict_resolver.h"
#include "chrome/browser/sync/engine/get_commit_ids_command.h"
#include "chrome/browser/sync/engine/model_safe_worker.h"
#include "chrome/browser/sync/engine/net/server_connection_manager.h"
@@ -2626,7 +2625,6 @@ TEST_F(SyncerTest, NewEntryAndAlteredServerEntrySharePath_OldBookmarksProto) {
}
}
-
// Circular links should be resolved by the server.
TEST_F(SyncerTest, SiblingDirectoriesBecomeCircular) {
// we don't currently resolve this. This test ensures we don't.
@@ -2734,37 +2732,6 @@ TEST_F(SyncerTest, DualDeletionWithNewItemNameClash) {
saw_syncer_event_ = false;
}
-TEST_F(SyncerTest, FixDirectoryLoopConflict) {
- ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
- CHECK(dir.good());
- mock_server_->AddUpdateDirectory(1, 0, "bob", 1, 10);
- mock_server_->AddUpdateDirectory(2, 0, "fred", 1, 10);
- SyncShareAsDelegate();
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir);
- MutableEntry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
- ASSERT_TRUE(bob.good());
- bob.Put(IS_UNSYNCED, true);
- bob.Put(PARENT_ID, ids_.FromNumber(2));
- }
- mock_server_->AddUpdateDirectory(2, 1, "fred", 2, 20);
- mock_server_->set_conflict_all_commits(true);
- SyncShareAsDelegate();
- SyncShareAsDelegate();
- {
- ReadTransaction trans(FROM_HERE, dir);
- Entry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
- ASSERT_TRUE(bob.good());
- Entry fred(&trans, GET_BY_ID, ids_.FromNumber(2));
- ASSERT_TRUE(fred.good());
- EXPECT_TRUE(fred.Get(IS_UNSYNCED));
- EXPECT_TRUE(bob.Get(IS_UNSYNCED));
- EXPECT_FALSE(fred.Get(IS_UNAPPLIED_UPDATE));
- EXPECT_FALSE(bob.Get(IS_UNAPPLIED_UPDATE));
- }
- saw_syncer_event_ = false;
-}
-
TEST_F(SyncerTest, ResolveWeWroteTheyDeleted) {
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
CHECK(dir.good());
@@ -2797,740 +2764,6 @@ TEST_F(SyncerTest, ResolveWeWroteTheyDeleted) {
saw_syncer_event_ = false;
}
-TEST_F(SyncerTest, ServerDeletingFolderWeHaveMovedSomethingInto) {
- ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
- CHECK(dir.good());
-
- syncable::Id bob_id = ids_.NewServerId();
- syncable::Id fred_id = ids_.NewServerId();
-
- mock_server_->AddUpdateDirectory(bob_id, TestIdFactory::root(),
- "bob", 1, 10);
- mock_server_->AddUpdateDirectory(fred_id, TestIdFactory::root(),
- "fred", 1, 10);
- SyncShareAsDelegate();
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir);
- MutableEntry bob(&trans, GET_BY_ID, bob_id);
- ASSERT_TRUE(bob.good());
- bob.Put(IS_UNSYNCED, true);
- bob.Put(PARENT_ID, fred_id);
- }
- mock_server_->AddUpdateDirectory(fred_id, TestIdFactory::root(),
- "fred", 2, 20);
- mock_server_->SetLastUpdateDeleted();
- mock_server_->set_conflict_all_commits(true);
- SyncShareAsDelegate();
- SyncShareAsDelegate();
- {
- ReadTransaction trans(FROM_HERE, dir);
-
- Entry bob(&trans, GET_BY_ID, bob_id);
- ASSERT_TRUE(bob.good());
- EXPECT_TRUE(bob.Get(IS_UNSYNCED));
- EXPECT_FALSE(bob.Get(IS_UNAPPLIED_UPDATE));
- EXPECT_TRUE(bob.Get(NON_UNIQUE_NAME) == "bob");
- EXPECT_NE(bob.Get(PARENT_ID), fred_id);
-
- // Entry was deleted and reborn.
- Entry dead_fred(&trans, GET_BY_ID, fred_id);
- EXPECT_FALSE(dead_fred.good());
-
- // Reborn fred
- Entry fred(&trans, GET_BY_ID, bob.Get(PARENT_ID));
- ASSERT_TRUE(fred.good());
- EXPECT_TRUE(fred.Get(PARENT_ID) == trans.root_id());
- EXPECT_EQ("fred", fred.Get(NON_UNIQUE_NAME));
- EXPECT_TRUE(fred.Get(IS_UNSYNCED));
- EXPECT_FALSE(fred.Get(IS_UNAPPLIED_UPDATE));
- }
- saw_syncer_event_ = false;
-}
-
-// TODO(ncarter): This test is bogus, but it actually seems to hit an
-// interesting case the 4th time SyncShare is called.
-// TODO(chron): The fourth time that SyncShare is called it crashes.
-// This seems to be due to a bug in the conflict set building logic.
-// http://crbug.com/46621
-TEST_F(SyncerTest, DISABLED_ServerDeletingFolderWeHaveAnOpenEntryIn) {
- ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
- CHECK(dir.good());
- mock_server_->AddUpdateBookmark(1, 0, "bob", 1, 10);
- mock_server_->AddUpdateDirectory(2, 0, "fred", 1, 10);
- SyncShareAsDelegate();
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir);
- MutableEntry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
- ASSERT_TRUE(bob.good());
- bob.Put(IS_UNSYNCED, true);
- WriteTestDataToEntry(&trans, &bob);
- }
- SyncShareAsDelegate();
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir);
- MutableEntry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
- ASSERT_TRUE(bob.good());
- EXPECT_FALSE(bob.Get(IS_UNSYNCED));
- bob.Put(IS_UNSYNCED, true);
- bob.Put(PARENT_ID, ids_.FromNumber(2));
- }
- mock_server_->AddUpdateDirectory(2, 0, "fred", 2, 20);
- mock_server_->SetLastUpdateDeleted();
- mock_server_->set_conflict_all_commits(true);
- saw_syncer_event_ = false;
- // These SyncShares would cause a CHECK because we'd think we were stuck.
- SyncShareAsDelegate();
- SyncShareAsDelegate();
- SyncShareAsDelegate();
- SyncShareAsDelegate();
- SyncShareAsDelegate();
- SyncShareAsDelegate();
- SyncShareAsDelegate();
- SyncShareAsDelegate();
- EXPECT_FALSE(saw_syncer_event_);
- {
- ReadTransaction trans(FROM_HERE, dir);
- Entry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
- ASSERT_TRUE(bob.good());
- Id fred_id =
- GetOnlyEntryWithName(&trans, TestIdFactory::root(), "fred");
- Entry fred(&trans, GET_BY_ID, fred_id);
- ASSERT_TRUE(fred.good());
- EXPECT_FALSE(fred.Get(IS_UNSYNCED));
- EXPECT_TRUE(fred.Get(IS_UNAPPLIED_UPDATE));
- EXPECT_TRUE(bob.Get(PARENT_ID) == fred.Get(ID));
- EXPECT_FALSE(bob.Get(IS_UNAPPLIED_UPDATE));
- }
- saw_syncer_event_ = false;
-}
-
-TEST_F(SyncerTest, WeMovedSomethingIntoAFolderServerHasDeleted) {
- ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
- CHECK(dir.good());
-
- syncable::Id bob_id = ids_.NewServerId();
- syncable::Id fred_id = ids_.NewServerId();
-
- mock_server_->AddUpdateDirectory(bob_id, TestIdFactory::root(),
- "bob", 1, 10);
- mock_server_->AddUpdateDirectory(fred_id, TestIdFactory::root(),
- "fred", 1, 10);
- SyncShareAsDelegate();
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir);
- Entry fred(&trans, GET_BY_ID, fred_id);
- ASSERT_TRUE(fred.good());
-
- MutableEntry bob(&trans, GET_BY_ID, bob_id);
- ASSERT_TRUE(bob.good());
- bob.Put(IS_UNSYNCED, true);
- bob.Put(PARENT_ID, fred_id);
- }
- mock_server_->AddUpdateDirectory(fred_id, TestIdFactory::root(),
- "fred", 2, 20);
- mock_server_->SetLastUpdateDeleted();
- mock_server_->set_conflict_all_commits(true);
- SyncShareAsDelegate();
- SyncShareAsDelegate();
- {
- ReadTransaction trans(FROM_HERE, dir);
- Entry bob(&trans, GET_BY_ID, bob_id);
- ASSERT_TRUE(bob.good());
-
- // Entry was deleted by server. We'll make a new one though with a new ID.
- Entry dead_fred(&trans, GET_BY_ID, fred_id);
- EXPECT_FALSE(dead_fred.good());
-
- // Fred is reborn with a local ID.
- Entry fred(&trans, GET_BY_ID, bob.Get(PARENT_ID));
- EXPECT_EQ("fred", fred.Get(NON_UNIQUE_NAME));
- EXPECT_EQ(TestIdFactory::root(), fred.Get(PARENT_ID));
- EXPECT_TRUE(fred.Get(IS_UNSYNCED));
- EXPECT_FALSE(fred.Get(ID).ServerKnows());
-
- // Bob needs to update his parent.
- EXPECT_TRUE(bob.Get(IS_UNSYNCED));
- EXPECT_TRUE(bob.Get(PARENT_ID) == fred.Get(ID));
- EXPECT_TRUE(fred.Get(PARENT_ID) == root_id_);
- EXPECT_FALSE(fred.Get(IS_UNAPPLIED_UPDATE));
- EXPECT_FALSE(bob.Get(IS_UNAPPLIED_UPDATE));
- }
- saw_syncer_event_ = false;
-}
-
-TEST_F(SyncerTest,
- WeMovedSomethingIntoAFolderServerHasDeletedAndWeRenamed) {
- ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
- CHECK(dir.good());
-
- const syncable::Id bob_id = TestIdFactory::FromNumber(1);
- const syncable::Id fred_id = TestIdFactory::FromNumber(2);
-
- mock_server_->AddUpdateDirectory(bob_id, TestIdFactory::root(),
- "bob", 1, 10);
- mock_server_->AddUpdateDirectory(fred_id, TestIdFactory::root(),
- "fred", 1, 10);
- SyncShareAsDelegate();
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir);
- MutableEntry fred(&trans, GET_BY_ID, fred_id);
- ASSERT_TRUE(fred.good());
- fred.Put(IS_UNSYNCED, true);
- fred.Put(SYNCING, false);
- fred.Put(NON_UNIQUE_NAME, "Alice");
-
- // Move Bob within Fred (now Alice).
- MutableEntry bob(&trans, GET_BY_ID, bob_id);
- CHECK(bob.good());
- bob.Put(IS_UNSYNCED, true);
- bob.Put(SYNCING, false);
- bob.Put(PARENT_ID, fred_id);
- }
- mock_server_->AddUpdateDirectory(fred_id, TestIdFactory::root(),
- "fred", 2, 20);
- mock_server_->SetLastUpdateDeleted();
- mock_server_->set_conflict_all_commits(true);
- SyncShareAsDelegate();
- SyncShareAsDelegate();
- {
- ReadTransaction trans(FROM_HERE, dir);
- Entry bob(&trans, GET_BY_ID, bob_id);
- ASSERT_TRUE(bob.good());
-
- // Old entry is dead
- Entry dead_fred(&trans, GET_BY_ID, fred_id);
- EXPECT_FALSE(dead_fred.good());
-
- // New ID is created to fill parent folder, named correctly
- Entry alice(&trans, GET_BY_ID, bob.Get(PARENT_ID));
- ASSERT_TRUE(alice.good());
- EXPECT_EQ("Alice", alice.Get(NON_UNIQUE_NAME));
- EXPECT_TRUE(alice.Get(IS_UNSYNCED));
- EXPECT_FALSE(alice.Get(ID).ServerKnows());
- EXPECT_TRUE(bob.Get(IS_UNSYNCED));
- EXPECT_TRUE(bob.Get(PARENT_ID) == alice.Get(ID));
- EXPECT_TRUE(alice.Get(PARENT_ID) == root_id_);
- EXPECT_FALSE(alice.Get(IS_UNAPPLIED_UPDATE));
- EXPECT_FALSE(bob.Get(IS_UNAPPLIED_UPDATE));
- }
- saw_syncer_event_ = false;
-}
-
-
-TEST_F(SyncerTest,
- WeMovedADirIntoAndCreatedAnEntryInAFolderServerHasDeleted) {
- ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
- CHECK(dir.good());
-
- syncable::Id bob_id = ids_.NewServerId();
- syncable::Id fred_id = ids_.NewServerId();
-
- mock_server_->AddUpdateDirectory(bob_id, TestIdFactory::root(),
- "bob", 1, 10);
- mock_server_->AddUpdateDirectory(fred_id, TestIdFactory::root(),
- "fred", 1, 10);
- SyncShareAsDelegate();
- syncable::Id new_item_id;
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir);
- MutableEntry bob(&trans, GET_BY_ID, bob_id);
- ASSERT_TRUE(bob.good());
- bob.Put(IS_UNSYNCED, true);
- bob.Put(PARENT_ID, fred_id);
- MutableEntry new_item(&trans, CREATE, fred_id, "new_item");
- WriteTestDataToEntry(&trans, &new_item);
- new_item_id = new_item.Get(ID);
- }
- mock_server_->AddUpdateDirectory(fred_id, TestIdFactory::root(),
- "fred", 2, 20);
- mock_server_->SetLastUpdateDeleted();
- mock_server_->set_conflict_all_commits(true);
- SyncShareAsDelegate();
- SyncShareAsDelegate();
- {
- ReadTransaction trans(FROM_HERE, dir);
-
- Entry bob(&trans, GET_BY_ID, bob_id);
- ASSERT_TRUE(bob.good());
- EXPECT_TRUE(bob.Get(IS_UNSYNCED));
- EXPECT_FALSE(bob.Get(IS_UNAPPLIED_UPDATE));
- EXPECT_NE(bob.Get(PARENT_ID), fred_id);
-
- // Was recreated. Old one shouldn't exist.
- Entry dead_fred(&trans, GET_BY_ID, fred_id);
- EXPECT_FALSE(dead_fred.good());
-
- Entry fred(&trans, GET_BY_ID, bob.Get(PARENT_ID));
- ASSERT_TRUE(fred.good());
- EXPECT_TRUE(fred.Get(IS_UNSYNCED));
- EXPECT_FALSE(fred.Get(ID).ServerKnows());
- EXPECT_EQ("fred", fred.Get(NON_UNIQUE_NAME));
- EXPECT_FALSE(fred.Get(IS_UNAPPLIED_UPDATE));
- EXPECT_TRUE(fred.Get(PARENT_ID) == root_id_);
-
- Entry new_item(&trans, GET_BY_ID, new_item_id);
- ASSERT_TRUE(new_item.good());
- EXPECT_EQ(new_item.Get(PARENT_ID), fred.Get(ID));
- }
- saw_syncer_event_ = false;
-}
-
-TEST_F(SyncerTest, ServerMovedSomethingIntoAFolderWeHaveDeleted) {
- ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
- CHECK(dir.good());
- mock_server_->AddUpdateDirectory(1, 0, "bob", 1, 10);
- mock_server_->AddUpdateDirectory(2, 0, "fred", 1, 10);
- LoopSyncShare();
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir);
- MutableEntry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
- ASSERT_TRUE(bob.good());
- bob.Put(IS_UNSYNCED, true);
- bob.Put(IS_DEL, true);
- }
- mock_server_->AddUpdateDirectory(2, 1, "fred", 2, 20);
- mock_server_->set_conflict_all_commits(true);
- LoopSyncShare();
- LoopSyncShare();
- {
- ReadTransaction trans(FROM_HERE, dir);
- Entry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
- ASSERT_TRUE(bob.good());
- Entry fred(&trans, GET_BY_ID, ids_.FromNumber(2));
- ASSERT_TRUE(fred.good());
- EXPECT_FALSE(fred.Get(IS_UNSYNCED));
- EXPECT_TRUE(bob.Get(IS_UNSYNCED));
- EXPECT_TRUE(fred.Get(PARENT_ID) == bob.Get(ID));
- EXPECT_TRUE(bob.Get(PARENT_ID) == root_id_);
- EXPECT_FALSE(fred.Get(IS_UNAPPLIED_UPDATE));
- EXPECT_FALSE(bob.Get(IS_UNAPPLIED_UPDATE));
- }
- saw_syncer_event_ = false;
-}
-
-TEST_F(SyncerTest, ServerMovedAFolderIntoAFolderWeHaveDeletedAndMovedIntoIt) {
- // This test combines circular folders and deleted parents.
- ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
- CHECK(dir.good());
- mock_server_->AddUpdateDirectory(1, 0, "bob", 1, 10);
- mock_server_->AddUpdateDirectory(2, 0, "fred", 1, 10);
- SyncShareAsDelegate();
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir);
- MutableEntry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
- ASSERT_TRUE(bob.good());
- bob.Put(IS_UNSYNCED, true);
- bob.Put(IS_DEL, true);
- bob.Put(PARENT_ID, ids_.FromNumber(2));
- }
- mock_server_->AddUpdateDirectory(2, 1, "fred", 2, 20);
- mock_server_->set_conflict_all_commits(true);
- SyncShareAsDelegate();
- SyncShareAsDelegate();
- {
- ReadTransaction trans(FROM_HERE, dir);
- Entry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
- ASSERT_TRUE(bob.good());
- Entry fred(&trans, GET_BY_ID, ids_.FromNumber(2));
- ASSERT_TRUE(fred.good());
- EXPECT_TRUE(fred.Get(IS_UNSYNCED));
- EXPECT_TRUE(bob.Get(IS_UNSYNCED));
- EXPECT_TRUE(bob.Get(IS_DEL));
- EXPECT_TRUE(fred.Get(PARENT_ID) == root_id_);
- EXPECT_TRUE(bob.Get(PARENT_ID) == fred.Get(ID));
- EXPECT_FALSE(fred.Get(IS_UNAPPLIED_UPDATE));
- EXPECT_FALSE(bob.Get(IS_UNAPPLIED_UPDATE));
- }
- saw_syncer_event_ = false;
-}
-
-TEST_F(SyncerTest, NewServerItemInAFolderWeHaveDeleted) {
- ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
- CHECK(dir.good());
- mock_server_->AddUpdateDirectory(1, 0, "bob", 1, 10);
- LoopSyncShare();
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir);
- MutableEntry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
- ASSERT_TRUE(bob.good());
- bob.Put(IS_UNSYNCED, true);
- bob.Put(IS_DEL, true);
- }
- mock_server_->AddUpdateDirectory(2, 1, "fred", 2, 20);
- mock_server_->set_conflict_all_commits(true);
- LoopSyncShare();
- LoopSyncShare();
- {
- ReadTransaction trans(FROM_HERE, dir);
- Entry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
- ASSERT_TRUE(bob.good());
- Entry fred(&trans, GET_BY_ID, ids_.FromNumber(2));
- ASSERT_TRUE(fred.good());
- EXPECT_FALSE(fred.Get(IS_UNSYNCED));
- EXPECT_TRUE(bob.Get(IS_UNSYNCED));
- EXPECT_TRUE(fred.Get(PARENT_ID) == bob.Get(ID));
- EXPECT_TRUE(bob.Get(PARENT_ID) == root_id_);
- EXPECT_FALSE(fred.Get(IS_UNAPPLIED_UPDATE));
- EXPECT_FALSE(bob.Get(IS_UNAPPLIED_UPDATE));
- }
- saw_syncer_event_ = false;
-}
-
-TEST_F(SyncerTest, NewServerItemInAFolderHierarchyWeHaveDeleted) {
- ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
- CHECK(dir.good());
- mock_server_->AddUpdateDirectory(1, 0, "bob", 1, 10);
- mock_server_->AddUpdateDirectory(2, 1, "joe", 1, 10);
- LoopSyncShare();
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir);
- MutableEntry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
- ASSERT_TRUE(bob.good());
- bob.Put(IS_UNSYNCED, true);
- bob.Put(IS_DEL, true);
- MutableEntry joe(&trans, GET_BY_ID, ids_.FromNumber(2));
- ASSERT_TRUE(joe.good());
- joe.Put(IS_UNSYNCED, true);
- joe.Put(IS_DEL, true);
- }
- mock_server_->AddUpdateDirectory(3, 2, "fred", 2, 20);
- mock_server_->set_conflict_all_commits(true);
- LoopSyncShare();
- LoopSyncShare();
- {
- ReadTransaction trans(FROM_HERE, dir);
- Entry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
- ASSERT_TRUE(bob.good());
- Entry joe(&trans, GET_BY_ID, ids_.FromNumber(2));
- ASSERT_TRUE(joe.good());
- Entry fred(&trans, GET_BY_ID, ids_.FromNumber(3));
- ASSERT_TRUE(fred.good());
- EXPECT_FALSE(fred.Get(IS_UNSYNCED));
- EXPECT_TRUE(bob.Get(IS_UNSYNCED));
- EXPECT_TRUE(joe.Get(IS_UNSYNCED));
- EXPECT_TRUE(fred.Get(PARENT_ID) == joe.Get(ID));
- EXPECT_TRUE(joe.Get(PARENT_ID) == bob.Get(ID));
- EXPECT_TRUE(bob.Get(PARENT_ID) == root_id_);
- EXPECT_FALSE(fred.Get(IS_UNAPPLIED_UPDATE));
- EXPECT_FALSE(bob.Get(IS_UNAPPLIED_UPDATE));
- EXPECT_FALSE(joe.Get(IS_UNAPPLIED_UPDATE));
- }
- saw_syncer_event_ = false;
-}
-
-TEST_F(SyncerTest, NewServerItemInAFolderHierarchyWeHaveDeleted2) {
- // The difference here is that the hierarchy's not in the root. We have
- // another entry that shouldn't be touched.
- ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
- CHECK(dir.good());
- mock_server_->AddUpdateDirectory(4, 0, "susan", 1, 10);
- mock_server_->AddUpdateDirectory(1, 4, "bob", 1, 10);
- mock_server_->AddUpdateDirectory(2, 1, "joe", 1, 10);
- LoopSyncShare();
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir);
- MutableEntry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
- ASSERT_TRUE(bob.good());
- bob.Put(IS_UNSYNCED, true);
- bob.Put(IS_DEL, true);
- MutableEntry joe(&trans, GET_BY_ID, ids_.FromNumber(2));
- ASSERT_TRUE(joe.good());
- joe.Put(IS_UNSYNCED, true);
- joe.Put(IS_DEL, true);
- }
- mock_server_->AddUpdateDirectory(3, 2, "fred", 2, 20);
- mock_server_->set_conflict_all_commits(true);
- LoopSyncShare();
- LoopSyncShare();
- {
- ReadTransaction trans(FROM_HERE, dir);
- Entry bob(&trans, GET_BY_ID, ids_.FromNumber(1));
- ASSERT_TRUE(bob.good());
- Entry joe(&trans, GET_BY_ID, ids_.FromNumber(2));
- ASSERT_TRUE(joe.good());
- Entry fred(&trans, GET_BY_ID, ids_.FromNumber(3));
- ASSERT_TRUE(fred.good());
- Entry susan(&trans, GET_BY_ID, ids_.FromNumber(4));
- ASSERT_TRUE(susan.good());
- EXPECT_FALSE(susan.Get(IS_UNSYNCED));
- EXPECT_FALSE(fred.Get(IS_UNSYNCED));
- EXPECT_TRUE(bob.Get(IS_UNSYNCED));
- EXPECT_TRUE(joe.Get(IS_UNSYNCED));
- EXPECT_EQ(fred.Get(PARENT_ID), joe.Get(ID));
- EXPECT_EQ(joe.Get(PARENT_ID), bob.Get(ID));
- EXPECT_EQ(bob.Get(PARENT_ID), susan.Get(ID));
- EXPECT_EQ(susan.Get(PARENT_ID), root_id_);
- EXPECT_FALSE(susan.Get(IS_UNAPPLIED_UPDATE));
- EXPECT_FALSE(fred.Get(IS_UNAPPLIED_UPDATE));
- EXPECT_FALSE(bob.Get(IS_UNAPPLIED_UPDATE));
- EXPECT_FALSE(joe.Get(IS_UNAPPLIED_UPDATE));
- }
- saw_syncer_event_ = false;
-}
-
-
-class SusanDeletingTest : public SyncerTest {
- public:
- SusanDeletingTest() : countdown_till_delete_(0) {}
-
- static const int64 susan_int_id_ = 4;
-
- void DeleteSusanInRoot() {
- ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
- ASSERT_TRUE(dir.good());
-
- const syncable::Id susan_id = TestIdFactory::FromNumber(susan_int_id_);
- ASSERT_GT(countdown_till_delete_, 0);
- if (0 != --countdown_till_delete_)
- return;
- WriteTransaction trans(FROM_HERE, UNITTEST, dir);
- MutableEntry susan(&trans, GET_BY_ID, susan_id);
- Directory::ChildHandles children;
- dir->GetChildHandlesById(&trans, susan.Get(ID), &children);
- ASSERT_EQ(0u, children.size());
- susan.Put(IS_DEL, true);
- susan.Put(IS_UNSYNCED, true);
-}
-
- protected:
- int countdown_till_delete_;
-};
-
-TEST_F(SusanDeletingTest,
- NewServerItemInAFolderHierarchyWeHaveDeleted3) {
- // Same as 2, except we deleted the folder the set is in between set building
- // and conflict resolution.
- ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
- CHECK(dir.good());
-
- const syncable::Id bob_id = TestIdFactory::FromNumber(1);
- const syncable::Id joe_id = TestIdFactory::FromNumber(2);
- const syncable::Id fred_id = TestIdFactory::FromNumber(3);
- const syncable::Id susan_id = TestIdFactory::FromNumber(susan_int_id_);
-
- mock_server_->AddUpdateDirectory(susan_id, TestIdFactory::root(),
- "susan", 1, 10);
- mock_server_->AddUpdateDirectory(bob_id, susan_id, "bob", 1, 10);
- mock_server_->AddUpdateDirectory(joe_id, bob_id, "joe", 1, 10);
- LoopSyncShare();
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir);
- MutableEntry bob(&trans, GET_BY_ID, bob_id);
- ASSERT_TRUE(bob.good());
- bob.Put(IS_UNSYNCED, true);
- bob.Put(IS_DEL, true);
-
- MutableEntry joe(&trans, GET_BY_ID, joe_id);
- ASSERT_TRUE(joe.good());
- joe.Put(IS_UNSYNCED, true);
- joe.Put(IS_DEL, true);
- }
- mock_server_->AddUpdateDirectory(fred_id, joe_id, "fred", 2, 20);
- mock_server_->set_conflict_all_commits(true);
- countdown_till_delete_ = 2;
- syncer_->pre_conflict_resolution_closure_ =
- NewCallback<SusanDeletingTest>(this,
- &SusanDeletingTest::DeleteSusanInRoot);
- SyncShareAsDelegate();
- SyncShareAsDelegate();
- {
- ReadTransaction trans(FROM_HERE, dir);
- Entry bob(&trans, GET_BY_ID, bob_id);
- ASSERT_TRUE(bob.good());
- Entry joe(&trans, GET_BY_ID, joe_id);
- ASSERT_TRUE(joe.good());
- Entry fred(&trans, GET_BY_ID, fred_id);
- ASSERT_TRUE(fred.good());
- Entry susan(&trans, GET_BY_ID, susan_id);
- ASSERT_TRUE(susan.good());
- EXPECT_FALSE(susan.Get(IS_UNAPPLIED_UPDATE));
- EXPECT_TRUE(fred.Get(IS_UNAPPLIED_UPDATE));
- EXPECT_FALSE(bob.Get(IS_UNAPPLIED_UPDATE));
- EXPECT_FALSE(joe.Get(IS_UNAPPLIED_UPDATE));
- EXPECT_TRUE(susan.Get(IS_UNSYNCED));
- EXPECT_FALSE(fred.Get(IS_UNSYNCED));
- EXPECT_TRUE(bob.Get(IS_UNSYNCED));
- EXPECT_TRUE(joe.Get(IS_UNSYNCED));
- }
- EXPECT_EQ(0, countdown_till_delete_);
- delete syncer_->pre_conflict_resolution_closure_;
- syncer_->pre_conflict_resolution_closure_ = NULL;
- LoopSyncShare();
- LoopSyncShare();
- {
- ReadTransaction trans(FROM_HERE, dir);
- Entry bob(&trans, GET_BY_ID, bob_id);
- ASSERT_TRUE(bob.good());
- Entry joe(&trans, GET_BY_ID, joe_id);
- ASSERT_TRUE(joe.good());
- Entry fred(&trans, GET_BY_ID, fred_id);
- ASSERT_TRUE(fred.good());
- Entry susan(&trans, GET_BY_ID, susan_id);
- ASSERT_TRUE(susan.good());
- EXPECT_TRUE(susan.Get(IS_UNSYNCED));
- EXPECT_FALSE(fred.Get(IS_UNSYNCED));
- EXPECT_TRUE(bob.Get(IS_UNSYNCED));
- EXPECT_TRUE(joe.Get(IS_UNSYNCED));
- EXPECT_TRUE(fred.Get(PARENT_ID) == joe.Get(ID));
- EXPECT_TRUE(joe.Get(PARENT_ID) == bob.Get(ID));
- EXPECT_TRUE(bob.Get(PARENT_ID) == susan.Get(ID));
- EXPECT_TRUE(susan.Get(PARENT_ID) == root_id_);
- EXPECT_FALSE(susan.Get(IS_UNAPPLIED_UPDATE));
- EXPECT_FALSE(fred.Get(IS_UNAPPLIED_UPDATE));
- EXPECT_FALSE(bob.Get(IS_UNAPPLIED_UPDATE));
- EXPECT_FALSE(joe.Get(IS_UNAPPLIED_UPDATE));
- }
- saw_syncer_event_ = false;
-}
-
-TEST_F(SyncerTest, WeMovedSomethingIntoAFolderHierarchyServerHasDeleted) {
- ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
- CHECK(dir.good());
-
- const syncable::Id bob_id = ids_.NewServerId();
- const syncable::Id fred_id = ids_.NewServerId();
- const syncable::Id alice_id = ids_.NewServerId();
-
- mock_server_->AddUpdateDirectory(bob_id, TestIdFactory::root(),
- "bob", 1, 10);
- mock_server_->AddUpdateDirectory(fred_id, TestIdFactory::root(),
- "fred", 1, 10);
- mock_server_->AddUpdateDirectory(alice_id, fred_id, "alice", 1, 10);
- SyncShareAsDelegate();
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir);
- MutableEntry bob(&trans, GET_BY_ID, bob_id);
- ASSERT_TRUE(bob.good());
- bob.Put(IS_UNSYNCED, true);
- bob.Put(PARENT_ID, alice_id); // Move into alice.
- }
- mock_server_->AddUpdateDirectory(fred_id, TestIdFactory::root(),
- "fred", 2, 20);
- mock_server_->SetLastUpdateDeleted();
- mock_server_->AddUpdateDirectory(alice_id, TestIdFactory::root(),
- "alice", 2, 20);
- mock_server_->SetLastUpdateDeleted();
- mock_server_->set_conflict_all_commits(true);
- SyncShareAsDelegate();
- SyncShareAsDelegate();
- {
- // Bob is the entry at the bottom of the tree.
- // The tree should be regenerated and old IDs removed.
- ReadTransaction trans(FROM_HERE, dir);
- Entry bob(&trans, GET_BY_ID, bob_id);
- ASSERT_TRUE(bob.good());
- EXPECT_FALSE(bob.Get(IS_UNAPPLIED_UPDATE));
- EXPECT_TRUE(bob.Get(IS_UNSYNCED));
-
- // Old one should be deleted, but new one should have been made.
- Entry dead_alice(&trans, GET_BY_ID, alice_id);
- EXPECT_FALSE(dead_alice.good());
- EXPECT_NE(bob.Get(PARENT_ID), alice_id);
-
- // Newly born alice
- Entry alice(&trans, GET_BY_ID, bob.Get(PARENT_ID));
- ASSERT_TRUE(alice.good());
- EXPECT_FALSE(alice.Get(IS_UNAPPLIED_UPDATE));
- EXPECT_TRUE(alice.Get(IS_UNSYNCED));
- EXPECT_FALSE(alice.Get(ID).ServerKnows());
- EXPECT_TRUE(alice.Get(NON_UNIQUE_NAME) == "alice");
-
- // Alice needs a parent as well. Old parent should have been erased.
- Entry dead_fred(&trans, GET_BY_ID, fred_id);
- EXPECT_FALSE(dead_fred.good());
- EXPECT_NE(alice.Get(PARENT_ID), fred_id);
-
- Entry fred(&trans, GET_BY_ID, alice.Get(PARENT_ID));
- ASSERT_TRUE(fred.good());
- EXPECT_EQ(fred.Get(PARENT_ID), TestIdFactory::root());
- EXPECT_TRUE(fred.Get(IS_UNSYNCED));
- EXPECT_FALSE(fred.Get(ID).ServerKnows());
- EXPECT_FALSE(fred.Get(IS_UNAPPLIED_UPDATE));
- EXPECT_TRUE(fred.Get(NON_UNIQUE_NAME) == "fred");
- }
- saw_syncer_event_ = false;
-}
-
-TEST_F(SyncerTest, WeMovedSomethingIntoAFolderHierarchyServerHasDeleted2) {
- // The difference here is that the hierarchy is not in the root. We have
- // another entry that shouldn't be touched.
- ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
- CHECK(dir.good());
-
- const syncable::Id bob_id = ids_.NewServerId();
- const syncable::Id fred_id = ids_.NewServerId();
- const syncable::Id alice_id = ids_.NewServerId();
- const syncable::Id susan_id = ids_.NewServerId();
-
- mock_server_->AddUpdateDirectory(bob_id, TestIdFactory::root(),
- "bob", 1, 10);
- mock_server_->AddUpdateDirectory(susan_id, TestIdFactory::root(),
- "susan", 1, 10);
- mock_server_->AddUpdateDirectory(fred_id, susan_id, "fred", 1, 10);
- mock_server_->AddUpdateDirectory(alice_id, fred_id, "alice", 1, 10);
- SyncShareAsDelegate();
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir);
- MutableEntry bob(&trans, GET_BY_ID, bob_id);
- ASSERT_TRUE(bob.good());
- bob.Put(IS_UNSYNCED, true);
- bob.Put(PARENT_ID, alice_id); // Move into alice.
- }
- mock_server_->AddUpdateDirectory(fred_id, TestIdFactory::root(),
- "fred", 2, 20);
- mock_server_->SetLastUpdateDeleted();
- mock_server_->AddUpdateDirectory(alice_id, TestIdFactory::root(),
- "alice", 2, 20);
- mock_server_->SetLastUpdateDeleted();
- mock_server_->set_conflict_all_commits(true);
- SyncShareAsDelegate();
- SyncShareAsDelegate();
- {
- // Root
- // |- Susan
- // |- Fred
- // |- Alice
- // |- Bob
-
- ReadTransaction trans(FROM_HERE, dir);
- Entry bob(&trans, GET_BY_ID, bob_id);
- ASSERT_TRUE(bob.good());
- EXPECT_FALSE(bob.Get(IS_UNAPPLIED_UPDATE));
- EXPECT_TRUE(bob.Get(IS_UNSYNCED)); // Parent changed
- EXPECT_NE(bob.Get(PARENT_ID), alice_id);
-
- // New one was born, this is the old one
- Entry dead_alice(&trans, GET_BY_ID, alice_id);
- EXPECT_FALSE(dead_alice.good());
-
- // Newly born
- Entry alice(&trans, GET_BY_ID, bob.Get(PARENT_ID));
- EXPECT_TRUE(alice.Get(IS_UNSYNCED));
- EXPECT_FALSE(alice.Get(IS_UNAPPLIED_UPDATE));
- EXPECT_FALSE(alice.Get(ID).ServerKnows());
- EXPECT_NE(alice.Get(PARENT_ID), fred_id); // This fred was deleted
-
- // New one was born, this is the old one
- Entry dead_fred(&trans, GET_BY_ID, fred_id);
- EXPECT_FALSE(dead_fred.good());
-
- // Newly born
- Entry fred(&trans, GET_BY_ID, alice.Get(PARENT_ID));
- EXPECT_TRUE(fred.Get(IS_UNSYNCED));
- EXPECT_FALSE(fred.Get(IS_UNAPPLIED_UPDATE));
- EXPECT_FALSE(fred.Get(ID).ServerKnows());
- EXPECT_TRUE(fred.Get(PARENT_ID) == susan_id);
-
- // Unchanged
- Entry susan(&trans, GET_BY_ID, susan_id);
- ASSERT_TRUE(susan.good());
- EXPECT_FALSE(susan.Get(IS_UNSYNCED));
- EXPECT_TRUE(susan.Get(PARENT_ID) == root_id_);
- EXPECT_FALSE(susan.Get(IS_UNAPPLIED_UPDATE));
- }
- saw_syncer_event_ = false;
-}
-
// This test is to reproduce a check failure. Sometimes we would get a bad ID
// back when creating an entry.
TEST_F(SyncerTest, DuplicateIDReturn) {
@@ -3580,41 +2813,6 @@ TEST_F(SyncerTest, DeletedEntryWithBadParentInLoopCalculation) {
SyncShareAsDelegate();
}
-TEST_F(SyncerTest, ConflictResolverMergeOverwritesLocalEntry) {
- // This test would die because it would rename a entry to a name that was
- // taken in the namespace
- ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
- CHECK(dir.good());
-
- ConflictSet conflict_set;
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir);
-
- MutableEntry local_deleted(&trans, CREATE, trans.root_id(), "name");
- local_deleted.Put(ID, ids_.FromNumber(1));
- local_deleted.Put(BASE_VERSION, 1);
- local_deleted.Put(IS_DEL, true);
- local_deleted.Put(IS_UNSYNCED, true);
-
- MutableEntry in_the_way(&trans, CREATE, trans.root_id(), "name");
- in_the_way.Put(ID, ids_.FromNumber(2));
- in_the_way.Put(BASE_VERSION, 1);
-
- MutableEntry update(&trans, CREATE_NEW_UPDATE_ITEM, ids_.FromNumber(3));
- update.Put(BASE_VERSION, 1);
- update.Put(SERVER_NON_UNIQUE_NAME, "name");
- update.Put(PARENT_ID, ids_.FromNumber(0));
- update.Put(IS_UNAPPLIED_UPDATE, true);
-
- conflict_set.push_back(ids_.FromNumber(1));
- conflict_set.push_back(ids_.FromNumber(3));
- }
- {
- WriteTransaction trans(FROM_HERE, UNITTEST, dir);
- context_->resolver()->ProcessConflictSet(&trans, &conflict_set, 50);
- }
-}
-
TEST_F(SyncerTest, ConflictResolverMergesLocalDeleteAndServerUpdate) {
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
CHECK(dir.good());