author     zea@chromium.org <zea@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>   2011-11-09 21:40:08 +0000
committer  zea@chromium.org <zea@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>   2011-11-09 21:40:08 +0000
commit     279e7f7cf7a549eb2e2bf5f668cbe9867e2706cd (patch)
tree       46624fe808311713c79f2563ebf9adb531aac231
parent     0b9c398f5394d058268eb607000f774b2b00f0a1 (diff)
Merge 108893 - [Sync] Don't commit items we know are not ready.
Items whose version does not match the server version, or which require
(re)encryption, are filtered out of the set of commit ids.

BUG=100660
TEST=sync_integration_tests, sync_unit_tests
Review URL: http://codereview.chromium.org/8400035

TBR=zea@chromium.org
Review URL: http://codereview.chromium.org/8505036

git-svn-id: svn://svn.chromium.org/chrome/branches/912/src@109306 0039d316-1c4b-4281-b951-d872f2087c98
-rw-r--r--  chrome/browser/sync/engine/get_commit_ids_command.cc                      84
-rw-r--r--  chrome/browser/sync/engine/get_commit_ids_command.h                       17
-rw-r--r--  chrome/browser/sync/engine/syncer_unittest.cc                            228
-rw-r--r--  chrome/browser/sync/test/integration/two_client_bookmarks_sync_test.cc    52
-rw-r--r--  chrome/browser/sync/test/integration/two_client_sessions_sync_test.cc     10
5 files changed, 262 insertions, 129 deletions
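
For orientation, here is a minimal, self-contained sketch of the readiness rule the commit message describes: an unsynced item is skipped if its server version is ahead of its base version (it is in conflict), or if it belongs to an encrypted type while the passphrase is missing or its data is still plaintext. The Item struct, its field names, and IsReadyForCommit below are simplified stand-ins for illustration only; the actual logic is IsEntryReadyForCommit in get_commit_ids_command.cc in the diff that follows.

// Illustrative sketch only; simplified stand-ins for syncable::Entry fields.
#include <cstdint>
#include <iostream>
#include <set>

enum class ModelType { kBookmarks, kPreferences, kSessions };

struct Item {
  bool is_unsynced;
  int64_t base_version;
  int64_t server_version;
  ModelType type;
  bool is_encrypted;  // Whether the specifics are already encrypted.
};

// An item is ready for commit only if it is unsynced, not in conflict with
// the server, and not waiting on encryption or a missing passphrase.
bool IsReadyForCommit(const Item& item,
                      const std::set<ModelType>& encrypted_types,
                      bool passphrase_missing) {
  if (!item.is_unsynced)
    return false;
  // A server version ahead of the base version means the item is in conflict,
  // so committing it now would be pointless.
  if (item.server_version > 0 && item.server_version > item.base_version)
    return false;
  // Items of an encrypted type must already be encrypted, and nothing can be
  // (re)encrypted while the cryptographer still needs a passphrase.
  if (encrypted_types.count(item.type) > 0 &&
      (passphrase_missing || !item.is_encrypted))
    return false;
  return true;
}

int main() {
  const std::set<ModelType> encrypted_types = {ModelType::kBookmarks};
  const Item in_conflict = {true, 10, 20, ModelType::kBookmarks, true};
  const Item plaintext   = {true, 10, 10, ModelType::kBookmarks, false};
  const Item ready       = {true, 10, 10, ModelType::kBookmarks, true};
  std::cout << std::boolalpha
            << IsReadyForCommit(in_conflict, encrypted_types, false) << "\n"  // false
            << IsReadyForCommit(plaintext, encrypted_types, false) << "\n"    // false
            << IsReadyForCommit(ready, encrypted_types, false) << "\n";       // true
  return 0;
}

In the real change the same predicate is applied twice: once in FilterUnreadyEntries over all unsynced handles, and again in AddItem so that parents and predecessors pulled in while building the commit set are also checked.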
diff --git a/chrome/browser/sync/engine/get_commit_ids_command.cc b/chrome/browser/sync/engine/get_commit_ids_command.cc
index 89fc309..4bfc5e00 100644
--- a/chrome/browser/sync/engine/get_commit_ids_command.cc
+++ b/chrome/browser/sync/engine/get_commit_ids_command.cc
@@ -24,7 +24,8 @@ using sessions::SyncSession;
using sessions::StatusController;
GetCommitIdsCommand::GetCommitIdsCommand(int commit_batch_size)
- : requested_commit_batch_size_(commit_batch_size) {}
+ : requested_commit_batch_size_(commit_batch_size),
+ passphrase_missing_(false) {}
GetCommitIdsCommand::~GetCommitIdsCommand() {}
@@ -35,17 +36,21 @@ void GetCommitIdsCommand::ExecuteImpl(SyncSession* session) {
SyncerUtil::GetUnsyncedEntries(session->write_transaction(),
&all_unsynced_handles);
- Cryptographer *cryptographer =
+ Cryptographer* cryptographer =
session->context()->directory_manager()->GetCryptographer(
session->write_transaction());
if (cryptographer) {
- FilterEntriesNeedingEncryption(cryptographer->GetEncryptedTypes(),
- session->write_transaction(),
- &all_unsynced_handles);
- }
+ encrypted_types_ = cryptographer->GetEncryptedTypes();
+ passphrase_missing_ = cryptographer->has_pending_keys();
+ };
+ // We filter out all unready entries from the set of unsynced handles to
+ // ensure we don't trigger useless sync cycles attempting to retry due to
+ // there being work to do. (see ScheduleNextSync in sync_scheduler)
+ FilterUnreadyEntries(session->write_transaction(),
+ &all_unsynced_handles);
+
StatusController* status = session->status_controller();
status->set_unsynced_handles(all_unsynced_handles);
-
BuildCommitIds(status->unsynced_handles(), session->write_transaction(),
session->routing_info());
@@ -58,35 +63,62 @@ void GetCommitIdsCommand::ExecuteImpl(SyncSession* session) {
status->set_commit_set(*ordered_commit_set_.get());
}
-// We create a new list of unsynced handles which omits all handles to entries
-// that require encryption but are written in plaintext. If any were found we
-// overwrite |unsynced_handles| with this new list, else no change is made.
-// Static.
-void GetCommitIdsCommand::FilterEntriesNeedingEncryption(
- const syncable::ModelTypeSet& encrypted_types,
+namespace {
+
+// An entry ready for commit is defined as one not in conflict (SERVER_VERSION
+// == BASE_VERSION || SERVER_VERSION == 0) and not requiring encryption
+// (any entry containing an encrypted datatype while the cryptographer requires
+// a passphrase is not ready for commit.)
+bool IsEntryReadyForCommit(const syncable::ModelTypeSet& encrypted_types,
+ bool passphrase_missing,
+ const syncable::Entry& entry) {
+ if (!entry.Get(syncable::IS_UNSYNCED))
+ return false;
+
+ if (entry.Get(syncable::SERVER_VERSION) > 0 &&
+ (entry.Get(syncable::SERVER_VERSION) >
+ entry.Get(syncable::BASE_VERSION))) {
+ // The local and server versions don't match. The item must be in
+ // conflict, so there's no point in attempting to commit.
+ DCHECK(entry.Get(syncable::IS_UNAPPLIED_UPDATE)); // In conflict.
+ // TODO(zea): switch this to DVLOG once it's clear bug 100660 is fixed.
+ VLOG(1) << "Excluding entry from commit due to version mismatch "
+ << entry;
+ return false;
+ }
+
+ if (encrypted_types.count(entry.GetModelType()) > 0 &&
+ (passphrase_missing ||
+ syncable::EntryNeedsEncryption(encrypted_types, entry))) {
+ // This entry requires encryption but is not properly encrypted (possibly
+ // due to the cryptographer not being initialized or the user hasn't
+ // provided the most recent passphrase).
+ // TODO(zea): switch this to DVLOG once it's clear bug 100660 is fixed.
+ VLOG(1) << "Excluding entry from commit due to lack of encryption "
+ << entry;
+ return false;
+ }
+
+ return true;
+}
+
+} // namespace
+
+void GetCommitIdsCommand::FilterUnreadyEntries(
syncable::BaseTransaction* trans,
syncable::Directory::UnsyncedMetaHandles* unsynced_handles) {
- bool removed_handles = false;
syncable::Directory::UnsyncedMetaHandles::iterator iter;
syncable::Directory::UnsyncedMetaHandles new_unsynced_handles;
new_unsynced_handles.reserve(unsynced_handles->size());
- // TODO(zea): If this becomes a bottleneck, we should merge this loop into the
- // AddCreatesAndMoves and AddDeletes loops.
for (iter = unsynced_handles->begin();
iter != unsynced_handles->end();
++iter) {
syncable::Entry entry(trans, syncable::GET_BY_HANDLE, *iter);
- if (syncable::EntryNeedsEncryption(encrypted_types, entry)) {
- // This entry requires encryption but is not encrypted (possibly due to
- // the cryptographer not being initialized). Don't add it to our new list
- // of unsynced handles.
- removed_handles = true;
- } else {
+ if (IsEntryReadyForCommit(encrypted_types_, passphrase_missing_, entry))
new_unsynced_handles.push_back(*iter);
- }
}
- if (removed_handles)
- *unsynced_handles = new_unsynced_handles;
+ if (new_unsynced_handles.size() != unsynced_handles->size())
+ unsynced_handles->swap(new_unsynced_handles);
}
void GetCommitIdsCommand::AddUncommittedParentsAndTheirPredecessors(
@@ -117,6 +149,8 @@ void GetCommitIdsCommand::AddUncommittedParentsAndTheirPredecessors(
bool GetCommitIdsCommand::AddItem(syncable::Entry* item,
OrderedCommitSet* result) {
+ if (!IsEntryReadyForCommit(encrypted_types_, passphrase_missing_, *item))
+ return false;
int64 item_handle = item->Get(syncable::META_HANDLE);
if (result->HaveCommitItem(item_handle) ||
ordered_commit_set_->HaveCommitItem(item_handle)) {
diff --git a/chrome/browser/sync/engine/get_commit_ids_command.h b/chrome/browser/sync/engine/get_commit_ids_command.h
index 86024e1..8f41f2f 100644
--- a/chrome/browser/sync/engine/get_commit_ids_command.h
+++ b/chrome/browser/sync/engine/get_commit_ids_command.h
@@ -29,13 +29,6 @@ class GetCommitIdsCommand : public SyncerCommand {
// SyncerCommand implementation.
virtual void ExecuteImpl(sessions::SyncSession* session);
- // Filter |unsynced_handles| to exclude all handles to entries that require
- // encryption but are in plaintext.
- static void FilterEntriesNeedingEncryption(
- const syncable::ModelTypeSet& encrypted_types,
- syncable::BaseTransaction* trans,
- syncable::Directory::UnsyncedMetaHandles* unsynced_handles);
-
// Builds a vector of IDs that should be committed.
void BuildCommitIds(const vector<int64>& unsynced_handles,
syncable::WriteTransaction* write_transaction,
@@ -114,6 +107,14 @@ class GetCommitIdsCommand : public SyncerCommand {
};
private:
+ // Removes all entries not ready for commit from |unsynced_handles|.
+ // An entry is considered unready for commit if it's in conflict or requires
+ // (re)encryption. Any datatype requiring encryption while the cryptographer
+ // is missing a passphrase is considered unready for commit.
+ void FilterUnreadyEntries(
+ syncable::BaseTransaction* trans,
+ syncable::Directory::UnsyncedMetaHandles* unsynced_handles);
+
void AddUncommittedParentsAndTheirPredecessors(
syncable::BaseTransaction* trans,
syncable::Id parent_id,
@@ -144,6 +145,8 @@ class GetCommitIdsCommand : public SyncerCommand {
scoped_ptr<sessions::OrderedCommitSet> ordered_commit_set_;
int requested_commit_batch_size_;
+ bool passphrase_missing_;
+ syncable::ModelTypeSet encrypted_types_;
DISALLOW_COPY_AND_ASSIGN(GetCommitIdsCommand);
};
diff --git a/chrome/browser/sync/engine/syncer_unittest.cc b/chrome/browser/sync/engine/syncer_unittest.cc
index 241526c..9985120 100644
--- a/chrome/browser/sync/engine/syncer_unittest.cc
+++ b/chrome/browser/sync/engine/syncer_unittest.cc
@@ -551,54 +551,145 @@ TEST_F(SyncerTest, GetCommitIdsCommandTruncates) {
DoTruncationTest(dir, unsynced_handle_view, expected_order);
}
-TEST_F(SyncerTest, GetCommitIdsFilterEntriesNeedingEncryption) {
+TEST_F(SyncerTest, GetCommitIdsFiltersUnreadyEntries) {
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
ASSERT_TRUE(dir.good());
- int64 handle_x = CreateUnsyncedDirectory("X", ids_.MakeLocal("x"));
- int64 handle_b = CreateUnsyncedDirectory("B", ids_.MakeLocal("b"));
- int64 handle_c = CreateUnsyncedDirectory("C", ids_.MakeLocal("c"));
- int64 handle_e = CreateUnsyncedDirectory("E", ids_.MakeLocal("e"));
- int64 handle_f = CreateUnsyncedDirectory("F", ids_.MakeLocal("f"));
+ KeyParams key_params = {"localhost", "dummy", "foobar"};
sync_pb::EncryptedData encrypted;
sync_pb::EntitySpecifics encrypted_bookmark;
encrypted_bookmark.mutable_encrypted();
AddDefaultExtensionValue(syncable::BOOKMARKS, &encrypted_bookmark);
+ mock_server_->AddUpdateDirectory(1, 0, "A", 10, 10);
+ mock_server_->AddUpdateDirectory(2, 0, "B", 10, 10);
+ mock_server_->AddUpdateDirectory(3, 0, "C", 10, 10);
+ mock_server_->AddUpdateDirectory(4, 0, "D", 10, 10);
+ SyncShareAsDelegate();
+ // Server side change will put A in conflict.
+ mock_server_->AddUpdateDirectory(1, 0, "A", 20, 20);
{
+ // Mark bookmarks as encrypted and set the cryptographer to have pending
+ // keys.
WriteTransaction wtrans(FROM_HERE, UNITTEST, dir);
- MutableEntry entry_x(&wtrans, GET_BY_HANDLE, handle_x);
- MutableEntry entry_b(&wtrans, GET_BY_HANDLE, handle_b);
- MutableEntry entry_c(&wtrans, GET_BY_HANDLE, handle_c);
- MutableEntry entry_e(&wtrans, GET_BY_HANDLE, handle_e);
- MutableEntry entry_f(&wtrans, GET_BY_HANDLE, handle_f);
- entry_x.Put(SPECIFICS, encrypted_bookmark);
- entry_x.Put(NON_UNIQUE_NAME, kEncryptedString);
- entry_b.Put(SPECIFICS, DefaultBookmarkSpecifics());
- entry_c.Put(SPECIFICS, DefaultBookmarkSpecifics());
- entry_e.Put(SPECIFICS, encrypted_bookmark);
- entry_e.Put(NON_UNIQUE_NAME, kEncryptedString);
- entry_f.Put(SPECIFICS, DefaultPreferencesSpecifics());
- }
-
- syncable::ModelTypeSet encrypted_types;
- encrypted_types.insert(syncable::BOOKMARKS);
- syncable::Directory::UnsyncedMetaHandles unsynced_handles;
- unsynced_handles.push_back(handle_x);
- unsynced_handles.push_back(handle_b);
- unsynced_handles.push_back(handle_c);
- unsynced_handles.push_back(handle_e);
- unsynced_handles.push_back(handle_f);
- // The unencrypted bookmarks should be removed from the list.
- syncable::Directory::UnsyncedMetaHandles expected_handles;
- expected_handles.push_back(handle_x); // Was encrypted.
- expected_handles.push_back(handle_e); // Was encrypted.
- expected_handles.push_back(handle_f); // Does not require encryption.
- {
+ browser_sync::Cryptographer other_cryptographer;
+ other_cryptographer.AddKey(key_params);
+ sync_pb::EntitySpecifics specifics;
+ sync_pb::NigoriSpecifics* nigori =
+ specifics.MutableExtension(sync_pb::nigori);
+ other_cryptographer.GetKeys(nigori->mutable_encrypted());
+ nigori->set_encrypt_bookmarks(true);
+ syncdb_.manager()->GetCryptographer(&wtrans)->Update(*nigori);
+
+ // In conflict but properly encrypted.
+ MutableEntry A(&wtrans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(A.good());
+ A.Put(IS_UNSYNCED, true);
+ A.Put(SPECIFICS, encrypted_bookmark);
+ A.Put(NON_UNIQUE_NAME, kEncryptedString);
+ // Not in conflict and properly encrypted.
+ MutableEntry B(&wtrans, GET_BY_ID, ids_.FromNumber(2));
+ ASSERT_TRUE(B.good());
+ B.Put(IS_UNSYNCED, true);
+ B.Put(SPECIFICS, encrypted_bookmark);
+ B.Put(NON_UNIQUE_NAME, kEncryptedString);
+ // Unencrypted specifics.
+ MutableEntry C(&wtrans, GET_BY_ID, ids_.FromNumber(3));
+ ASSERT_TRUE(C.good());
+ C.Put(IS_UNSYNCED, true);
+ C.Put(NON_UNIQUE_NAME, kEncryptedString);
+ // Unencrypted non_unique_name.
+ MutableEntry D(&wtrans, GET_BY_ID, ids_.FromNumber(4));
+ ASSERT_TRUE(D.good());
+ D.Put(IS_UNSYNCED, true);
+ D.Put(SPECIFICS, encrypted_bookmark);
+ D.Put(NON_UNIQUE_NAME, "not encrypted");
+ }
+ SyncShareAsDelegate();
+ {
+ // We remove any unready entries from the status controller's unsynced
+ // handles, so this should remain 0 even though the entries didn't commit.
+ ASSERT_EQ(0U, session_->status_controller()->unsynced_handles().size());
+  // Nothing should have committed due to bookmarks being encrypted and
+ // the cryptographer having pending keys. A would have been resolved
+ // as a simple conflict, but still be unsynced until the next sync cycle.
+ ReadTransaction rtrans(FROM_HERE, dir);
+ Entry entryA(&rtrans, syncable::GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(entryA.good());
+ EXPECT_TRUE(entryA.Get(IS_UNSYNCED));
+ Entry entryB(&rtrans, syncable::GET_BY_ID, ids_.FromNumber(2));
+ ASSERT_TRUE(entryB.good());
+ EXPECT_TRUE(entryB.Get(IS_UNSYNCED));
+ Entry entryC(&rtrans, syncable::GET_BY_ID, ids_.FromNumber(3));
+ ASSERT_TRUE(entryC.good());
+ EXPECT_TRUE(entryC.Get(IS_UNSYNCED));
+ Entry entryD(&rtrans, syncable::GET_BY_ID, ids_.FromNumber(4));
+ ASSERT_TRUE(entryD.good());
+ EXPECT_TRUE(entryD.Get(IS_UNSYNCED));
+
+ // Resolve the pending keys.
+ syncdb_.manager()->GetCryptographer(&rtrans)->DecryptPendingKeys(
+ key_params);
+ }
+ SyncShareAsDelegate();
+ {
+ ASSERT_EQ(0U, session_->status_controller()->unsynced_handles().size());
+ // All properly encrypted and non-conflicting items should commit. "A" was
+ // conflicting, but last sync cycle resolved it as simple conflict, so on
+  // this sync cycle it committed successfully.
+ ReadTransaction rtrans(FROM_HERE, dir);
+ // Committed successfully.
+ Entry entryA(&rtrans, syncable::GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(entryA.good());
+ EXPECT_FALSE(entryA.Get(IS_UNSYNCED));
+ EXPECT_FALSE(entryA.Get(IS_UNAPPLIED_UPDATE));
+ // Committed successfully.
+ Entry entryB(&rtrans, syncable::GET_BY_ID, ids_.FromNumber(2));
+ ASSERT_TRUE(entryB.good());
+ EXPECT_FALSE(entryB.Get(IS_UNSYNCED));
+ EXPECT_FALSE(entryB.Get(IS_UNAPPLIED_UPDATE));
+ // Was not properly encrypted.
+ Entry entryC(&rtrans, syncable::GET_BY_ID, ids_.FromNumber(3));
+ ASSERT_TRUE(entryC.good());
+ EXPECT_TRUE(entryC.Get(IS_UNSYNCED));
+ EXPECT_FALSE(entryC.Get(IS_UNAPPLIED_UPDATE));
+ // Was not properly encrypted.
+ Entry entryD(&rtrans, syncable::GET_BY_ID, ids_.FromNumber(4));
+ ASSERT_TRUE(entryD.good());
+ EXPECT_TRUE(entryD.Get(IS_UNSYNCED));
+ EXPECT_FALSE(entryD.Get(IS_UNAPPLIED_UPDATE));
+ }
+ {
+ // Fix the remaining items.
WriteTransaction wtrans(FROM_HERE, UNITTEST, dir);
- GetCommitIdsCommand::FilterEntriesNeedingEncryption(
- encrypted_types,
- &wtrans,
- &unsynced_handles);
- EXPECT_EQ(expected_handles, unsynced_handles);
+ MutableEntry C(&wtrans, GET_BY_ID, ids_.FromNumber(3));
+ ASSERT_TRUE(C.good());
+ C.Put(SPECIFICS, encrypted_bookmark);
+ C.Put(NON_UNIQUE_NAME, kEncryptedString);
+ MutableEntry D(&wtrans, GET_BY_ID, ids_.FromNumber(4));
+ ASSERT_TRUE(D.good());
+ D.Put(SPECIFICS, encrypted_bookmark);
+ D.Put(NON_UNIQUE_NAME, kEncryptedString);
+ }
+ SyncShareAsDelegate();
+ {
+ ASSERT_EQ(0U, session_->status_controller()->unsynced_handles().size());
+ // None should be unsynced anymore.
+ ReadTransaction rtrans(FROM_HERE, dir);
+ Entry entryA(&rtrans, syncable::GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(entryA.good());
+ EXPECT_FALSE(entryA.Get(IS_UNSYNCED));
+ EXPECT_FALSE(entryA.Get(IS_UNAPPLIED_UPDATE));
+ Entry entryB(&rtrans, syncable::GET_BY_ID, ids_.FromNumber(2));
+ ASSERT_TRUE(entryB.good());
+ EXPECT_FALSE(entryB.Get(IS_UNSYNCED));
+ EXPECT_FALSE(entryB.Get(IS_UNAPPLIED_UPDATE));
+ Entry entryC(&rtrans, syncable::GET_BY_ID, ids_.FromNumber(3));
+ ASSERT_TRUE(entryC.good());
+ EXPECT_FALSE(entryC.Get(IS_UNSYNCED));
+ EXPECT_FALSE(entryC.Get(IS_UNAPPLIED_UPDATE));
+ Entry entryD(&rtrans, syncable::GET_BY_ID, ids_.FromNumber(4));
+ ASSERT_TRUE(entryD.good());
+ EXPECT_FALSE(entryD.Get(IS_UNSYNCED));
+ EXPECT_FALSE(entryD.Get(IS_UNAPPLIED_UPDATE));
}
}
@@ -2814,52 +2905,13 @@ TEST_F(SyncerTest, WeMovedSomethingIntoAFolderServerHasDeleted) {
saw_syncer_event_ = false;
}
-class FolderMoveDeleteRenameTest : public SyncerTest {
- public:
- FolderMoveDeleteRenameTest() : done_(false) {}
-
- static const int64 bob_id_number = 1;
- static const int64 fred_id_number = 2;
-
- void MoveBobIntoID2Runner() {
- if (!done_) {
- MoveBobIntoID2();
- done_ = true;
- }
- }
-
- protected:
- void MoveBobIntoID2() {
- ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
- CHECK(dir.good());
-
- WriteTransaction trans(FROM_HERE, UNITTEST, dir);
- Entry alice(&trans, GET_BY_ID,
- TestIdFactory::FromNumber(fred_id_number));
- CHECK(alice.good());
- EXPECT_TRUE(!alice.Get(IS_DEL));
- EXPECT_TRUE(alice.Get(SYNCING)) << "Expected to be called mid-commit.";
- MutableEntry bob(&trans, GET_BY_ID,
- TestIdFactory::FromNumber(bob_id_number));
- CHECK(bob.good());
- bob.Put(IS_UNSYNCED, true);
-
- bob.Put(SYNCING, false);
- bob.Put(PARENT_ID, alice.Get(ID));
- }
-
- bool done_;
-};
-
-TEST_F(FolderMoveDeleteRenameTest,
+TEST_F(SyncerTest,
WeMovedSomethingIntoAFolderServerHasDeletedAndWeRenamed) {
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
CHECK(dir.good());
- const syncable::Id bob_id = TestIdFactory::FromNumber(
- FolderMoveDeleteRenameTest::bob_id_number);
- const syncable::Id fred_id = TestIdFactory::FromNumber(
- FolderMoveDeleteRenameTest::fred_id_number);
+ const syncable::Id bob_id = TestIdFactory::FromNumber(1);
+ const syncable::Id fred_id = TestIdFactory::FromNumber(2);
mock_server_->AddUpdateDirectory(bob_id, TestIdFactory::root(),
"bob", 1, 10);
@@ -2873,18 +2925,18 @@ TEST_F(FolderMoveDeleteRenameTest,
fred.Put(IS_UNSYNCED, true);
fred.Put(SYNCING, false);
fred.Put(NON_UNIQUE_NAME, "Alice");
+
+ // Move Bob within Fred (now Alice).
+ MutableEntry bob(&trans, GET_BY_ID, bob_id);
+ CHECK(bob.good());
+ bob.Put(IS_UNSYNCED, true);
+ bob.Put(SYNCING, false);
+ bob.Put(PARENT_ID, fred_id);
}
mock_server_->AddUpdateDirectory(fred_id, TestIdFactory::root(),
"fred", 2, 20);
mock_server_->SetLastUpdateDeleted();
mock_server_->set_conflict_all_commits(true);
- // This test is a little brittle. We want to move the item into the folder
- // such that we think we're dealing with a simple conflict, but in reality
- // it's actually a conflict set.
- mock_server_->SetMidCommitCallback(
- NewCallback<FolderMoveDeleteRenameTest>(this,
- &FolderMoveDeleteRenameTest::MoveBobIntoID2Runner));
- SyncShareAsDelegate();
SyncShareAsDelegate();
SyncShareAsDelegate();
{
diff --git a/chrome/browser/sync/test/integration/two_client_bookmarks_sync_test.cc b/chrome/browser/sync/test/integration/two_client_bookmarks_sync_test.cc
index 8f6059e..259a2f9 100644
--- a/chrome/browser/sync/test/integration/two_client_bookmarks_sync_test.cc
+++ b/chrome/browser/sync/test/integration/two_client_bookmarks_sync_test.cc
@@ -5,6 +5,7 @@
#include "base/rand_util.h"
#include "chrome/browser/profiles/profile.h"
#include "chrome/browser/sync/profile_sync_service_harness.h"
+#include "chrome/browser/sync/sessions/session_state.h"
#include "chrome/browser/sync/test/integration/bookmarks_helper.h"
#include "chrome/browser/sync/test/integration/sync_test.h"
@@ -37,6 +38,7 @@ const std::wstring kGenericURLTitle = L"URL Title";
const std::wstring kGenericFolderName = L"Folder Name";
const std::wstring kGenericSubfolderName = L"Subfolder Name";
const std::wstring kGenericSubsubfolderName = L"Subsubfolder Name";
+const char* kValidPassphrase = "passphrase!";
class TwoClientBookmarksSyncTest : public SyncTest {
public:
@@ -1786,6 +1788,50 @@ IN_PROC_BROWSER_TEST_F(TwoClientBookmarksSyncTest,
ASSERT_TRUE(AllModelsMatchVerifier());
}
-// TODO(zea): Add first time sync functionality testing. In particular, when
-// there are encrypted types in first time sync we need to ensure we don't
-// duplicate bookmarks.
+IN_PROC_BROWSER_TEST_F(TwoClientBookmarksSyncTest,
+ FirstClientEnablesEncryptionWithPassSecondChanges) {
+ ASSERT_TRUE(SetupSync()) << "SetupSync() failed.";
+ ASSERT_TRUE(AllModelsMatchVerifier());
+
+ // Add initial bookmarks.
+ ASSERT_TRUE(AddURL(0, 0, IndexedURLTitle(0), GURL(IndexedURL(0))) != NULL);
+ ASSERT_TRUE(AddURL(0, 1, IndexedURLTitle(1), GURL(IndexedURL(1))) != NULL);
+ ASSERT_TRUE(AddURL(0, 2, IndexedURLTitle(2), GURL(IndexedURL(2))) != NULL);
+ ASSERT_TRUE(GetClient(0)->AwaitMutualSyncCycleCompletion(GetClient(1)));
+ ASSERT_TRUE(AllModelsMatchVerifier());
+
+ // Set a passphrase and enable encryption on Client 0. Client 1 will not
+ // understand the bookmark updates.
+ GetClient(0)->service()->SetPassphrase(kValidPassphrase, true);
+ ASSERT_TRUE(GetClient(0)->AwaitPassphraseAccepted());
+ ASSERT_TRUE(EnableEncryption(0, syncable::BOOKMARKS));
+ ASSERT_TRUE(GetClient(0)->AwaitMutualSyncCycleCompletion(GetClient(1)));
+ ASSERT_TRUE(IsEncrypted(0, syncable::BOOKMARKS));
+ ASSERT_TRUE(IsEncrypted(1, syncable::BOOKMARKS));
+ ASSERT_TRUE(GetClient(1)->service()->IsPassphraseRequired());
+ ASSERT_GT(GetClient(1)->GetLastSessionSnapshot()->
+ num_conflicting_updates, 3); // The encrypted nodes.
+
+ // Client 1 adds bookmarks between the first two and between the second two.
+ ASSERT_TRUE(AddURL(0, 1, IndexedURLTitle(3), GURL(IndexedURL(3))) != NULL);
+ ASSERT_TRUE(AddURL(0, 3, IndexedURLTitle(4), GURL(IndexedURL(4))) != NULL);
+ EXPECT_FALSE(AllModelsMatchVerifier());
+ EXPECT_FALSE(AllModelsMatch());
+
+ // Set the passphrase. Everything should resolve.
+ GetClient(1)->service()->SetPassphrase(kValidPassphrase, true);
+ ASSERT_TRUE(GetClient(1)->AwaitPassphraseAccepted());
+ ASSERT_TRUE(GetClient(1)->AwaitMutualSyncCycleCompletion(GetClient(0)));
+ EXPECT_TRUE(AllModelsMatchVerifier());
+ EXPECT_TRUE(AllModelsMatch());
+ ASSERT_EQ(0, GetClient(1)->GetLastSessionSnapshot()->
+ num_conflicting_updates);
+
+ // Ensure everything is syncing normally by appending a final bookmark.
+ ASSERT_TRUE(AddURL(1, 5, IndexedURLTitle(5), GURL(IndexedURL(5))) != NULL);
+ ASSERT_TRUE(GetClient(1)->AwaitMutualSyncCycleCompletion(GetClient(0)));
+ EXPECT_TRUE(AllModelsMatchVerifier());
+ EXPECT_TRUE(AllModelsMatch());
+ ASSERT_EQ(0, GetClient(1)->GetLastSessionSnapshot()->
+ num_conflicting_updates);
+}
diff --git a/chrome/browser/sync/test/integration/two_client_sessions_sync_test.cc b/chrome/browser/sync/test/integration/two_client_sessions_sync_test.cc
index f0aa9bb..84c8a19 100644
--- a/chrome/browser/sync/test/integration/two_client_sessions_sync_test.cc
+++ b/chrome/browser/sync/test/integration/two_client_sessions_sync_test.cc
@@ -251,22 +251,20 @@ IN_PROC_BROWSER_TEST_F(TwoClientSessionsSyncTest,
ScopedWindowMap client1_windows;
ASSERT_TRUE(OpenTabAndGetLocalWindows(1, GURL(kURL1),
client1_windows.GetMutable()));
- ASSERT_TRUE(GetClient(1)->AwaitMutualSyncCycleCompletion(GetClient(0)));
- ASSERT_EQ(0, GetClient(1)->GetLastSessionSnapshot()->
- num_blocking_conflicting_updates);
- ASSERT_EQ(8, GetClient(1)->GetLastSessionSnapshot()->
- num_conflicting_updates); // The same encrypted nodes.
// At this point we enter the passphrase, triggering a resync, in which the
// local changes of client 1 get overwritten for now.
GetClient(1)->service()->SetPassphrase(kValidPassphrase, true);
ASSERT_TRUE(GetClient(1)->AwaitPassphraseAccepted());
ASSERT_TRUE(GetClient(1)->WaitForTypeEncryption(syncable::SESSIONS));
+ ASSERT_EQ(0, GetClient(1)->GetLastSessionSnapshot()->
+ num_conflicting_updates);
ASSERT_TRUE(IsEncrypted(0, syncable::SESSIONS));
ASSERT_TRUE(IsEncrypted(1, syncable::SESSIONS));
// The session data from client 1 got overwritten. As a result, client 0
- // should have no foreign session data.
+ // should have no foreign session data. TODO(zea): update this once bug 76596
+ // is resolved and we don't choose server wins on encryption conflicts.
SyncedSessionVector sessions0;
SyncedSessionVector sessions1;
ASSERT_FALSE(GetSessionData(0, &sessions0));