summaryrefslogtreecommitdiffstats
path: root/chrome/browser/sync
diff options
context:
space:
mode:
authorzea@chromium.org <zea@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2011-12-23 01:02:31 +0000
committerzea@chromium.org <zea@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2011-12-23 01:02:31 +0000
commit59f9bdcae3a25c83f3e01a35a438843ac102e17a (patch)
treeb0acac48c435402945dd0ef9d51a4b94bec002a1 /chrome/browser/sync
parent17ee04aac27f2bcccf58e9af0163843d9cde1649 (diff)
downloadchromium_src-59f9bdcae3a25c83f3e01a35a438843ac102e17a.zip
chromium_src-59f9bdcae3a25c83f3e01a35a438843ac102e17a.tar.gz
chromium_src-59f9bdcae3a25c83f3e01a35a438843ac102e17a.tar.bz2
[Sync] Implement encryption-aware conflict resolution.
There are two primary changes in this patch. 1. Added a new syncable column PREV_SERVER_SPECIFICS. This field stores the most recent set of decryptable SERVER_SPECIFICS iff the current SERVER_SPECIFICS is not decryptable and only the specifics have been modified. The field gets set in process_update_command and reset in conflict_resolver and process_update_command. 2. Due to the use of PREV_SERVER_SPECIFICS, we now take into account whether a server change actually modified anything when we perform conflict resolution. If the current SERVER_SPECIFICS matches PREV_SERVER_SPECIFICS, regardless of encryption, then we can overwrite the server data with the local data. This is possible because PREV_SERVER_SPECIFICS is only set when an update modifies specifics only (ignoring non_unique_name and mtime), and reset if a subsequent change modifies non-specifics data. As a result, we know that if PREV_SERVER_SPECIFICS are set, then we can determine whether a change is encryption only by inspecting SERVER_SPECIFICS alone. BUG=76596,105615 TEST=sync_unit_tests. Manually getting into a state where a passphrase is needed but local changes are made, and then verifying that the local changes persist after the passphrase is supplied. Review URL: http://codereview.chromium.org/8770032 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@115650 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'chrome/browser/sync')
-rw-r--r--chrome/browser/sync/engine/apply_updates_command_unittest.cc1
-rw-r--r--chrome/browser/sync/engine/conflict_resolver.cc131
-rw-r--r--chrome/browser/sync/engine/conflict_resolver.h13
-rw-r--r--chrome/browser/sync/engine/get_commit_ids_command.cc2
-rw-r--r--chrome/browser/sync/engine/process_updates_command.cc39
-rw-r--r--chrome/browser/sync/engine/process_updates_command.h3
-rw-r--r--chrome/browser/sync/engine/syncer_unittest.cc377
-rw-r--r--chrome/browser/sync/engine/syncer_util.cc73
-rw-r--r--chrome/browser/sync/syncable/directory_backing_store.cc19
-rw-r--r--chrome/browser/sync/syncable/directory_backing_store.h2
-rw-r--r--chrome/browser/sync/syncable/directory_backing_store_unittest.cc159
-rw-r--r--chrome/browser/sync/syncable/syncable.h2
-rw-r--r--chrome/browser/sync/syncable/syncable_columns.h1
-rw-r--r--chrome/browser/sync/syncable/syncable_enum_conversions.cc3
-rw-r--r--chrome/browser/sync/test/integration/two_client_sessions_sync_test.cc8
15 files changed, 634 insertions, 199 deletions
diff --git a/chrome/browser/sync/engine/apply_updates_command_unittest.cc b/chrome/browser/sync/engine/apply_updates_command_unittest.cc
index bd09da5..19bc480 100644
--- a/chrome/browser/sync/engine/apply_updates_command_unittest.cc
+++ b/chrome/browser/sync/engine/apply_updates_command_unittest.cc
@@ -20,6 +20,7 @@
#include "chrome/browser/sync/test/engine/fake_model_worker.h"
#include "chrome/browser/sync/test/engine/syncer_command_test.h"
#include "chrome/browser/sync/test/engine/test_id_factory.h"
+#include "chrome/browser/sync/util/cryptographer.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace browser_sync {
diff --git a/chrome/browser/sync/engine/conflict_resolver.cc b/chrome/browser/sync/engine/conflict_resolver.cc
index e37ac36..cca0c31 100644
--- a/chrome/browser/sync/engine/conflict_resolver.cc
+++ b/chrome/browser/sync/engine/conflict_resolver.cc
@@ -24,7 +24,9 @@ using std::set;
using syncable::BaseTransaction;
using syncable::Directory;
using syncable::Entry;
+using syncable::GetModelTypeFromSpecifics;
using syncable::Id;
+using syncable::IsRealDataType;
using syncable::MutableEntry;
using syncable::ScopedDirLookup;
using syncable::WriteTransaction;
@@ -38,17 +40,6 @@ namespace {
const int SYNC_CYCLES_BEFORE_ADMITTING_DEFEAT = 8;
-// Enumeration of different conflict resolutions. Used for histogramming.
-enum SimpleConflictResolutions {
- OVERWRITE_LOCAL, // Resolved by overwriting local changes.
- OVERWRITE_SERVER, // Resolved by overwriting server changes.
- UNDELETE, // Resolved by undeleting local item.
- IGNORE_ENCRYPTION, // Resolved by ignoring an encryption-only server
- // change. TODO(zea): implement and use this.
- NIGORI_MERGE, // Resolved by merging nigori nodes.
- CONFLICT_RESOLUTION_SIZE,
-};
-
} // namespace
ConflictResolver::ConflictResolver() {
@@ -61,7 +52,6 @@ void ConflictResolver::IgnoreLocalChanges(MutableEntry* entry) {
// An update matches local actions, merge the changes.
// This is a little fishy because we don't actually merge them.
// In the future we should do a 3-way merge.
- DVLOG(1) << "Resolving conflict by ignoring local changes:" << entry;
// With IS_UNSYNCED false, changes should be merged.
entry->Put(syncable::IS_UNSYNCED, false);
}
@@ -73,7 +63,6 @@ void ConflictResolver::OverwriteServerChanges(WriteTransaction* trans,
// made our local client changes.
// TODO(chron): This is really a general property clobber. We clobber
// the server side property. Perhaps we should actually do property merging.
- DVLOG(1) << "Resolving conflict by ignoring server changes:" << entry;
entry->Put(syncable::BASE_VERSION, entry->Get(syncable::SERVER_VERSION));
entry->Put(syncable::IS_UNAPPLIED_UPDATE, false);
}
@@ -114,8 +103,30 @@ ConflictResolver::ProcessSimpleConflict(WriteTransaction* trans,
return NO_SYNC_PROGRESS;
}
+ // This logic determines "client wins" vs. "server wins" strategy picking.
+ // By the time we get to this point, we rely on the following to be true:
+ // a) We can decrypt both the local and server data (else we'd be in
+ // conflict encryption and not attempting to resolve).
+ // b) All unsynced changes have been re-encrypted with the default key (
+ // occurs either in AttemptToUpdateEntry, SetPassphrase, or
+ // RefreshEncryption).
+ // c) Prev_server_specifics having a valid datatype means that we received
+ // an undecryptable update that only changed specifics, and since then have
+ // not received any further non-specifics-only or decryptable updates.
+ // d) If the server_specifics match specifics, server_specifics are
+ // encrypted with the default key, and all other visible properties match,
+ // then we can safely ignore the local changes as redundant.
+ // e) Otherwise if the base_server_specifics match the server_specifics, no
+ // functional change must have been made server-side (else
+ // base_server_specifics would have been cleared), and we can therefore
+ // safely ignore the server changes as redundant.
+ // f) Otherwise, it's in general safer to ignore local changes, with the
+ // exception of deletion conflicts (choose to undelete) and conflicts
+ // where the non_unique_name or parent don't match.
+ // TODO(sync): We can't compare position here without performing the
+ // NEXT_ID/PREV_ID -> position_in_parent interpolation. Fix that, and take it
+ // into account.
if (!entry.Get(syncable::SERVER_IS_DEL)) {
- // This logic determines "client wins" vs. "server wins" strategy picking.
// TODO(nick): The current logic is arbitrary; instead, it ought to be made
// consistent with the ModelAssociator behavior for a datatype. It would
// be nice if we could route this back to ModelAssociator code to pick one
@@ -127,6 +138,49 @@ ConflictResolver::ProcessSimpleConflict(WriteTransaction* trans,
bool parent_matches = entry.Get(syncable::PARENT_ID) ==
entry.Get(syncable::SERVER_PARENT_ID);
bool entry_deleted = entry.Get(syncable::IS_DEL);
+ const sync_pb::EntitySpecifics& specifics =
+ entry.Get(syncable::SPECIFICS);
+ const sync_pb::EntitySpecifics& server_specifics =
+ entry.Get(syncable::SERVER_SPECIFICS);
+ const sync_pb::EntitySpecifics& base_server_specifics =
+ entry.Get(syncable::BASE_SERVER_SPECIFICS);
+ std::string decrypted_specifics, decrypted_server_specifics;
+ bool specifics_match = false;
+ bool server_encrypted_with_default_key = false;
+ if (specifics.has_encrypted()) {
+ DCHECK(cryptographer->CanDecryptUsingDefaultKey(specifics.encrypted()));
+ decrypted_specifics = cryptographer->DecryptToString(
+ specifics.encrypted());
+ } else {
+ decrypted_specifics = specifics.SerializeAsString();
+ }
+ if (server_specifics.has_encrypted()) {
+ server_encrypted_with_default_key =
+ cryptographer->CanDecryptUsingDefaultKey(
+ server_specifics.encrypted());
+ decrypted_server_specifics = cryptographer->DecryptToString(
+ server_specifics.encrypted());
+ } else {
+ decrypted_server_specifics = server_specifics.SerializeAsString();
+ }
+ if (decrypted_server_specifics == decrypted_specifics &&
+ server_encrypted_with_default_key == specifics.has_encrypted()) {
+ specifics_match = true;
+ }
+ bool base_server_specifics_match = false;
+ if (server_specifics.has_encrypted() &&
+ IsRealDataType(GetModelTypeFromSpecifics(base_server_specifics))) {
+ std::string decrypted_base_server_specifics;
+ if (!base_server_specifics.has_encrypted()) {
+ decrypted_base_server_specifics =
+ base_server_specifics.SerializeAsString();
+ } else {
+ decrypted_base_server_specifics = cryptographer->DecryptToString(
+ base_server_specifics.encrypted());
+ }
+ if (decrypted_server_specifics == decrypted_base_server_specifics)
+ base_server_specifics_match = true;
+ }
// We manually merge nigori data.
if (entry.GetModelType() == syncable::NIGORI) {
@@ -154,6 +208,8 @@ ConflictResolver::ProcessSimpleConflict(WriteTransaction* trans,
.sync_tabs()) {
nigori->set_sync_tabs(true);
}
+ // We deliberately leave the server's device information. This client will
+ // add its own device information on restart.
entry.Put(syncable::SPECIFICS, specifics);
DVLOG(1) << "Resovling simple conflict, merging nigori nodes: " << entry;
status->increment_num_server_overwrites();
@@ -161,25 +217,48 @@ ConflictResolver::ProcessSimpleConflict(WriteTransaction* trans,
UMA_HISTOGRAM_ENUMERATION("Sync.ResolveSimpleConflict",
NIGORI_MERGE,
CONFLICT_RESOLUTION_SIZE);
- } else if (!entry_deleted && name_matches && parent_matches) {
- // TODO(zea): We may prefer to choose the local changes over the server
- // if we know the local changes happened before (or vice versa).
- // See http://crbug.com/76596
- DVLOG(1) << "Resolving simple conflict, ignoring local changes for:"
- << entry;
+ } else if (!entry_deleted && name_matches && parent_matches &&
+ specifics_match) {
+ DVLOG(1) << "Resolving simple conflict, everything matches, ignoring "
+ << "changes for: " << entry;
+ // This unsets both IS_UNSYNCED and IS_UNAPPLIED_UPDATE, and sets the
+ // BASE_VERSION to match the SERVER_VERSION. If we didn't also unset
+ // IS_UNAPPLIED_UPDATE, then we would lose unsynced positional data
+ // when the server update gets applied and the item is re-inserted into
+ // the PREV_ID/NEXT_ID linked list (see above TODO about comparing
+ // positional info).
+ OverwriteServerChanges(trans, &entry);
IgnoreLocalChanges(&entry);
- status->increment_num_local_overwrites();
UMA_HISTOGRAM_ENUMERATION("Sync.ResolveSimpleConflict",
- OVERWRITE_LOCAL,
+ CHANGES_MATCH,
CONFLICT_RESOLUTION_SIZE);
- } else {
- DVLOG(1) << "Resolving simple conflict, overwriting server changes for:"
- << entry;
+ } else if (base_server_specifics_match) {
+ DVLOG(1) << "Resolving simple conflict, ignoring server encryption "
+ << " changes for: " << entry;
+ status->increment_num_server_overwrites();
+ OverwriteServerChanges(trans, &entry);
+ UMA_HISTOGRAM_ENUMERATION("Sync.ResolveSimpleConflict",
+ IGNORE_ENCRYPTION,
+ CONFLICT_RESOLUTION_SIZE);
+ // Now that we've resolved the conflict, clear the prev server
+ // specifics.
+ entry.Put(syncable::BASE_SERVER_SPECIFICS, sync_pb::EntitySpecifics());
+ } else if (entry_deleted || !name_matches || !parent_matches) {
OverwriteServerChanges(trans, &entry);
status->increment_num_server_overwrites();
+ DVLOG(1) << "Resolving simple conflict, overwriting server changes "
+ << "for: " << entry;
UMA_HISTOGRAM_ENUMERATION("Sync.ResolveSimpleConflict",
OVERWRITE_SERVER,
CONFLICT_RESOLUTION_SIZE);
+ } else {
+ DVLOG(1) << "Resolving simple conflict, ignoring local changes for: "
+ << entry;
+ IgnoreLocalChanges(&entry);
+ status->increment_num_local_overwrites();
+ UMA_HISTOGRAM_ENUMERATION("Sync.ResolveSimpleConflict",
+ OVERWRITE_LOCAL,
+ CONFLICT_RESOLUTION_SIZE);
}
return SYNC_PROGRESS;
} else { // SERVER_IS_DEL is true
@@ -205,6 +284,8 @@ ConflictResolver::ProcessSimpleConflict(WriteTransaction* trans,
"when server-deleted.";
OverwriteServerChanges(trans, &entry);
status->increment_num_server_overwrites();
+ DVLOG(1) << "Resolving simple conflict, undeleting server entry: "
+ << entry;
UMA_HISTOGRAM_ENUMERATION("Sync.ResolveSimpleConflict",
OVERWRITE_SERVER,
CONFLICT_RESOLUTION_SIZE);
diff --git a/chrome/browser/sync/engine/conflict_resolver.h b/chrome/browser/sync/engine/conflict_resolver.h
index b1cab71..5382f45 100644
--- a/chrome/browser/sync/engine/conflict_resolver.h
+++ b/chrome/browser/sync/engine/conflict_resolver.h
@@ -39,6 +39,19 @@ class ConflictResolver {
FRIEND_TEST_ALL_PREFIXES(SyncerTest,
ConflictResolverMergeOverwritesLocalEntry);
public:
+ // Enumeration of different conflict resolutions. Used for histogramming.
+ enum SimpleConflictResolutions {
+ OVERWRITE_LOCAL, // Resolved by overwriting local changes.
+ OVERWRITE_SERVER, // Resolved by overwriting server changes.
+ UNDELETE, // Resolved by undeleting local item.
+ IGNORE_ENCRYPTION, // Resolved by ignoring an encryption-only server
+ // change.
+ NIGORI_MERGE, // Resolved by merging nigori nodes.
+ CHANGES_MATCH, // Resolved by ignoring both local and server
+ // changes because they matched.
+ CONFLICT_RESOLUTION_SIZE,
+ };
+
ConflictResolver();
~ConflictResolver();
// Called by the syncer at the end of a update/commit cycle.
diff --git a/chrome/browser/sync/engine/get_commit_ids_command.cc b/chrome/browser/sync/engine/get_commit_ids_command.cc
index fa847f7..720c063 100644
--- a/chrome/browser/sync/engine/get_commit_ids_command.cc
+++ b/chrome/browser/sync/engine/get_commit_ids_command.cc
@@ -87,7 +87,6 @@ bool IsEntryReadyForCommit(syncable::ModelTypeSet encrypted_types,
// The local and server versions don't match. The item must be in
// conflict, so there's no point in attempting to commit.
DCHECK(entry.Get(syncable::IS_UNAPPLIED_UPDATE)); // In conflict.
- // TODO(zea): switch this to DVLOG once it's clear bug 100660 is fixed.
DVLOG(1) << "Excluding entry from commit due to version mismatch "
<< entry;
return false;
@@ -104,7 +103,6 @@ bool IsEntryReadyForCommit(syncable::ModelTypeSet encrypted_types,
// This entry requires encryption but is not properly encrypted (possibly
// due to the cryptographer not being initialized or the user hasn't
// provided the most recent passphrase).
- // TODO(zea): switch this to DVLOG once it's clear bug 100660 is fixed.
DVLOG(1) << "Excluding entry from commit due to lack of encryption "
<< entry;
return false;
diff --git a/chrome/browser/sync/engine/process_updates_command.cc b/chrome/browser/sync/engine/process_updates_command.cc
index 4781250..7ab56d5 100644
--- a/chrome/browser/sync/engine/process_updates_command.cc
+++ b/chrome/browser/sync/engine/process_updates_command.cc
@@ -15,6 +15,7 @@
#include "chrome/browser/sync/sessions/sync_session.h"
#include "chrome/browser/sync/syncable/directory_manager.h"
#include "chrome/browser/sync/syncable/syncable.h"
+#include "chrome/browser/sync/util/cryptographer.h"
using std::vector;
@@ -54,7 +55,9 @@ void ProcessUpdatesCommand::ModelChangingExecuteImpl(SyncSession* session) {
if (it->first != VERIFY_SUCCESS && it->first != VERIFY_UNDELETE)
continue;
- switch (ProcessUpdate(dir, update, &trans)) {
+ switch (ProcessUpdate(dir, update,
+ session->context()->directory_manager()->GetCryptographer(&trans),
+ &trans)) {
case SUCCESS_PROCESSED:
case SUCCESS_STORED:
break;
@@ -91,6 +94,7 @@ bool ReverifyEntry(syncable::WriteTransaction* trans, const SyncEntity& entry,
ServerUpdateProcessingResult ProcessUpdatesCommand::ProcessUpdate(
const syncable::ScopedDirLookup& dir,
const sync_pb::SyncEntity& proto_update,
+ const Cryptographer* cryptographer,
syncable::WriteTransaction* const trans) {
const SyncEntity& update = *static_cast<const SyncEntity*>(&proto_update);
@@ -144,6 +148,39 @@ ServerUpdateProcessingResult ProcessUpdatesCommand::ProcessUpdate(
target_entry.Put(syncable::IS_UNAPPLIED_UPDATE, true);
}
+ // If this is a newly received undecryptable update, and the only thing that
+ // has changed are the specifics, store the original decryptable specifics,
+ // (on which any current or future local changes are based) before we
+ // overwrite SERVER_SPECIFICS.
+ // MTIME, CTIME, and NON_UNIQUE_NAME are not enforced.
+ if (!update.deleted() && !target_entry.Get(syncable::SERVER_IS_DEL) &&
+ (update.parent_id() == target_entry.Get(syncable::SERVER_PARENT_ID)) &&
+ (update.position_in_parent() ==
+ target_entry.Get(syncable::SERVER_POSITION_IN_PARENT)) &&
+ update.has_specifics() && update.specifics().has_encrypted() &&
+ !cryptographer->CanDecrypt(update.specifics().encrypted())) {
+ sync_pb::EntitySpecifics prev_specifics =
+ target_entry.Get(syncable::SERVER_SPECIFICS);
+ // We only store the old specifics if they were decryptable and applied and
+ // there is no BASE_SERVER_SPECIFICS already. Else do nothing.
+ if (!target_entry.Get(syncable::IS_UNAPPLIED_UPDATE) &&
+ !syncable::IsRealDataType(syncable::GetModelTypeFromSpecifics(
+ target_entry.Get(syncable::BASE_SERVER_SPECIFICS))) &&
+ (!prev_specifics.has_encrypted() ||
+ cryptographer->CanDecrypt(prev_specifics.encrypted()))) {
+ DVLOG(2) << "Storing previous server specifcs: "
+ << prev_specifics.SerializeAsString();
+ target_entry.Put(syncable::BASE_SERVER_SPECIFICS, prev_specifics);
+ }
+ } else if (syncable::IsRealDataType(syncable::GetModelTypeFromSpecifics(
+ target_entry.Get(syncable::BASE_SERVER_SPECIFICS)))) {
+ // We have a BASE_SERVER_SPECIFICS, but a subsequent non-specifics-only
+ // change arrived. As a result, we can't use the specifics alone to detect
+ // changes, so we clear BASE_SERVER_SPECIFICS.
+ target_entry.Put(syncable::BASE_SERVER_SPECIFICS,
+ sync_pb::EntitySpecifics());
+ }
+
SyncerUtil::UpdateServerFieldsFromUpdate(&target_entry, update, name);
return SUCCESS_PROCESSED;
diff --git a/chrome/browser/sync/engine/process_updates_command.h b/chrome/browser/sync/engine/process_updates_command.h
index 862884f..d5dd2be 100644
--- a/chrome/browser/sync/engine/process_updates_command.h
+++ b/chrome/browser/sync/engine/process_updates_command.h
@@ -21,6 +21,8 @@ class SyncEntity;
namespace browser_sync {
+class Cryptographer;
+
// A syncer command for processing updates.
//
// Preconditions - updates in the SyncerSesssion have been downloaded
@@ -45,6 +47,7 @@ class ProcessUpdatesCommand : public ModelChangingSyncerCommand {
ServerUpdateProcessingResult ProcessUpdate(
const syncable::ScopedDirLookup& dir,
const sync_pb::SyncEntity& proto_update,
+ const Cryptographer* cryptographer,
syncable::WriteTransaction* const trans);
DISALLOW_COPY_AND_ASSIGN(ProcessUpdatesCommand);
};
diff --git a/chrome/browser/sync/engine/syncer_unittest.cc b/chrome/browser/sync/engine/syncer_unittest.cc
index 8ff3a44..611aabf 100644
--- a/chrome/browser/sync/engine/syncer_unittest.cc
+++ b/chrome/browser/sync/engine/syncer_unittest.cc
@@ -35,6 +35,7 @@
#include "chrome/browser/sync/protocol/sync.pb.h"
#include "chrome/browser/sync/protocol/bookmark_specifics.pb.h"
#include "chrome/browser/sync/protocol/nigori_specifics.pb.h"
+#include "chrome/browser/sync/protocol/preference_specifics.pb.h"
#include "chrome/browser/sync/sessions/sync_session_context.h"
#include "chrome/browser/sync/syncable/directory_manager.h"
#include "chrome/browser/sync/syncable/model_type.h"
@@ -89,6 +90,7 @@ using syncable::NEXT_ID;
using syncable::NON_UNIQUE_NAME;
using syncable::PARENT_ID;
using syncable::PREV_ID;
+using syncable::BASE_SERVER_SPECIFICS;
using syncable::SERVER_IS_DEL;
using syncable::SERVER_NON_UNIQUE_NAME;
using syncable::SERVER_PARENT_ID;
@@ -180,9 +182,9 @@ class SyncerTest : public testing::Test,
}
bool SyncShareAsDelegate() {
- scoped_ptr<SyncSession> session(MakeSession());
- syncer_->SyncShare(session.get(), SYNCER_BEGIN, SYNCER_END);
- return session->HasMoreToSync();
+ session_.reset(MakeSession());
+ syncer_->SyncShare(session_.get(), SYNCER_BEGIN, SYNCER_END);
+ return session_->HasMoreToSync();
}
void LoopSyncShare() {
@@ -201,6 +203,8 @@ class SyncerTest : public testing::Test,
new MockConnectionManager(syncdb_.manager(), syncdb_.name()));
EnableDatatype(syncable::BOOKMARKS);
EnableDatatype(syncable::NIGORI);
+ EnableDatatype(syncable::PREFERENCES);
+ EnableDatatype(syncable::NIGORI);
worker_ = new FakeModelWorker(GROUP_PASSIVE);
std::vector<SyncEngineEventListener*> listeners;
listeners.push_back(this);
@@ -563,7 +567,6 @@ TEST_F(SyncerTest, GetCommitIdsFiltersThrottledEntries) {
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
ASSERT_TRUE(dir.good());
const syncable::ModelTypeSet throttled_types(syncable::BOOKMARKS);
- KeyParams key_params = {"localhost", "dummy", "foobar"};
sync_pb::EntitySpecifics bookmark_data;
AddDefaultExtensionValue(syncable::BOOKMARKS, &bookmark_data);
@@ -607,13 +610,31 @@ TEST_F(SyncerTest, GetCommitIdsFiltersThrottledEntries) {
}
}
+// We use a macro so we can preserve the error location.
+#define VERIFY_ENTRY(id, is_unapplied, is_unsynced, prev_initialized, \
+ parent_id, version, server_version, id_fac, rtrans) \
+ do { \
+ Entry entryA(rtrans, syncable::GET_BY_ID, id_fac.FromNumber(id)); \
+ ASSERT_TRUE(entryA.good()); \
+ EXPECT_EQ(is_unsynced, entryA.Get(IS_UNSYNCED)); \
+ EXPECT_EQ(is_unapplied, entryA.Get(IS_UNAPPLIED_UPDATE)); \
+ EXPECT_EQ(prev_initialized, \
+ syncable::IsRealDataType(syncable::GetModelTypeFromSpecifics( \
+ entryA.Get(BASE_SERVER_SPECIFICS)))); \
+ EXPECT_TRUE(parent_id == -1 || \
+ entryA.Get(PARENT_ID) == id_fac.FromNumber(parent_id)); \
+ EXPECT_EQ(version, entryA.Get(BASE_VERSION)); \
+ EXPECT_EQ(server_version, entryA.Get(SERVER_VERSION)); \
+ } while (0)
+
TEST_F(SyncerTest, GetCommitIdsFiltersUnreadyEntries) {
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
ASSERT_TRUE(dir.good());
KeyParams key_params = {"localhost", "dummy", "foobar"};
- sync_pb::EncryptedData encrypted;
- sync_pb::EntitySpecifics encrypted_bookmark;
- encrypted_bookmark.mutable_encrypted();
+ KeyParams other_params = {"localhost", "dummy", "foobar2"};
+ sync_pb::EntitySpecifics bookmark, encrypted_bookmark;
+ bookmark.MutableExtension(sync_pb::bookmark)->set_url("url");
+ bookmark.MutableExtension(sync_pb::bookmark)->set_title("title");
AddDefaultExtensionValue(syncable::BOOKMARKS, &encrypted_bookmark);
mock_server_->AddUpdateDirectory(1, 0, "A", 10, 10);
mock_server_->AddUpdateDirectory(2, 0, "B", 10, 10);
@@ -627,12 +648,17 @@ TEST_F(SyncerTest, GetCommitIdsFiltersUnreadyEntries) {
// keys.
WriteTransaction wtrans(FROM_HERE, UNITTEST, dir);
browser_sync::Cryptographer other_cryptographer;
- other_cryptographer.AddKey(key_params);
+ other_cryptographer.AddKey(other_params);
sync_pb::EntitySpecifics specifics;
sync_pb::NigoriSpecifics* nigori =
specifics.MutableExtension(sync_pb::nigori);
other_cryptographer.GetKeys(nigori->mutable_encrypted());
nigori->set_encrypt_bookmarks(true);
+ // Set up with an old passphrase, but have pending keys
+ syncdb_.manager()->GetCryptographer(&wtrans)->AddKey(key_params);
+ syncdb_.manager()->GetCryptographer(&wtrans)->Encrypt(
+ bookmark,
+ encrypted_bookmark.mutable_encrypted());
syncdb_.manager()->GetCryptographer(&wtrans)->Update(*nigori);
// In conflict but properly encrypted.
@@ -663,55 +689,36 @@ TEST_F(SyncerTest, GetCommitIdsFiltersUnreadyEntries) {
{
// We remove any unready entries from the status controller's unsynced
// handles, so this should remain 0 even though the entries didn't commit.
- ASSERT_EQ(0U, session_->status_controller().unsynced_handles().size());
+ EXPECT_EQ(0U, session_->status_controller().unsynced_handles().size());
// Nothing should have committed due to bookmarks being encrypted and
// the cryptographer having pending keys. A would have been resolved
// as a simple conflict, but still be unsynced until the next sync cycle.
ReadTransaction rtrans(FROM_HERE, dir);
- Entry entryA(&rtrans, syncable::GET_BY_ID, ids_.FromNumber(1));
- ASSERT_TRUE(entryA.good());
- EXPECT_TRUE(entryA.Get(IS_UNSYNCED));
- Entry entryB(&rtrans, syncable::GET_BY_ID, ids_.FromNumber(2));
- ASSERT_TRUE(entryB.good());
- EXPECT_TRUE(entryB.Get(IS_UNSYNCED));
- Entry entryC(&rtrans, syncable::GET_BY_ID, ids_.FromNumber(3));
- ASSERT_TRUE(entryC.good());
- EXPECT_TRUE(entryC.Get(IS_UNSYNCED));
- Entry entryD(&rtrans, syncable::GET_BY_ID, ids_.FromNumber(4));
- ASSERT_TRUE(entryD.good());
- EXPECT_TRUE(entryD.Get(IS_UNSYNCED));
+ VERIFY_ENTRY(1, false, true, false, 0, 20, 20, ids_, &rtrans);
+ VERIFY_ENTRY(2, false, true, false, 0, 10, 10, ids_, &rtrans);
+ VERIFY_ENTRY(3, false, true, false, 0, 10, 10, ids_, &rtrans);
+ VERIFY_ENTRY(4, false, true, false, 0, 10, 10, ids_, &rtrans);
// Resolve the pending keys.
syncdb_.manager()->GetCryptographer(&rtrans)->DecryptPendingKeys(
- key_params);
+ other_params);
}
SyncShareAsDelegate();
{
- ASSERT_EQ(0U, session_->status_controller().unsynced_handles().size());
+ // 2 unsynced handles to reflect the items that committed successfully.
+ EXPECT_EQ(2U, session_->status_controller().unsynced_handles().size());
// All properly encrypted and non-conflicting items should commit. "A" was
// conflicting, but last sync cycle resolved it as simple conflict, so on
// this sync cycle it committed successfully.
ReadTransaction rtrans(FROM_HERE, dir);
// Committed successfully.
- Entry entryA(&rtrans, syncable::GET_BY_ID, ids_.FromNumber(1));
- ASSERT_TRUE(entryA.good());
- EXPECT_FALSE(entryA.Get(IS_UNSYNCED));
- EXPECT_FALSE(entryA.Get(IS_UNAPPLIED_UPDATE));
+ VERIFY_ENTRY(1, false, false, false, 0, 21, 21, ids_, &rtrans);
// Committed successfully.
- Entry entryB(&rtrans, syncable::GET_BY_ID, ids_.FromNumber(2));
- ASSERT_TRUE(entryB.good());
- EXPECT_FALSE(entryB.Get(IS_UNSYNCED));
- EXPECT_FALSE(entryB.Get(IS_UNAPPLIED_UPDATE));
+ VERIFY_ENTRY(2, false, false, false, 0, 11, 11, ids_, &rtrans);
// Was not properly encrypted.
- Entry entryC(&rtrans, syncable::GET_BY_ID, ids_.FromNumber(3));
- ASSERT_TRUE(entryC.good());
- EXPECT_TRUE(entryC.Get(IS_UNSYNCED));
- EXPECT_FALSE(entryC.Get(IS_UNAPPLIED_UPDATE));
+ VERIFY_ENTRY(3, false, true, false, 0, 10, 10, ids_, &rtrans);
// Was not properly encrypted.
- Entry entryD(&rtrans, syncable::GET_BY_ID, ids_.FromNumber(4));
- ASSERT_TRUE(entryD.good());
- EXPECT_TRUE(entryD.Get(IS_UNSYNCED));
- EXPECT_FALSE(entryD.Get(IS_UNAPPLIED_UPDATE));
+ VERIFY_ENTRY(4, false, true, false, 0, 10, 10, ids_, &rtrans);
}
{
// Fix the remaining items.
@@ -727,28 +734,205 @@ TEST_F(SyncerTest, GetCommitIdsFiltersUnreadyEntries) {
}
SyncShareAsDelegate();
{
- ASSERT_EQ(0U, session_->status_controller().unsynced_handles().size());
+ // We attempted to commit two items.
+ EXPECT_EQ(2U, session_->status_controller().unsynced_handles().size());
+ EXPECT_TRUE(session_->status_controller().did_commit_items());
// None should be unsynced anymore.
ReadTransaction rtrans(FROM_HERE, dir);
- Entry entryA(&rtrans, syncable::GET_BY_ID, ids_.FromNumber(1));
- ASSERT_TRUE(entryA.good());
- EXPECT_FALSE(entryA.Get(IS_UNSYNCED));
- EXPECT_FALSE(entryA.Get(IS_UNAPPLIED_UPDATE));
- Entry entryB(&rtrans, syncable::GET_BY_ID, ids_.FromNumber(2));
- ASSERT_TRUE(entryB.good());
- EXPECT_FALSE(entryB.Get(IS_UNSYNCED));
- EXPECT_FALSE(entryB.Get(IS_UNAPPLIED_UPDATE));
- Entry entryC(&rtrans, syncable::GET_BY_ID, ids_.FromNumber(3));
- ASSERT_TRUE(entryC.good());
- EXPECT_FALSE(entryC.Get(IS_UNSYNCED));
- EXPECT_FALSE(entryC.Get(IS_UNAPPLIED_UPDATE));
- Entry entryD(&rtrans, syncable::GET_BY_ID, ids_.FromNumber(4));
- ASSERT_TRUE(entryD.good());
- EXPECT_FALSE(entryD.Get(IS_UNSYNCED));
- EXPECT_FALSE(entryD.Get(IS_UNAPPLIED_UPDATE));
+ VERIFY_ENTRY(1, false, false, false, 0, 21, 21, ids_, &rtrans);
+ VERIFY_ENTRY(2, false, false, false, 0, 11, 11, ids_, &rtrans);
+ VERIFY_ENTRY(3, false, false, false, 0, 11, 11, ids_, &rtrans);
+ VERIFY_ENTRY(4, false, false, false, 0, 11, 11, ids_, &rtrans);
}
}
+TEST_F(SyncerTest, EncryptionAwareConflicts) {
+ ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
+ ASSERT_TRUE(dir.good());
+ KeyParams key_params = {"localhost", "dummy", "foobar"};
+ browser_sync::Cryptographer other_cryptographer;
+ other_cryptographer.AddKey(key_params);
+ sync_pb::EntitySpecifics bookmark, encrypted_bookmark, modified_bookmark;
+ bookmark.MutableExtension(sync_pb::bookmark)->set_title("title");
+ other_cryptographer.Encrypt(bookmark,
+ encrypted_bookmark.mutable_encrypted());
+ AddDefaultExtensionValue(syncable::BOOKMARKS, &encrypted_bookmark);
+ modified_bookmark.MutableExtension(sync_pb::bookmark)->set_title("title2");
+ other_cryptographer.Encrypt(modified_bookmark,
+ modified_bookmark.mutable_encrypted());
+ sync_pb::EntitySpecifics pref, encrypted_pref, modified_pref;
+ pref.MutableExtension(sync_pb::preference)->set_name("name");
+ AddDefaultExtensionValue(syncable::PREFERENCES, &encrypted_pref);
+ other_cryptographer.Encrypt(pref,
+ encrypted_pref.mutable_encrypted());
+ modified_pref.MutableExtension(sync_pb::preference)->set_name("name2");
+ other_cryptographer.Encrypt(modified_pref,
+ modified_pref.mutable_encrypted());
+ {
+ // Mark bookmarks and preferences as encrypted and set the cryptographer to
+ // have pending keys.
+ WriteTransaction wtrans(FROM_HERE, UNITTEST, dir);
+ sync_pb::EntitySpecifics specifics;
+ sync_pb::NigoriSpecifics* nigori =
+ specifics.MutableExtension(sync_pb::nigori);
+ other_cryptographer.GetKeys(nigori->mutable_encrypted());
+ nigori->set_encrypt_bookmarks(true);
+ nigori->set_encrypt_preferences(true);
+ syncdb_.manager()->GetCryptographer(&wtrans)->Update(*nigori);
+ EXPECT_TRUE(syncdb_.manager()->GetCryptographer(&wtrans)->
+ has_pending_keys());
+ }
+
+ mock_server_->AddUpdateSpecifics(1, 0, "A", 10, 10, true, 0, bookmark);
+ mock_server_->AddUpdateSpecifics(2, 1, "B", 10, 10, false, 2, bookmark);
+ mock_server_->AddUpdateSpecifics(3, 1, "C", 10, 10, false, 1, bookmark);
+ mock_server_->AddUpdateSpecifics(4, 0, "D", 10, 10, false, 0, pref);
+ SyncShareAsDelegate();
+ {
+ EXPECT_EQ(0U, session_->status_controller().unsynced_handles().size());
+ // Initial state. Everything is normal.
+ ReadTransaction rtrans(FROM_HERE, dir);
+ VERIFY_ENTRY(1, false, false, false, 0, 10, 10, ids_, &rtrans);
+ VERIFY_ENTRY(2, false, false, false, 1, 10, 10, ids_, &rtrans);
+ VERIFY_ENTRY(3, false, false, false, 1, 10, 10, ids_, &rtrans);
+ VERIFY_ENTRY(4, false, false, false, 0, 10, 10, ids_, &rtrans);
+ }
+
+ // Server side encryption will not be applied due to undecryptable data.
+ // At this point, BASE_SERVER_SPECIFICS should be filled for all four items.
+ mock_server_->AddUpdateSpecifics(1, 0, kEncryptedString, 20, 20, true, 0,
+ encrypted_bookmark);
+ mock_server_->AddUpdateSpecifics(2, 1, kEncryptedString, 20, 20, false, 2,
+ encrypted_bookmark);
+ mock_server_->AddUpdateSpecifics(3, 1, kEncryptedString, 20, 20, false, 1,
+ encrypted_bookmark);
+ mock_server_->AddUpdateSpecifics(4, 0, kEncryptedString, 20, 20, false, 0,
+ encrypted_pref);
+ SyncShareAsDelegate();
+ {
+ EXPECT_EQ(0U, session_->status_controller().unsynced_handles().size());
+ // All should be unapplied due to being undecryptable and have a valid
+ // BASE_SERVER_SPECIFICS.
+ ReadTransaction rtrans(FROM_HERE, dir);
+ VERIFY_ENTRY(1, true, false, true, 0, 10, 20, ids_, &rtrans);
+ VERIFY_ENTRY(2, true, false, true, 1, 10, 20, ids_, &rtrans);
+ VERIFY_ENTRY(3, true, false, true, 1, 10, 20, ids_, &rtrans);
+ VERIFY_ENTRY(4, true, false, true, 0, 10, 20, ids_, &rtrans);
+ }
+
+ // Server side changes that don't modify anything should not affect
+ // BASE_SERVER_SPECIFICS (such as name changes and mtime changes).
+ mock_server_->AddUpdateSpecifics(1, 0, kEncryptedString, 30, 30, true, 0,
+ encrypted_bookmark);
+ mock_server_->AddUpdateSpecifics(2, 1, kEncryptedString, 30, 30, false, 2,
+ encrypted_bookmark);
+ // Item 3 doesn't change.
+ mock_server_->AddUpdateSpecifics(4, 0, kEncryptedString, 30, 30, false, 0,
+ encrypted_pref);
+ SyncShareAsDelegate();
+ {
+ EXPECT_EQ(0U, session_->status_controller().unsynced_handles().size());
+ // Items 1, 2, and 4 should have newer server versions, 3 remains the same.
+ // All should remain unapplied due to being undecryptable.
+ ReadTransaction rtrans(FROM_HERE, dir);
+ VERIFY_ENTRY(1, true, false, true, 0, 10, 30, ids_, &rtrans);
+ VERIFY_ENTRY(2, true, false, true, 1, 10, 30, ids_, &rtrans);
+ VERIFY_ENTRY(3, true, false, true, 1, 10, 20, ids_, &rtrans);
+ VERIFY_ENTRY(4, true, false, true, 0, 10, 30, ids_, &rtrans);
+ }
+
+ // Positional changes, parent changes, and specifics changes should reset
+ // BASE_SERVER_SPECIFICS.
+ // Became unencrypted.
+ mock_server_->AddUpdateSpecifics(1, 0, "A", 40, 40, true, 0, bookmark);
+ // Reordered to after item 2.
+ mock_server_->AddUpdateSpecifics(3, 1, kEncryptedString, 30, 30, false, 3,
+ encrypted_bookmark);
+ SyncShareAsDelegate();
+ {
+ EXPECT_EQ(0U, session_->status_controller().unsynced_handles().size());
+ // Items 2 and 4 should be the only ones with BASE_SERVER_SPECIFICS set.
+ // Item 1 is now unencrypted, so should have applied normally.
+ ReadTransaction rtrans(FROM_HERE, dir);
+ VERIFY_ENTRY(1, false, false, false, 0, 40, 40, ids_, &rtrans);
+ VERIFY_ENTRY(2, true, false, true, 1, 10, 30, ids_, &rtrans);
+ VERIFY_ENTRY(3, true, false, false, 1, 10, 30, ids_, &rtrans);
+ VERIFY_ENTRY(4, true, false, true, 0, 10, 30, ids_, &rtrans);
+ }
+
+ // Make local changes, which should remain unsynced for items 2, 3, 4.
+ {
+ WriteTransaction wtrans(FROM_HERE, UNITTEST, dir);
+ MutableEntry A(&wtrans, GET_BY_ID, ids_.FromNumber(1));
+ ASSERT_TRUE(A.good());
+ A.Put(SPECIFICS, modified_bookmark);
+ A.Put(NON_UNIQUE_NAME, kEncryptedString);
+ A.Put(IS_UNSYNCED, true);
+ MutableEntry B(&wtrans, GET_BY_ID, ids_.FromNumber(2));
+ ASSERT_TRUE(B.good());
+ B.Put(SPECIFICS, modified_bookmark);
+ B.Put(NON_UNIQUE_NAME, kEncryptedString);
+ B.Put(IS_UNSYNCED, true);
+ MutableEntry C(&wtrans, GET_BY_ID, ids_.FromNumber(3));
+ ASSERT_TRUE(C.good());
+ C.Put(SPECIFICS, modified_bookmark);
+ C.Put(NON_UNIQUE_NAME, kEncryptedString);
+ C.Put(IS_UNSYNCED, true);
+ MutableEntry D(&wtrans, GET_BY_ID, ids_.FromNumber(4));
+ ASSERT_TRUE(D.good());
+ D.Put(SPECIFICS, modified_pref);
+ D.Put(NON_UNIQUE_NAME, kEncryptedString);
+ D.Put(IS_UNSYNCED, true);
+ }
+ SyncShareAsDelegate();
+ {
+ EXPECT_EQ(0U, session_->status_controller().unsynced_handles().size());
+ // Item 1 remains unsynced due to there being pending keys.
+ // Items 2, 3, 4 should remain unsynced since they were not up to date.
+ ReadTransaction rtrans(FROM_HERE, dir);
+ VERIFY_ENTRY(1, false, true, false, 0, 40, 40, ids_, &rtrans);
+ VERIFY_ENTRY(2, true, true, true, 1, 10, 30, ids_, &rtrans);
+ VERIFY_ENTRY(3, true, true, false, 1, 10, 30, ids_, &rtrans);
+ VERIFY_ENTRY(4, true, true, true, 0, 10, 30, ids_, &rtrans);
+ }
+
+ {
+ ReadTransaction rtrans(FROM_HERE, dir);
+ // Resolve the pending keys.
+ syncdb_.manager()->GetCryptographer(&rtrans)->DecryptPendingKeys(
+ key_params);
+ }
+ // First cycle resolves conflicts, second cycle commits changes.
+ SyncShareAsDelegate();
+ EXPECT_EQ(2, session_->status_controller().syncer_status().
+ num_server_overwrites);
+ EXPECT_EQ(1, session_->status_controller().syncer_status().
+ num_local_overwrites);
+ // We attempted to commit item 1.
+ EXPECT_EQ(1U, session_->status_controller().unsynced_handles().size());
+ EXPECT_TRUE(session_->status_controller().did_commit_items());
+ SyncShareAsDelegate();
+ {
+ // Everything should be resolved now. The local changes should have
+ // overwritten the server changes for 2 and 4, while the server changes
+ // overwrote the local for entry 3.
+ // We attempted to commit two handles.
+ EXPECT_EQ(0, session_->status_controller().syncer_status().
+ num_server_overwrites);
+ EXPECT_EQ(0, session_->status_controller().syncer_status().
+ num_local_overwrites);
+ EXPECT_EQ(2U, session_->status_controller().unsynced_handles().size());
+ EXPECT_TRUE(session_->status_controller().did_commit_items());
+ ReadTransaction rtrans(FROM_HERE, dir);
+ VERIFY_ENTRY(1, false, false, false, 0, 41, 41, ids_, &rtrans);
+ VERIFY_ENTRY(2, false, false, false, 1, 31, 31, ids_, &rtrans);
+ VERIFY_ENTRY(3, false, false, false, 1, 30, 30, ids_, &rtrans);
+ VERIFY_ENTRY(4, false, false, false, 0, 31, 31, ids_, &rtrans);
+ }
+}
+
+#undef VERIFY_ENTRY
+
TEST_F(SyncerTest, NigoriConflicts) {
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
ASSERT_TRUE(dir.good());
@@ -878,7 +1062,6 @@ TEST_F(SyncerTest, NigoriConflicts) {
}
}
-
// TODO(chron): More corner case unit tests around validation.
TEST_F(SyncerTest, TestCommitMetahandleIterator) {
ScopedDirLookup dir(syncdb_.manager(), syncdb_.name());
@@ -4147,14 +4330,12 @@ class SyncerUndeletionTest : public SyncerTest {
};
TEST_F(SyncerUndeletionTest, UndeleteDuringCommit) {
- const StatusController& status = session_->status_controller();
-
Create();
ExpectUnsyncedCreation();
SyncShareAsDelegate();
EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
- EXPECT_EQ(0, status.TotalNumConflictingItems());
+ EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
ExpectSyncedAndCreated();
// Delete, begin committing the delete, then undelete while committing.
@@ -4166,7 +4347,7 @@ TEST_F(SyncerUndeletionTest, UndeleteDuringCommit) {
// The item ought to exist as an unsynced undeletion (meaning,
// we think that the next commit ought to be a recreation commit).
- EXPECT_EQ(0, status.TotalNumConflictingItems());
+ EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
ExpectUnsyncedUndeletion();
@@ -4177,20 +4358,18 @@ TEST_F(SyncerUndeletionTest, UndeleteDuringCommit) {
mock_server_->SetMidCommitCallback(base::Closure());
mock_server_->AddUpdateTombstone(Get(metahandle_, ID));
SyncShareAsDelegate();
- EXPECT_EQ(0, status.TotalNumConflictingItems());
+ EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
ExpectSyncedAndCreated();
}
TEST_F(SyncerUndeletionTest, UndeleteBeforeCommit) {
- const StatusController& status = session_->status_controller();
-
Create();
ExpectUnsyncedCreation();
SyncShareAsDelegate();
EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
- EXPECT_EQ(0, status.TotalNumConflictingItems());
+ EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
ExpectSyncedAndCreated();
// Delete and undelete, then sync to pick up the result.
@@ -4201,7 +4380,7 @@ TEST_F(SyncerUndeletionTest, UndeleteBeforeCommit) {
SyncShareAsDelegate();
// The item ought to have committed successfully.
- EXPECT_EQ(0, status.TotalNumConflictingItems());
+ EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
ExpectSyncedAndCreated();
EXPECT_EQ(2, Get(metahandle_, BASE_VERSION));
@@ -4210,20 +4389,18 @@ TEST_F(SyncerUndeletionTest, UndeleteBeforeCommit) {
// update.
mock_server_->AddUpdateFromLastCommit();
SyncShareAsDelegate();
- EXPECT_EQ(0, status.TotalNumConflictingItems());
+ EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
ExpectSyncedAndCreated();
}
TEST_F(SyncerUndeletionTest, UndeleteAfterCommitButBeforeGetUpdates) {
- const StatusController& status = session_->status_controller();
-
Create();
ExpectUnsyncedCreation();
SyncShareAsDelegate();
EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
- EXPECT_EQ(0, status.TotalNumConflictingItems());
+ EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
ExpectSyncedAndCreated();
// Delete and commit.
@@ -4232,7 +4409,7 @@ TEST_F(SyncerUndeletionTest, UndeleteAfterCommitButBeforeGetUpdates) {
SyncShareAsDelegate();
// The item ought to have committed successfully.
- EXPECT_EQ(0, status.TotalNumConflictingItems());
+ EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
ExpectSyncedAndDeleted();
@@ -4244,26 +4421,24 @@ TEST_F(SyncerUndeletionTest, UndeleteAfterCommitButBeforeGetUpdates) {
// deletion update. The undeletion should prevail.
mock_server_->AddUpdateFromLastCommit();
SyncShareAsDelegate();
- EXPECT_EQ(0, status.TotalNumConflictingItems());
+ EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
ExpectSyncedAndCreated();
}
TEST_F(SyncerUndeletionTest, UndeleteAfterDeleteAndGetUpdates) {
- const StatusController& status = session_->status_controller();
-
Create();
ExpectUnsyncedCreation();
SyncShareAsDelegate();
EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
- EXPECT_EQ(0, status.TotalNumConflictingItems());
+ EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
ExpectSyncedAndCreated();
mock_server_->AddUpdateFromLastCommit();
SyncShareAsDelegate();
EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
- EXPECT_EQ(0, status.TotalNumConflictingItems());
+ EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
ExpectSyncedAndCreated();
// Delete and commit.
@@ -4272,7 +4447,7 @@ TEST_F(SyncerUndeletionTest, UndeleteAfterDeleteAndGetUpdates) {
SyncShareAsDelegate();
// The item ought to have committed successfully.
- EXPECT_EQ(0, status.TotalNumConflictingItems());
+ EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
ExpectSyncedAndDeleted();
@@ -4280,7 +4455,7 @@ TEST_F(SyncerUndeletionTest, UndeleteAfterDeleteAndGetUpdates) {
// deletion update. Should be consistent.
mock_server_->AddUpdateFromLastCommit();
SyncShareAsDelegate();
- EXPECT_EQ(0, status.TotalNumConflictingItems());
+ EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
ExpectSyncedAndDeleted();
@@ -4291,28 +4466,26 @@ TEST_F(SyncerUndeletionTest, UndeleteAfterDeleteAndGetUpdates) {
// Now, encounter a GetUpdates corresponding to the just-committed
// deletion update. The undeletion should prevail.
SyncShareAsDelegate();
- EXPECT_EQ(0, status.TotalNumConflictingItems());
+ EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
ExpectSyncedAndCreated();
}
// Test processing of undeletion GetUpdateses.
TEST_F(SyncerUndeletionTest, UndeleteAfterOtherClientDeletes) {
- const StatusController& status = session_->status_controller();
-
Create();
ExpectUnsyncedCreation();
SyncShareAsDelegate();
EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
- EXPECT_EQ(0, status.TotalNumConflictingItems());
+ EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
ExpectSyncedAndCreated();
// Add a delete from the server.
mock_server_->AddUpdateFromLastCommit();
SyncShareAsDelegate();
EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
- EXPECT_EQ(0, status.TotalNumConflictingItems());
+ EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
ExpectSyncedAndCreated();
// Some other client deletes the item.
@@ -4320,7 +4493,7 @@ TEST_F(SyncerUndeletionTest, UndeleteAfterOtherClientDeletes) {
SyncShareAsDelegate();
// The update ought to have applied successfully.
- EXPECT_EQ(0, status.TotalNumConflictingItems());
+ EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
ExpectSyncedAndDeleted();
@@ -4328,7 +4501,7 @@ TEST_F(SyncerUndeletionTest, UndeleteAfterOtherClientDeletes) {
Undelete();
ExpectUnsyncedUndeletion();
SyncShareAsDelegate();
- EXPECT_EQ(0, status.TotalNumConflictingItems());
+ EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
ExpectSyncedAndCreated();
@@ -4336,20 +4509,18 @@ TEST_F(SyncerUndeletionTest, UndeleteAfterOtherClientDeletes) {
// deletion update. The undeletion should prevail.
mock_server_->AddUpdateFromLastCommit();
SyncShareAsDelegate();
- EXPECT_EQ(0, status.TotalNumConflictingItems());
+ EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
ExpectSyncedAndCreated();
}
TEST_F(SyncerUndeletionTest, UndeleteAfterOtherClientDeletesImmediately) {
- const StatusController& status = session_->status_controller();
-
Create();
ExpectUnsyncedCreation();
SyncShareAsDelegate();
EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
- EXPECT_EQ(0, status.TotalNumConflictingItems());
+ EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
ExpectSyncedAndCreated();
// Some other client deletes the item before we get a chance
@@ -4358,7 +4529,7 @@ TEST_F(SyncerUndeletionTest, UndeleteAfterOtherClientDeletesImmediately) {
SyncShareAsDelegate();
// The update ought to have applied successfully.
- EXPECT_EQ(0, status.TotalNumConflictingItems());
+ EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
ExpectSyncedAndDeleted();
@@ -4366,7 +4537,7 @@ TEST_F(SyncerUndeletionTest, UndeleteAfterOtherClientDeletesImmediately) {
Undelete();
ExpectUnsyncedUndeletion();
SyncShareAsDelegate();
- EXPECT_EQ(0, status.TotalNumConflictingItems());
+ EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
ExpectSyncedAndCreated();
@@ -4374,27 +4545,25 @@ TEST_F(SyncerUndeletionTest, UndeleteAfterOtherClientDeletesImmediately) {
// deletion update. The undeletion should prevail.
mock_server_->AddUpdateFromLastCommit();
SyncShareAsDelegate();
- EXPECT_EQ(0, status.TotalNumConflictingItems());
+ EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
ExpectSyncedAndCreated();
}
TEST_F(SyncerUndeletionTest, OtherClientUndeletes) {
- const StatusController& status = session_->status_controller();
-
Create();
ExpectUnsyncedCreation();
SyncShareAsDelegate();
EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
- EXPECT_EQ(0, status.TotalNumConflictingItems());
+ EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
ExpectSyncedAndCreated();
// Get the updates of our just-committed entry.
mock_server_->AddUpdateFromLastCommit();
SyncShareAsDelegate();
EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
- EXPECT_EQ(0, status.TotalNumConflictingItems());
+ EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
ExpectSyncedAndCreated();
// We delete the item.
@@ -4403,7 +4572,7 @@ TEST_F(SyncerUndeletionTest, OtherClientUndeletes) {
SyncShareAsDelegate();
// The update ought to have applied successfully.
- EXPECT_EQ(0, status.TotalNumConflictingItems());
+ EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
ExpectSyncedAndDeleted();
@@ -4411,7 +4580,7 @@ TEST_F(SyncerUndeletionTest, OtherClientUndeletes) {
// deletion update.
mock_server_->AddUpdateFromLastCommit();
SyncShareAsDelegate();
- EXPECT_EQ(0, status.TotalNumConflictingItems());
+ EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
ExpectSyncedAndDeleted();
@@ -4421,28 +4590,26 @@ TEST_F(SyncerUndeletionTest, OtherClientUndeletes) {
"Thadeusz", 100, 1000);
mock_server_->SetLastUpdateClientTag(client_tag_);
SyncShareAsDelegate();
- EXPECT_EQ(0, status.TotalNumConflictingItems());
+ EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
ExpectSyncedAndCreated();
EXPECT_EQ("Thadeusz", Get(metahandle_, NON_UNIQUE_NAME));
}
TEST_F(SyncerUndeletionTest, OtherClientUndeletesImmediately) {
- const StatusController& status = session_->status_controller();
-
Create();
ExpectUnsyncedCreation();
SyncShareAsDelegate();
EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
- EXPECT_EQ(0, status.TotalNumConflictingItems());
+ EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
ExpectSyncedAndCreated();
// Get the updates of our just-committed entry.
mock_server_->AddUpdateFromLastCommit();
SyncShareAsDelegate();
EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
- EXPECT_EQ(0, status.TotalNumConflictingItems());
+ EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
ExpectSyncedAndCreated();
// We delete the item.
@@ -4451,7 +4618,7 @@ TEST_F(SyncerUndeletionTest, OtherClientUndeletesImmediately) {
SyncShareAsDelegate();
// The update ought to have applied successfully.
- EXPECT_EQ(0, status.TotalNumConflictingItems());
+ EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
ExpectSyncedAndDeleted();
@@ -4462,7 +4629,7 @@ TEST_F(SyncerUndeletionTest, OtherClientUndeletesImmediately) {
"Thadeusz", 100, 1000);
mock_server_->SetLastUpdateClientTag(client_tag_);
SyncShareAsDelegate();
- EXPECT_EQ(0, status.TotalNumConflictingItems());
+ EXPECT_EQ(0, session_->status_controller().TotalNumConflictingItems());
EXPECT_EQ(1, mock_server_->GetAndClearNumGetUpdatesRequests());
ExpectSyncedAndCreated();
EXPECT_EQ("Thadeusz", Get(metahandle_, NON_UNIQUE_NAME));
diff --git a/chrome/browser/sync/engine/syncer_util.cc b/chrome/browser/sync/engine/syncer_util.cc
index 2039168..bf1456e 100644
--- a/chrome/browser/sync/engine/syncer_util.cc
+++ b/chrome/browser/sync/engine/syncer_util.cc
@@ -10,6 +10,7 @@
#include <vector>
#include "base/location.h"
+#include "base/metrics/histogram.h"
#include "chrome/browser/sync/engine/conflict_resolver.h"
#include "chrome/browser/sync/engine/nigori_util.h"
#include "chrome/browser/sync/engine/syncer_proto_util.h"
@@ -23,6 +24,7 @@
#include "chrome/browser/sync/syncable/model_type.h"
#include "chrome/browser/sync/syncable/syncable.h"
#include "chrome/browser/sync/syncable/syncable_changes_version.h"
+#include "chrome/browser/sync/util/cryptographer.h"
#include "chrome/browser/sync/util/time.h"
using syncable::BASE_VERSION;
@@ -33,6 +35,7 @@ using syncable::CREATE_NEW_UPDATE_ITEM;
using syncable::CTIME;
using syncable::Directory;
using syncable::Entry;
+using syncable::GetModelTypeFromSpecifics;
using syncable::GET_BY_HANDLE;
using syncable::GET_BY_ID;
using syncable::ID;
@@ -41,11 +44,13 @@ using syncable::IS_DIR;
using syncable::IS_UNAPPLIED_UPDATE;
using syncable::IS_UNSYNCED;
using syncable::Id;
+using syncable::IsRealDataType;
using syncable::META_HANDLE;
using syncable::MTIME;
using syncable::MutableEntry;
using syncable::NEXT_ID;
using syncable::NON_UNIQUE_NAME;
+using syncable::BASE_SERVER_SPECIFICS;
using syncable::PARENT_ID;
using syncable::PREV_ID;
using syncable::ReadTransaction;
@@ -301,6 +306,33 @@ UpdateAttemptResponse SyncerUtil::AttemptToUpdateEntry(
}
}
+ // Only apply updates that we can decrypt. If we can't decrypt the update, it
+ // is likely because the passphrase has not arrived yet. Because the
+ // passphrase may not arrive within this GetUpdates, we can't just return
+ // conflict, else we try to perform normal conflict resolution prematurely or
+ // the syncer may get stuck. As such, we return CONFLICT_ENCRYPTION, which is
+ // treated as a non-blocking conflict. See the description in syncer_types.h.
+ // This prevents any unsynced changes from committing and postpones conflict
+ // resolution until all data can be decrypted.
+ if (specifics.has_encrypted() &&
+ !cryptographer->CanDecrypt(specifics.encrypted())) {
+ // We can't decrypt this node yet.
+ DVLOG(1) << "Received an undecryptable "
+ << syncable::ModelTypeToString(entry->GetServerModelType())
+ << " update, returning encryption_conflict.";
+ return CONFLICT_ENCRYPTION;
+ } else if (specifics.HasExtension(sync_pb::password) &&
+ entry->Get(UNIQUE_SERVER_TAG).empty()) {
+ // Passwords use their own legacy encryption scheme.
+ const sync_pb::PasswordSpecifics& password =
+ specifics.GetExtension(sync_pb::password);
+ if (!cryptographer->CanDecrypt(password.encrypted())) {
+ DVLOG(1) << "Received an undecryptable password update, returning "
+ << "encryption_conflict.";
+ return CONFLICT_ENCRYPTION;
+ }
+ }
+
if (entry->Get(IS_UNSYNCED)) {
DVLOG(1) << "Skipping update, returning conflict for: " << id
<< " ; it's unsynced.";
@@ -336,39 +368,14 @@ UpdateAttemptResponse SyncerUtil::AttemptToUpdateEntry(
}
}
- // Only apply updates that we can decrypt. If we can't decrypt the update, it
- // is likely because the passphrase has not arrived yet. Because the
- // passphrase may not arrive within this GetUpdates, we can't just return
- // conflict, else the syncer gets stuck. As such, we return
- // CONFLICT_ENCRYPTION, which is treated as a non-blocking conflict. See the
- // description in syncer_types.h.
- if (specifics.has_encrypted() &&
- !cryptographer->CanDecrypt(specifics.encrypted())) {
- // We can't decrypt this node yet.
- DVLOG(1) << "Received an undecryptable "
+ if (specifics.has_encrypted()) {
+ DVLOG(2) << "Received a decryptable "
<< syncable::ModelTypeToString(entry->GetServerModelType())
- << " update, returning encryption_conflict.";
- return CONFLICT_ENCRYPTION;
- } else if (specifics.HasExtension(sync_pb::password) &&
- entry->Get(UNIQUE_SERVER_TAG).empty()) {
- // Passwords use their own legacy encryption scheme.
- const sync_pb::PasswordSpecifics& password =
- specifics.GetExtension(sync_pb::password);
- if (!cryptographer->CanDecrypt(password.encrypted())) {
- DVLOG(1) << "Received an undecryptable password update, returning "
- << "encryption_conflict.";
- return CONFLICT_ENCRYPTION;
- }
+ << " update, applying normally.";
} else {
- if (specifics.has_encrypted()) {
- DVLOG(2) << "Received a decryptable "
- << syncable::ModelTypeToString(entry->GetServerModelType())
- << " update, applying normally.";
- } else {
- DVLOG(2) << "Received an unencrypted "
- << syncable::ModelTypeToString(entry->GetServerModelType())
- << " update, applying normally.";
- }
+ DVLOG(2) << "Received an unencrypted "
+ << syncable::ModelTypeToString(entry->GetServerModelType())
+ << " update, applying normally.";
}
SyncerUtil::UpdateLocalDataFromServerData(trans, entry);
@@ -475,7 +482,7 @@ void SyncerUtil::UpdateServerFieldsFromUpdate(
// static
void SyncerUtil::CreateNewEntry(syncable::WriteTransaction *trans,
const syncable::Id& id) {
- syncable::MutableEntry entry(trans, syncable::GET_BY_ID, id);
+ syncable::MutableEntry entry(trans, GET_BY_ID, id);
if (!entry.good()) {
syncable::MutableEntry new_entry(trans, syncable::CREATE_NEW_UPDATE_ITEM,
id);
@@ -510,6 +517,8 @@ void SyncerUtil::UpdateLocalDataFromServerData(
DVLOG(2) << "Updating entry : " << *entry;
// Start by setting the properties that determine the model_type.
entry->Put(SPECIFICS, entry->Get(SERVER_SPECIFICS));
+ // Clear the previous server specifics now that we're applying successfully.
+ entry->Put(BASE_SERVER_SPECIFICS, sync_pb::EntitySpecifics());
entry->Put(IS_DIR, entry->Get(SERVER_IS_DIR));
// This strange dance around the IS_DEL flag avoids problems when setting
// the name.
diff --git a/chrome/browser/sync/syncable/directory_backing_store.cc b/chrome/browser/sync/syncable/directory_backing_store.cc
index 1f5294d..ac36232 100644
--- a/chrome/browser/sync/syncable/directory_backing_store.cc
+++ b/chrome/browser/sync/syncable/directory_backing_store.cc
@@ -42,7 +42,7 @@ static const string::size_type kUpdateStatementBufferSize = 2048;
// Increment this version whenever updating DB tables.
extern const int32 kCurrentDBVersion; // Global visibility for our unittest.
-const int32 kCurrentDBVersion = 77;
+const int32 kCurrentDBVersion = 78;
namespace {
@@ -476,6 +476,12 @@ DirOpenResult DirectoryBackingStore::InitializeTables() {
version_on_disk = 77;
}
+ // Version 78 added the column base_server_specifics to the metas table.
+ if (version_on_disk == 77) {
+ if (MigrateVersion77To78())
+ version_on_disk = 78;
+ }
+
// If one of the migrations requested it, drop columns that aren't current.
// It's only safe to do this after migrating all the way to the current
// version.
@@ -1118,6 +1124,17 @@ bool DirectoryBackingStore::MigrateVersion76To77() {
return true;
}
+bool DirectoryBackingStore::MigrateVersion77To78() {
+ // Version 78 added one column to table 'metas': base_server_specifics.
+ int result =
+ ExecQuery(load_dbhandle_,
+ "ALTER TABLE metas ADD COLUMN base_server_specifics BLOB");
+ if (result != SQLITE_DONE)
+ return false;
+ SetVersion(78);
+ return true;
+}
+
int DirectoryBackingStore::CreateTables() {
DVLOG(1) << "First run, creating tables";
// Create two little tables share_version and share_info
diff --git a/chrome/browser/sync/syncable/directory_backing_store.h b/chrome/browser/sync/syncable/directory_backing_store.h
index 460da33..697675a 100644
--- a/chrome/browser/sync/syncable/directory_backing_store.h
+++ b/chrome/browser/sync/syncable/directory_backing_store.h
@@ -85,6 +85,7 @@ class DirectoryBackingStore {
FRIEND_TEST_ALL_PREFIXES(DirectoryBackingStoreTest, MigrateVersion74To75);
FRIEND_TEST_ALL_PREFIXES(DirectoryBackingStoreTest, MigrateVersion75To76);
FRIEND_TEST_ALL_PREFIXES(DirectoryBackingStoreTest, MigrateVersion76To77);
+ FRIEND_TEST_ALL_PREFIXES(DirectoryBackingStoreTest, MigrateVersion77To78);
FRIEND_TEST_ALL_PREFIXES(DirectoryBackingStoreTest, ModelTypeIds);
FRIEND_TEST_ALL_PREFIXES(DirectoryBackingStoreTest, Corruption);
FRIEND_TEST_ALL_PREFIXES(DirectoryBackingStoreTest, DeleteEntries);
@@ -183,6 +184,7 @@ class DirectoryBackingStore {
bool MigrateVersion74To75();
bool MigrateVersion75To76();
bool MigrateVersion76To77();
+ bool MigrateVersion77To78();
// The handle to our sqlite on-disk store for initialization and loading, and
// for saving changes periodically via SaveChanges, respectively.
diff --git a/chrome/browser/sync/syncable/directory_backing_store_unittest.cc b/chrome/browser/sync/syncable/directory_backing_store_unittest.cc
index 461fac2..1179bf2 100644
--- a/chrome/browser/sync/syncable/directory_backing_store_unittest.cc
+++ b/chrome/browser/sync/syncable/directory_backing_store_unittest.cc
@@ -53,6 +53,7 @@ class MigrationTest : public testing::TestWithParam<int> {
void SetUpVersion74Database();
void SetUpVersion75Database();
void SetUpVersion76Database();
+ void SetUpVersion77Database();
void SetUpCurrentDatabaseAndCheckVersion() {
SetUpVersion70Database(); // Prepopulates data.
@@ -206,12 +207,18 @@ class DirectoryBackingStoreTest : public MigrationTest {};
// functions.
#define LEGACY_META_PROTO_TIMES(x) LEGACY_META_PROTO_TIMES_##x
#define LEGACY_META_PROTO_TIMES_STR(x) LEGACY_META_PROTO_TIMES_STR_##x
-#define META_PROTO_TIMES(x) META_PROTO_TIMES_##x
#define LEGACY_PROTO_TIME_VALS(x) \
LEGACY_META_PROTO_TIMES_STR(x) "," \
LEGACY_META_PROTO_TIMES_STR(x) "," \
LEGACY_META_PROTO_TIMES_STR(x) "," \
LEGACY_META_PROTO_TIMES_STR(x)
+#define META_PROTO_TIMES(x) META_PROTO_TIMES_##x
+#define META_PROTO_TIMES_STR(x) META_PROTO_TIMES_STR_##x
+#define META_PROTO_TIMES_VALS(x) \
+ META_PROTO_TIMES_STR(x) "," \
+ META_PROTO_TIMES_STR(x) "," \
+ META_PROTO_TIMES_STR(x) "," \
+ META_PROTO_TIMES_STR(x)
namespace {
@@ -1434,6 +1441,98 @@ void MigrationTest::SetUpVersion76Database() {
ASSERT_TRUE(connection.CommitTransaction());
}
+void MigrationTest::SetUpVersion77Database() {
+ sql::Connection connection;
+ ASSERT_TRUE(connection.Open(GetDatabasePath()));
+ ASSERT_TRUE(connection.BeginTransaction());
+ ASSERT_TRUE(connection.Execute(
+ "CREATE TABLE share_version (id VARCHAR(128) primary key, data INT);"
+ "INSERT INTO 'share_version' VALUES('nick@chromium.org',77);"
+ "CREATE TABLE models (model_id BLOB primary key, progress_marker BLOB, in"
+ "itial_sync_ended BOOLEAN default 0);"
+ "INSERT INTO 'models' VALUES(X'C2881000',X'0888810218B605',1);"
+ "CREATE TABLE 'metas'(metahandle bigint primary key ON CONFLICT FAIL,base"
+ "_version bigint default -1,server_version bigint default 0,server_po"
+ "sition_in_parent bigint default 0,local_external_id bigint default 0"
+ ",mtime bigint default 0,server_mtime bigint default 0,ctime bigint d"
+ "efault 0,server_ctime bigint default 0,id varchar(255) default 'r',p"
+ "arent_id varchar(255) default 'r',server_parent_id varchar(255) defa"
+ "ult 'r',prev_id varchar(255) default 'r',next_id varchar(255) defaul"
+ "t 'r',is_unsynced bit default 0,is_unapplied_update bit default 0,is"
+ "_del bit default 0,is_dir bit default 0,server_is_dir bit default 0,"
+ "server_is_del bit default 0,non_unique_name varchar,server_non_uniqu"
+ "e_name varchar(255),unique_server_tag varchar,unique_client_tag varc"
+ "har,specifics blob,server_specifics blob);"
+ "INSERT INTO 'metas' VALUES(1,-1,0,0,0," META_PROTO_TIMES_VALS(1)
+ ",'r','r','r','r','r',0,0,0,1,0,0,NULL,NULL,NULL,NULL,X'',X'');"
+ "INSERT INTO 'metas' VALUES(2,669,669,-2097152,4,"
+ META_PROTO_TIMES_VALS(2) ",'s_ID_2','s_ID_9','s_ID_9','s_ID_2','s_ID_"
+ "2',0,0,1,0,0,1,'Deleted Item','Deleted Item',NULL,NULL,X'C28810220A1"
+ "6687474703A2F2F7777772E676F6F676C652E636F6D2F12084141534741534741',X"
+ "'C28810260A17687474703A2F2F7777772E676F6F676C652E636F6D2F32120B41534"
+ "14447414447414447');"
+ "INSERT INTO 'metas' VALUES(4,681,681,-3145728,3,"
+ META_PROTO_TIMES_VALS(4) ",'s_ID_4','s_ID_9','s_ID_9','s_ID_4','s_ID_"
+ "4',0,0,1,0,0,1,'Welcome to Chromium','Welcome to Chromium',NULL,NULL"
+ ",X'C28810350A31687474703A2F2F7777772E676F6F676C652E636F6D2F6368726F6"
+ "D652F696E746C2F656E2F77656C636F6D652E68746D6C1200',X'C28810350A31687"
+ "474703A2F2F7777772E676F6F676C652E636F6D2F6368726F6D652F696E746C2F656"
+ "E2F77656C636F6D652E68746D6C1200');"
+ "INSERT INTO 'metas' VALUES(5,677,677,1048576,7," META_PROTO_TIMES_VALS(5)
+ ",'s_ID_5','s_ID_9','s_ID_9','s_ID_5','s_ID_5',0,0,1,0,0,1,'Google','"
+ "Google',NULL,NULL,X'C28810220A16687474703A2F2F7777772E676F6F676C652E"
+ "636F6D2F12084147415347415347',X'C28810220A16687474703A2F2F7777772E67"
+ "6F6F676C652E636F6D2F12084147464447415347');"
+ "INSERT INTO 'metas' VALUES(6,694,694,-4194304,6,"
+ META_PROTO_TIMES_VALS(6) ",'s_ID_6','s_ID_9','s_ID_9','r','r',0,0,0,1"
+ ",1,0,'The Internet','The Internet',NULL,NULL,X'C2881000',X'C2881000'"
+ ");"
+ "INSERT INTO 'metas' VALUES(7,663,663,1048576,0," META_PROTO_TIMES_VALS(7)
+ ",'s_ID_7','r','r','r','r',0,0,0,1,1,0,'Google Chrome','Goo"
+ "gle Chrome','google_chrome',NULL,NULL,NULL);"
+ "INSERT INTO 'metas' VALUES(8,664,664,1048576,0," META_PROTO_TIMES_VALS(8)
+ ",'s_ID_8','s_ID_7','s_ID_7','r','r',0,0,0,1,1,0,'Bookmarks','Bookmar"
+ "ks','google_chrome_bookmarks',NULL,X'C2881000',X'C2881000');"
+ "INSERT INTO 'metas' VALUES(9,665,665,1048576,1," META_PROTO_TIMES_VALS(9)
+ ",'s_ID_9','s_ID_8','s_ID_8','r','s_ID_10',0,0,0,1,1,0,'Bookmark Bar'"
+ ",'Bookmark Bar','bookmark_bar',NULL,X'C2881000',X'C2881000');"
+ "INSERT INTO 'metas' VALUES(10,666,666,2097152,2,"
+ META_PROTO_TIMES_VALS(10) ",'s_ID_10','s_ID_8','s_ID_8','s_ID_9','r',"
+ "0,0,0,1,1,0,'Other Bookmarks','Other Bookmarks','other_bookmarks',NU"
+ "LL,X'C2881000',X'C2881000');"
+ "INSERT INTO 'metas' VALUES(11,683,683,-1048576,8,"
+ META_PROTO_TIMES_VALS(11) ",'s_ID_11','s_ID_6','s_ID_6','r','s_ID_13'"
+ ",0,0,0,0,0,0,'Home (The Chromium Projects)','Home (The Chromium Proj"
+ "ects)',NULL,NULL,X'C28810220A18687474703A2F2F6465762E6368726F6D69756"
+ "D2E6F72672F1206414741545741',X'C28810290A1D687474703A2F2F6465762E636"
+ "8726F6D69756D2E6F72672F6F7468657212084146414756415346');"
+ "INSERT INTO 'metas' VALUES(12,685,685,0,9," META_PROTO_TIMES_VALS(12)
+ ",'s_ID_12','s_ID_6','s_"
+ "ID_6','s_ID_13','s_ID_14',0,0,0,1,1,0,'Extra Bookmarks','Extra Bookm"
+ "arks',NULL,NULL,X'C2881000',X'C2881000');"
+ "INSERT INTO 'metas' VALUES(13,687,687,-917504,10,"
+ META_PROTO_TIMES_VALS(13) ",'s_ID_13','s_ID_6','s_ID_6','s_ID_11','s_"
+ "ID_12',0,0,0,0,0,0,'ICANN | Internet Corporation for Assigned Names "
+ "and Numbers','ICANN | Internet Corporation for Assigned Names and Nu"
+ "mbers',NULL,NULL,X'C28810240A15687474703A2F2F7777772E6963616E6E2E636"
+ "F6D2F120B504E474158463041414646',X'C28810200A15687474703A2F2F7777772"
+ "E6963616E6E2E636F6D2F120744414146415346');"
+ "INSERT INTO 'metas' VALUES(14,692,692,1048576,11,"
+ META_PROTO_TIMES_VALS(14) ",'s_ID_14','s_ID_6','s_ID_6','s_ID_12','r'"
+ ",0,0,0,0,0,0,'The WebKit Open Source Project','The WebKit Open Sourc"
+ "e Project',NULL,NULL,X'C288101A0A12687474703A2F2F7765626B69742E6F726"
+ "72F1204504E4758',X'C288101C0A13687474703A2F2F7765626B69742E6F72672F7"
+ "81205504E473259');"
+ "CREATE TABLE 'share_info' (id TEXT primary key, name TEXT, store_birthda"
+ "y TEXT, db_create_version TEXT, db_create_time INT, next_id INT defa"
+ "ult -2, cache_guid TEXT , notification_state BLOB);"
+ "INSERT INTO 'share_info' VALUES('nick@chromium.org','nick@chromium.org',"
+ "'c27e9f59-08ca-46f8-b0cc-f16a2ed778bb','Unknown',1263522064,-65542,'"
+ "9010788312004066376x-6609234393368420856x',NULL);"
+ ));
+ ASSERT_TRUE(connection.CommitTransaction());
+}
+
TEST_F(DirectoryBackingStoreTest, MigrateVersion67To68) {
SetUpVersion67Database();
@@ -1732,25 +1831,10 @@ TEST_F(DirectoryBackingStoreTest, MigrateVersion75To76) {
ASSERT_FALSE(dbs->needs_column_refresh_);
ASSERT_TRUE(dbs->MigrateVersion75To76());
ASSERT_EQ(76, dbs->GetVersion());
- ASSERT_TRUE(dbs->needs_column_refresh_);
- dbs->RefreshColumns();
dbs->EndLoad();
-
- sql::Connection connection;
- ASSERT_TRUE(connection.Open(GetDatabasePath()));
- ASSERT_FALSE(
- connection.DoesColumnExist("share_info", "autofill_migration_state"));
- ASSERT_FALSE(
- connection.DoesColumnExist("share_info",
- "bookmarks_added_during_autofill_migration"));
- ASSERT_FALSE(
- connection.DoesColumnExist("share_info", "autofill_migration_time"));
- ASSERT_FALSE(
- connection.DoesColumnExist("share_info",
- "autofill_entries_added_during_migration"));
- ASSERT_FALSE(
- connection.DoesColumnExist("share_info",
- "autofill_profiles_added_during_migration"));
+ ASSERT_TRUE(dbs->needs_column_refresh_);
+  // Cannot actually refresh columns due to version 76 not containing all
+  // necessary columns.
}
TEST_F(DirectoryBackingStoreTest, MigrateVersion76To77) {
@@ -1763,25 +1847,42 @@ TEST_F(DirectoryBackingStoreTest, MigrateVersion76To77) {
EXPECT_EQ(GetExpectedLegacyMetaProtoTimes(),
GetMetaProtoTimes(dbs->load_dbhandle_));
- // Since we the proto times are expected to be in a legacy format,
- // they may not be compatible with ProtoTimeToTime, so we don't call
- // ExpectTimes().
+ // Since the proto times are expected to be in a legacy format, they may not
+ // be compatible with ProtoTimeToTime, so we don't call ExpectTimes().
ASSERT_TRUE(dbs->MigrateVersion76To77());
ASSERT_EQ(77, dbs->GetVersion());
EXPECT_EQ(GetExpectedMetaProtoTimes(),
GetMetaProtoTimes(dbs->load_dbhandle_));
+ // Cannot actually load entries due to version 77 not having all required
+ // columns.
+ dbs->EndLoad();
+ ASSERT_FALSE(dbs->needs_column_refresh_);
+}
+TEST_F(DirectoryBackingStoreTest, MigrateVersion77To78) {
+ SetUpVersion77Database();
{
- MetahandlesIndex index;
- STLElementDeleter<MetahandlesIndex> index_deleter(&index);
- dbs->LoadEntries(&index);
- ExpectTimes(index, GetExpectedMetaTimes());
+ sql::Connection connection;
+ ASSERT_TRUE(connection.Open(GetDatabasePath()));
+ ASSERT_FALSE(
+ connection.DoesColumnExist("metas", "BASE_SERVER_SPECIFICS"));
}
+ scoped_ptr<DirectoryBackingStore> dbs(
+ new DirectoryBackingStore(GetUsername(), GetDatabasePath()));
+ dbs->BeginLoad();
+ ASSERT_FALSE(dbs->needs_column_refresh_);
+ ASSERT_TRUE(dbs->MigrateVersion77To78());
+ ASSERT_EQ(78, dbs->GetVersion());
dbs->EndLoad();
ASSERT_FALSE(dbs->needs_column_refresh_);
+
+ sql::Connection connection;
+ ASSERT_TRUE(connection.Open(GetDatabasePath()));
+ ASSERT_TRUE(
+ connection.DoesColumnExist("metas", "base_server_specifics"));
}
TEST_P(MigrationTest, ToCurrentVersion) {
@@ -1816,6 +1917,9 @@ TEST_P(MigrationTest, ToCurrentVersion) {
case 76:
SetUpVersion76Database();
break;
+ case 77:
+ SetUpVersion77Database();
+ break;
default:
// If you see this error, it may mean that you've increased the
// database version number but you haven't finished adding unit tests
@@ -1893,6 +1997,9 @@ TEST_P(MigrationTest, ToCurrentVersion) {
"autofill_entries_added_during_migration"));
ASSERT_FALSE(connection.DoesColumnExist("share_info",
"autofill_profiles_added_during_migration"));
+
+  // Column added in version 78.
+ ASSERT_TRUE(connection.DoesColumnExist("metas", "base_server_specifics"));
}
{
syncable::Directory::KernelLoadInfo dir_info;
diff --git a/chrome/browser/sync/syncable/syncable.h b/chrome/browser/sync/syncable/syncable.h
index 6b945c8..5d9bfe8 100644
--- a/chrome/browser/sync/syncable/syncable.h
+++ b/chrome/browser/sync/syncable/syncable.h
@@ -178,6 +178,7 @@ enum {
enum ProtoField {
SPECIFICS = PROTO_FIELDS_BEGIN,
SERVER_SPECIFICS,
+ BASE_SERVER_SPECIFICS,
PROTO_FIELDS_END,
};
@@ -739,7 +740,6 @@ struct Index {
// be held before acquiring the kernel lock.
class ScopedKernelLock;
class IdFilter;
-class DirectoryManager;
class Directory {
friend class BaseTransaction;
diff --git a/chrome/browser/sync/syncable/syncable_columns.h b/chrome/browser/sync/syncable/syncable_columns.h
index ca9c094..b6f3cfb 100644
--- a/chrome/browser/sync/syncable/syncable_columns.h
+++ b/chrome/browser/sync/syncable/syncable_columns.h
@@ -57,6 +57,7 @@ static const ColumnSpec g_metas_columns[] = {
// Blobs.
{"specifics", "blob"},
{"server_specifics", "blob"},
+ {"base_server_specifics", "blob"}
};
// At least enforce that there are equal number of column names and fields.
diff --git a/chrome/browser/sync/syncable/syncable_enum_conversions.cc b/chrome/browser/sync/syncable/syncable_enum_conversions.cc
index 0a371d9..a22a1cf 100644
--- a/chrome/browser/sync/syncable/syncable_enum_conversions.cc
+++ b/chrome/browser/sync/syncable/syncable_enum_conversions.cc
@@ -135,11 +135,12 @@ const char* GetStringFieldString(StringField string_field) {
}
const char* GetProtoFieldString(ProtoField proto_field) {
- ASSERT_ENUM_BOUNDS(SPECIFICS, SERVER_SPECIFICS,
+ ASSERT_ENUM_BOUNDS(SPECIFICS, BASE_SERVER_SPECIFICS,
PROTO_FIELDS_BEGIN, PROTO_FIELDS_END - 1);
switch (proto_field) {
ENUM_CASE(SPECIFICS);
ENUM_CASE(SERVER_SPECIFICS);
+ ENUM_CASE(BASE_SERVER_SPECIFICS);
case PROTO_FIELDS_END: break;
}
NOTREACHED();
diff --git a/chrome/browser/sync/test/integration/two_client_sessions_sync_test.cc b/chrome/browser/sync/test/integration/two_client_sessions_sync_test.cc
index 8748125..2b762e4 100644
--- a/chrome/browser/sync/test/integration/two_client_sessions_sync_test.cc
+++ b/chrome/browser/sync/test/integration/two_client_sessions_sync_test.cc
@@ -256,21 +256,19 @@ IN_PROC_BROWSER_TEST_F(TwoClientSessionsSyncTest,
client1_windows.GetMutable()));
// At this point we enter the passphrase, triggering a resync, in which the
- // local changes of client 1 get overwritten for now.
+ // local changes of client 1 get sent to client 0.
GetClient(1)->service()->SetPassphrase(kValidPassphrase, true);
ASSERT_TRUE(GetClient(1)->AwaitPassphraseAccepted());
ASSERT_TRUE(GetClient(1)->WaitForTypeEncryption(syncable::SESSIONS));
+ ASSERT_TRUE(GetClient(1)->AwaitMutualSyncCycleCompletion(GetClient(0)));
ASSERT_EQ(0, GetClient(1)->GetLastSessionSnapshot()->
num_conflicting_updates);
ASSERT_TRUE(IsEncrypted(0, syncable::SESSIONS));
ASSERT_TRUE(IsEncrypted(1, syncable::SESSIONS));
- // The session data from client 1 got overwritten. As a result, client 0
- // should have no foreign session data. TODO(zea): update this once bug 76596
- // is resolved and we don't choose server wins on encryption conflicts.
SyncedSessionVector sessions0;
SyncedSessionVector sessions1;
- ASSERT_FALSE(GetSessionData(0, &sessions0));
+ ASSERT_TRUE(GetSessionData(0, &sessions0));
ASSERT_FALSE(GetSessionData(1, &sessions1));
}